author     Dimitry Andric <dim@FreeBSD.org>    2019-08-20 20:50:49 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2019-08-20 20:50:49 +0000
commit     2298981669bf3bd63335a4be179bc0f96823a8f4 (patch)
tree       1cbe2eb27f030d2d70b80ee5ca3c86bee7326a9f /lib
parent     9a83721404652cea39e9f02ae3e3b5c964602a5c (diff)
Vendor import of stripped clang trunk r366426 (just before the… (tag: vendor/clang/clang-trunk-r366426)
Notes:
    svn path=/vendor/clang/dist/; revision=351280
    svn path=/vendor/clang/clang-trunk-r366426/; revision=351281; tag=vendor/clang/clang-trunk-r366426
Diffstat (limited to 'lib')
-rw-r--r--lib/ARCMigrate/ARCMT.cpp17
-rw-r--r--lib/ARCMigrate/ARCMTActions.cpp7
-rw-r--r--lib/ARCMigrate/FileRemapper.cpp7
-rw-r--r--lib/ARCMigrate/Internals.h7
-rw-r--r--lib/ARCMigrate/ObjCMT.cpp68
-rw-r--r--lib/ARCMigrate/PlistReporter.cpp9
-rw-r--r--lib/ARCMigrate/TransAPIUses.cpp7
-rw-r--r--lib/ARCMigrate/TransARCAssign.cpp7
-rw-r--r--lib/ARCMigrate/TransAutoreleasePool.cpp7
-rw-r--r--lib/ARCMigrate/TransBlockObjCVariable.cpp7
-rw-r--r--lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp14
-rw-r--r--lib/ARCMigrate/TransGCAttrs.cpp12
-rw-r--r--lib/ARCMigrate/TransGCCalls.cpp7
-rw-r--r--lib/ARCMigrate/TransProperties.cpp7
-rw-r--r--lib/ARCMigrate/TransProtectedScope.cpp7
-rw-r--r--lib/ARCMigrate/TransRetainReleaseDealloc.cpp15
-rw-r--r--lib/ARCMigrate/TransUnbridgedCasts.cpp7
-rw-r--r--lib/ARCMigrate/TransUnusedInitDelegate.cpp7
-rw-r--r--lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp7
-rw-r--r--lib/ARCMigrate/TransformActions.cpp13
-rw-r--r--lib/ARCMigrate/Transforms.cpp14
-rw-r--r--lib/ARCMigrate/Transforms.h7
-rw-r--r--lib/AST/APValue.cpp166
-rw-r--r--lib/AST/ASTConsumer.cpp7
-rw-r--r--lib/AST/ASTContext.cpp584
-rw-r--r--lib/AST/ASTDiagnostic.cpp12
-rw-r--r--lib/AST/ASTDumper.cpp1455
-rw-r--r--lib/AST/ASTImporter.cpp1888
-rw-r--r--lib/AST/ASTImporterLookupTable.cpp32
-rw-r--r--lib/AST/ASTStructuralEquivalence.cpp387
-rw-r--r--lib/AST/ASTTypeTraits.cpp25
-rw-r--r--lib/AST/AttrImpl.cpp7
-rw-r--r--lib/AST/CXXABI.h7
-rw-r--r--lib/AST/CXXInheritance.cpp25
-rw-r--r--lib/AST/Comment.cpp7
-rw-r--r--lib/AST/CommentBriefParser.cpp7
-rw-r--r--lib/AST/CommentCommandTraits.cpp7
-rw-r--r--lib/AST/CommentLexer.cpp7
-rw-r--r--lib/AST/CommentParser.cpp7
-rw-r--r--lib/AST/CommentSema.cpp7
-rw-r--r--lib/AST/ComparisonCategories.cpp7
-rw-r--r--lib/AST/DataCollection.cpp7
-rw-r--r--lib/AST/Decl.cpp465
-rw-r--r--lib/AST/DeclBase.cpp61
-rw-r--r--lib/AST/DeclCXX.cpp264
-rw-r--r--lib/AST/DeclFriend.cpp7
-rw-r--r--lib/AST/DeclGroup.cpp7
-rw-r--r--lib/AST/DeclObjC.cpp9
-rw-r--r--lib/AST/DeclOpenMP.cpp110
-rw-r--r--lib/AST/DeclPrinter.cpp229
-rw-r--r--lib/AST/DeclTemplate.cpp50
-rw-r--r--lib/AST/DeclarationName.cpp15
-rw-r--r--lib/AST/Expr.cpp880
-rw-r--r--lib/AST/ExprCXX.cpp57
-rw-r--r--lib/AST/ExprClassification.cpp9
-rw-r--r--lib/AST/ExprConstant.cpp2310
-rw-r--r--lib/AST/ExprObjC.cpp38
-rw-r--r--lib/AST/ExternalASTMerger.cpp67
-rw-r--r--lib/AST/ExternalASTSource.cpp7
-rw-r--r--lib/AST/FormatString.cpp41
-rw-r--r--lib/AST/InheritViz.cpp7
-rw-r--r--lib/AST/ItaniumCXXABI.cpp7
-rw-r--r--lib/AST/ItaniumMangle.cpp139
-rw-r--r--lib/AST/JSONNodeDumper.cpp1569
-rw-r--r--lib/AST/Linkage.h7
-rw-r--r--lib/AST/Mangle.cpp212
-rw-r--r--lib/AST/MicrosoftCXXABI.cpp7
-rw-r--r--lib/AST/MicrosoftMangle.cpp176
-rw-r--r--lib/AST/NSAPI.cpp7
-rw-r--r--lib/AST/NestedNameSpecifier.cpp7
-rw-r--r--lib/AST/ODRHash.cpp90
-rw-r--r--lib/AST/OpenMPClause.cpp360
-rw-r--r--lib/AST/ParentMap.cpp19
-rw-r--r--lib/AST/PrintfFormatString.cpp41
-rw-r--r--lib/AST/QualTypeNames.cpp22
-rw-r--r--lib/AST/RawCommentList.cpp7
-rw-r--r--lib/AST/RecordLayout.cpp7
-rw-r--r--lib/AST/RecordLayoutBuilder.cpp167
-rw-r--r--lib/AST/ScanfFormatString.cpp18
-rw-r--r--lib/AST/SelectorLocationsKind.cpp7
-rw-r--r--lib/AST/Stmt.cpp91
-rw-r--r--lib/AST/StmtCXX.cpp7
-rw-r--r--lib/AST/StmtIterator.cpp7
-rw-r--r--lib/AST/StmtObjC.cpp7
-rw-r--r--lib/AST/StmtOpenMP.cpp26
-rw-r--r--lib/AST/StmtPrinter.cpp102
-rw-r--r--lib/AST/StmtProfile.cpp36
-rw-r--r--lib/AST/StmtViz.cpp7
-rw-r--r--lib/AST/TemplateBase.cpp7
-rw-r--r--lib/AST/TemplateName.cpp37
-rw-r--r--lib/AST/TextNodeDumper.cpp802
-rw-r--r--lib/AST/Type.cpp542
-rw-r--r--lib/AST/TypeLoc.cpp7
-rw-r--r--lib/AST/TypePrinter.cpp80
-rw-r--r--lib/AST/VTTBuilder.cpp7
-rw-r--r--lib/AST/VTableBuilder.cpp25
-rw-r--r--lib/ASTMatchers/ASTMatchFinder.cpp41
-rw-r--r--lib/ASTMatchers/ASTMatchersInternal.cpp15
-rw-r--r--lib/ASTMatchers/Dynamic/Diagnostics.cpp7
-rw-r--r--lib/ASTMatchers/Dynamic/Marshallers.h30
-rw-r--r--lib/ASTMatchers/Dynamic/Parser.cpp7
-rw-r--r--lib/ASTMatchers/Dynamic/Registry.cpp57
-rw-r--r--lib/ASTMatchers/Dynamic/VariantValue.cpp7
-rw-r--r--lib/Analysis/AnalysisDeclContext.cpp104
-rw-r--r--lib/Analysis/BodyFarm.cpp40
-rw-r--r--lib/Analysis/CFG.cpp288
-rw-r--r--lib/Analysis/CFGReachabilityAnalysis.cpp7
-rw-r--r--lib/Analysis/CFGStmtMap.cpp9
-rw-r--r--lib/Analysis/CallGraph.cpp7
-rw-r--r--lib/Analysis/CloneDetection.cpp13
-rw-r--r--lib/Analysis/CocoaConventions.cpp7
-rw-r--r--lib/Analysis/CodeInjector.cpp7
-rw-r--r--lib/Analysis/ConstructionContext.cpp7
-rw-r--r--lib/Analysis/Consumed.cpp9
-rw-r--r--lib/Analysis/Dominators.cpp17
-rw-r--r--lib/Analysis/ExprMutationAnalyzer.cpp53
-rw-r--r--lib/Analysis/LiveVariables.cpp9
-rw-r--r--lib/Analysis/ObjCNoReturn.cpp7
-rw-r--r--lib/Analysis/PostOrderCFGView.cpp7
-rw-r--r--lib/Analysis/ProgramPoint.cpp225
-rw-r--r--lib/Analysis/ReachableCode.cpp31
-rw-r--r--lib/Analysis/RetainSummaryManager.cpp (renamed from lib/StaticAnalyzer/Core/RetainSummaryManager.cpp)350
-rw-r--r--lib/Analysis/ThreadSafety.cpp28
-rw-r--r--lib/Analysis/ThreadSafetyCommon.cpp28
-rw-r--r--lib/Analysis/ThreadSafetyLogical.cpp7
-rw-r--r--lib/Analysis/ThreadSafetyTIL.cpp7
-rw-r--r--lib/Analysis/UninitializedValues.cpp26
-rw-r--r--lib/Analysis/plugins/CheckerDependencyHandling/CheckerDependencyHandling.cpp28
-rw-r--r--lib/Analysis/plugins/CheckerDependencyHandling/CheckerDependencyHandlingAnalyzerPlugin.exports2
-rw-r--r--lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandling.cpp44
-rw-r--r--lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandlingAnalyzerPlugin.exports2
-rw-r--r--lib/Analysis/plugins/SampleAnalyzer/MainCallChecker.cpp54
-rw-r--r--lib/Analysis/plugins/SampleAnalyzer/SampleAnalyzerPlugin.exports2
-rw-r--r--lib/Basic/Builtins.cpp44
-rw-r--r--lib/Basic/CharInfo.cpp7
-rw-r--r--lib/Basic/CodeGenOptions.cpp7
-rw-r--r--lib/Basic/Cuda.cpp76
-rw-r--r--lib/Basic/Diagnostic.cpp14
-rw-r--r--lib/Basic/DiagnosticIDs.cpp46
-rw-r--r--lib/Basic/DiagnosticOptions.cpp7
-rw-r--r--lib/Basic/FileManager.cpp172
-rw-r--r--lib/Basic/FileSystemStatCache.cpp80
-rw-r--r--lib/Basic/FixedPoint.cpp169
-rw-r--r--lib/Basic/IdentifierTable.cpp13
-rw-r--r--lib/Basic/LangOptions.cpp7
-rw-r--r--lib/Basic/MemoryBufferCache.cpp48
-rw-r--r--lib/Basic/Module.cpp24
-rw-r--r--lib/Basic/ObjCRuntime.cpp7
-rw-r--r--lib/Basic/OpenMPKinds.cpp73
-rw-r--r--lib/Basic/OperatorPrecedence.cpp7
-rw-r--r--lib/Basic/SanitizerBlacklist.cpp7
-rw-r--r--lib/Basic/SanitizerSpecialCaseList.cpp9
-rw-r--r--lib/Basic/Sanitizers.cpp30
-rw-r--r--lib/Basic/SourceLocation.cpp7
-rw-r--r--lib/Basic/SourceManager.cpp92
-rw-r--r--lib/Basic/TargetInfo.cpp27
-rw-r--r--lib/Basic/Targets.cpp52
-rw-r--r--lib/Basic/Targets.h11
-rw-r--r--lib/Basic/Targets/AArch64.cpp108
-rw-r--r--lib/Basic/Targets/AArch64.h31
-rw-r--r--lib/Basic/Targets/AMDGPU.cpp56
-rw-r--r--lib/Basic/Targets/AMDGPU.h10
-rw-r--r--lib/Basic/Targets/ARC.cpp9
-rw-r--r--lib/Basic/Targets/ARC.h7
-rw-r--r--lib/Basic/Targets/ARM.cpp157
-rw-r--r--lib/Basic/Targets/ARM.h21
-rw-r--r--lib/Basic/Targets/AVR.cpp7
-rw-r--r--lib/Basic/Targets/AVR.h7
-rw-r--r--lib/Basic/Targets/BPF.cpp11
-rw-r--r--lib/Basic/Targets/BPF.h7
-rw-r--r--lib/Basic/Targets/Hexagon.cpp7
-rw-r--r--lib/Basic/Targets/Hexagon.h7
-rw-r--r--lib/Basic/Targets/Lanai.cpp7
-rw-r--r--lib/Basic/Targets/Lanai.h7
-rw-r--r--lib/Basic/Targets/Le64.cpp7
-rw-r--r--lib/Basic/Targets/Le64.h7
-rw-r--r--lib/Basic/Targets/MSP430.cpp7
-rw-r--r--lib/Basic/Targets/MSP430.h13
-rw-r--r--lib/Basic/Targets/Mips.cpp15
-rw-r--r--lib/Basic/Targets/Mips.h9
-rw-r--r--lib/Basic/Targets/NVPTX.cpp15
-rw-r--r--lib/Basic/Targets/NVPTX.h31
-rw-r--r--lib/Basic/Targets/OSTargets.cpp86
-rw-r--r--lib/Basic/Targets/OSTargets.h194
-rw-r--r--lib/Basic/Targets/PNaCl.cpp7
-rw-r--r--lib/Basic/Targets/PNaCl.h7
-rw-r--r--lib/Basic/Targets/PPC.cpp36
-rw-r--r--lib/Basic/Targets/PPC.h87
-rw-r--r--lib/Basic/Targets/RISCV.cpp29
-rw-r--r--lib/Basic/Targets/RISCV.h21
-rw-r--r--lib/Basic/Targets/SPIR.cpp7
-rw-r--r--lib/Basic/Targets/SPIR.h8
-rw-r--r--lib/Basic/Targets/Sparc.cpp7
-rw-r--r--lib/Basic/Targets/Sparc.h9
-rw-r--r--lib/Basic/Targets/SystemZ.cpp13
-rw-r--r--lib/Basic/Targets/SystemZ.h11
-rw-r--r--lib/Basic/Targets/TCE.cpp7
-rw-r--r--lib/Basic/Targets/TCE.h7
-rw-r--r--lib/Basic/Targets/WebAssembly.cpp80
-rw-r--r--lib/Basic/Targets/WebAssembly.h21
-rw-r--r--lib/Basic/Targets/X86.cpp229
-rw-r--r--lib/Basic/Targets/X86.h33
-rw-r--r--lib/Basic/Targets/XCore.cpp7
-rw-r--r--lib/Basic/Targets/XCore.h7
-rw-r--r--lib/Basic/TokenKinds.cpp7
-rw-r--r--lib/Basic/Version.cpp21
-rw-r--r--lib/Basic/Warnings.cpp7
-rw-r--r--lib/Basic/XRayInstr.cpp7
-rw-r--r--lib/Basic/XRayLists.cpp9
-rw-r--r--lib/CodeGen/ABIInfo.h7
-rw-r--r--lib/CodeGen/Address.h7
-rw-r--r--lib/CodeGen/BackendUtil.cpp324
-rw-r--r--lib/CodeGen/CGAtomic.cpp34
-rw-r--r--lib/CodeGen/CGBlocks.cpp177
-rw-r--r--lib/CodeGen/CGBlocks.h7
-rw-r--r--lib/CodeGen/CGBuilder.h95
-rw-r--r--lib/CodeGen/CGBuiltin.cpp1751
-rw-r--r--lib/CodeGen/CGCUDANV.cpp249
-rw-r--r--lib/CodeGen/CGCUDARuntime.cpp7
-rw-r--r--lib/CodeGen/CGCUDARuntime.h16
-rw-r--r--lib/CodeGen/CGCXX.cpp54
-rw-r--r--lib/CodeGen/CGCXXABI.cpp15
-rw-r--r--lib/CodeGen/CGCXXABI.h31
-rw-r--r--lib/CodeGen/CGCall.cpp381
-rw-r--r--lib/CodeGen/CGCall.h22
-rw-r--r--lib/CodeGen/CGClass.cpp127
-rw-r--r--lib/CodeGen/CGCleanup.cpp25
-rw-r--r--lib/CodeGen/CGCleanup.h7
-rw-r--r--lib/CodeGen/CGCoroutine.cpp14
-rw-r--r--lib/CodeGen/CGDebugInfo.cpp417
-rw-r--r--lib/CodeGen/CGDebugInfo.h45
-rw-r--r--lib/CodeGen/CGDecl.cpp630
-rw-r--r--lib/CodeGen/CGDeclCXX.cpp104
-rw-r--r--lib/CodeGen/CGException.cpp92
-rw-r--r--lib/CodeGen/CGExpr.cpp386
-rw-r--r--lib/CodeGen/CGExprAgg.cpp125
-rw-r--r--lib/CodeGen/CGExprCXX.cpp197
-rw-r--r--lib/CodeGen/CGExprComplex.cpp35
-rw-r--r--lib/CodeGen/CGExprConstant.cpp1283
-rw-r--r--lib/CodeGen/CGExprScalar.cpp467
-rw-r--r--lib/CodeGen/CGGPUBuiltin.cpp7
-rw-r--r--lib/CodeGen/CGLoopInfo.cpp551
-rw-r--r--lib/CodeGen/CGLoopInfo.h91
-rw-r--r--lib/CodeGen/CGNonTrivialStruct.cpp225
-rw-r--r--lib/CodeGen/CGObjC.cpp324
-rw-r--r--lib/CodeGen/CGObjCGNU.cpp317
-rw-r--r--lib/CodeGen/CGObjCMac.cpp576
-rw-r--r--lib/CodeGen/CGObjCRuntime.cpp29
-rw-r--r--lib/CodeGen/CGObjCRuntime.h40
-rw-r--r--lib/CodeGen/CGOpenCLRuntime.cpp37
-rw-r--r--lib/CodeGen/CGOpenCLRuntime.h11
-rw-r--r--lib/CodeGen/CGOpenMPRuntime.cpp1871
-rw-r--r--lib/CodeGen/CGOpenMPRuntime.h141
-rw-r--r--lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp1117
-rw-r--r--lib/CodeGen/CGOpenMPRuntimeNVPTX.h40
-rw-r--r--lib/CodeGen/CGRecordLayout.h7
-rw-r--r--lib/CodeGen/CGRecordLayoutBuilder.cpp39
-rw-r--r--lib/CodeGen/CGStmt.cpp230
-rw-r--r--lib/CodeGen/CGStmtOpenMP.cpp156
-rw-r--r--lib/CodeGen/CGVTT.cpp7
-rw-r--r--lib/CodeGen/CGVTables.cpp39
-rw-r--r--lib/CodeGen/CGVTables.h7
-rw-r--r--lib/CodeGen/CGValue.h7
-rw-r--r--lib/CodeGen/CodeGenABITypes.cpp16
-rw-r--r--lib/CodeGen/CodeGenAction.cpp63
-rw-r--r--lib/CodeGen/CodeGenFunction.cpp251
-rw-r--r--lib/CodeGen/CodeGenFunction.h176
-rw-r--r--lib/CodeGen/CodeGenModule.cpp577
-rw-r--r--lib/CodeGen/CodeGenModule.h160
-rw-r--r--lib/CodeGen/CodeGenPGO.cpp13
-rw-r--r--lib/CodeGen/CodeGenPGO.h7
-rw-r--r--lib/CodeGen/CodeGenTBAA.cpp13
-rw-r--r--lib/CodeGen/CodeGenTBAA.h7
-rw-r--r--lib/CodeGen/CodeGenTypeCache.h7
-rw-r--r--lib/CodeGen/CodeGenTypes.cpp18
-rw-r--r--lib/CodeGen/CodeGenTypes.h87
-rw-r--r--lib/CodeGen/ConstantEmitter.h7
-rw-r--r--lib/CodeGen/ConstantInitBuilder.cpp7
-rw-r--r--lib/CodeGen/CoverageMappingGen.cpp28
-rw-r--r--lib/CodeGen/CoverageMappingGen.h7
-rw-r--r--lib/CodeGen/EHScopeStack.h7
-rw-r--r--lib/CodeGen/ItaniumCXXABI.cpp278
-rw-r--r--lib/CodeGen/MacroPPCallbacks.cpp7
-rw-r--r--lib/CodeGen/MacroPPCallbacks.h7
-rw-r--r--lib/CodeGen/MicrosoftCXXABI.cpp257
-rw-r--r--lib/CodeGen/ModuleBuilder.cpp7
-rw-r--r--lib/CodeGen/ObjectFilePCHContainerOperations.cpp19
-rw-r--r--lib/CodeGen/PatternInit.cpp85
-rw-r--r--lib/CodeGen/PatternInit.h27
-rw-r--r--lib/CodeGen/SanitizerMetadata.cpp28
-rw-r--r--lib/CodeGen/SanitizerMetadata.h7
-rw-r--r--lib/CodeGen/SwiftCallingConv.cpp7
-rw-r--r--lib/CodeGen/TargetInfo.cpp444
-rw-r--r--lib/CodeGen/TargetInfo.h24
-rw-r--r--lib/CodeGen/VarBypassDetector.cpp7
-rw-r--r--lib/CodeGen/VarBypassDetector.h7
-rw-r--r--lib/CrossTU/CrossTranslationUnit.cpp216
-rw-r--r--lib/DirectoryWatcher/DirectoryScanner.cpp54
-rw-r--r--lib/DirectoryWatcher/DirectoryScanner.h29
-rw-r--r--lib/DirectoryWatcher/default/DirectoryWatcher-not-implemented.cpp19
-rw-r--r--lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp353
-rw-r--r--lib/DirectoryWatcher/mac/DirectoryWatcher-mac.cpp233
-rw-r--r--lib/Driver/Action.cpp7
-rw-r--r--lib/Driver/Compilation.cpp19
-rw-r--r--lib/Driver/DarwinSDKInfo.cpp7
-rw-r--r--lib/Driver/Distro.cpp12
-rw-r--r--lib/Driver/Driver.cpp215
-rw-r--r--lib/Driver/DriverOptions.cpp7
-rw-r--r--lib/Driver/InputInfo.h7
-rw-r--r--lib/Driver/Job.cpp13
-rw-r--r--lib/Driver/Multilib.cpp27
-rw-r--r--lib/Driver/Phases.cpp7
-rw-r--r--lib/Driver/SanitizerArgs.cpp291
-rw-r--r--lib/Driver/Tool.cpp7
-rw-r--r--lib/Driver/ToolChain.cpp147
-rw-r--r--lib/Driver/ToolChains/AMDGPU.cpp28
-rw-r--r--lib/Driver/ToolChains/AMDGPU.h11
-rw-r--r--lib/Driver/ToolChains/AVR.cpp129
-rw-r--r--lib/Driver/ToolChains/AVR.h30
-rw-r--r--lib/Driver/ToolChains/Ananas.cpp7
-rw-r--r--lib/Driver/ToolChains/Ananas.h7
-rw-r--r--lib/Driver/ToolChains/Arch/AArch64.cpp69
-rw-r--r--lib/Driver/ToolChains/Arch/AArch64.h7
-rw-r--r--lib/Driver/ToolChains/Arch/ARM.cpp132
-rw-r--r--lib/Driver/ToolChains/Arch/ARM.h10
-rw-r--r--lib/Driver/ToolChains/Arch/Mips.cpp8
-rw-r--r--lib/Driver/ToolChains/Arch/Mips.h7
-rw-r--r--lib/Driver/ToolChains/Arch/PPC.cpp9
-rw-r--r--lib/Driver/ToolChains/Arch/PPC.h7
-rw-r--r--lib/Driver/ToolChains/Arch/RISCV.cpp55
-rw-r--r--lib/Driver/ToolChains/Arch/RISCV.h7
-rw-r--r--lib/Driver/ToolChains/Arch/Sparc.cpp7
-rw-r--r--lib/Driver/ToolChains/Arch/Sparc.h7
-rw-r--r--lib/Driver/ToolChains/Arch/SystemZ.cpp7
-rw-r--r--lib/Driver/ToolChains/Arch/SystemZ.h7
-rw-r--r--lib/Driver/ToolChains/Arch/X86.cpp8
-rw-r--r--lib/Driver/ToolChains/Arch/X86.h7
-rw-r--r--lib/Driver/ToolChains/BareMetal.cpp9
-rw-r--r--lib/Driver/ToolChains/BareMetal.h7
-rw-r--r--lib/Driver/ToolChains/Clang.cpp559
-rw-r--r--lib/Driver/ToolChains/Clang.h9
-rw-r--r--lib/Driver/ToolChains/CloudABI.cpp7
-rw-r--r--lib/Driver/ToolChains/CloudABI.h7
-rw-r--r--lib/Driver/ToolChains/CommonArgs.cpp199
-rw-r--r--lib/Driver/ToolChains/CommonArgs.h20
-rw-r--r--lib/Driver/ToolChains/Contiki.cpp7
-rw-r--r--lib/Driver/ToolChains/Contiki.h7
-rw-r--r--lib/Driver/ToolChains/CrossWindows.cpp11
-rw-r--r--lib/Driver/ToolChains/CrossWindows.h7
-rw-r--r--lib/Driver/ToolChains/Cuda.cpp53
-rw-r--r--lib/Driver/ToolChains/Cuda.h7
-rw-r--r--lib/Driver/ToolChains/Darwin.cpp336
-rw-r--r--lib/Driver/ToolChains/Darwin.h25
-rw-r--r--lib/Driver/ToolChains/DragonFly.cpp7
-rw-r--r--lib/Driver/ToolChains/DragonFly.h7
-rw-r--r--lib/Driver/ToolChains/FreeBSD.cpp9
-rw-r--r--lib/Driver/ToolChains/FreeBSD.h7
-rw-r--r--lib/Driver/ToolChains/Fuchsia.cpp68
-rw-r--r--lib/Driver/ToolChains/Fuchsia.h7
-rw-r--r--lib/Driver/ToolChains/Gnu.cpp206
-rw-r--r--lib/Driver/ToolChains/Gnu.h7
-rw-r--r--lib/Driver/ToolChains/HIP.cpp134
-rw-r--r--lib/Driver/ToolChains/HIP.h7
-rw-r--r--lib/Driver/ToolChains/Haiku.cpp7
-rw-r--r--lib/Driver/ToolChains/Haiku.h7
-rw-r--r--lib/Driver/ToolChains/Hexagon.cpp13
-rw-r--r--lib/Driver/ToolChains/Hexagon.h7
-rw-r--r--lib/Driver/ToolChains/Hurd.cpp7
-rw-r--r--lib/Driver/ToolChains/Hurd.h7
-rw-r--r--lib/Driver/ToolChains/Lanai.h7
-rw-r--r--lib/Driver/ToolChains/Linux.cpp91
-rw-r--r--lib/Driver/ToolChains/Linux.h8
-rw-r--r--lib/Driver/ToolChains/MSP430.cpp7
-rw-r--r--lib/Driver/ToolChains/MSP430.h11
-rw-r--r--lib/Driver/ToolChains/MSVC.cpp43
-rw-r--r--lib/Driver/ToolChains/MSVC.h7
-rw-r--r--lib/Driver/ToolChains/MinGW.cpp38
-rw-r--r--lib/Driver/ToolChains/MinGW.h7
-rw-r--r--lib/Driver/ToolChains/Minix.cpp7
-rw-r--r--lib/Driver/ToolChains/Minix.h7
-rw-r--r--lib/Driver/ToolChains/MipsLinux.cpp27
-rw-r--r--lib/Driver/ToolChains/MipsLinux.h12
-rw-r--r--lib/Driver/ToolChains/Myriad.cpp7
-rw-r--r--lib/Driver/ToolChains/Myriad.h7
-rw-r--r--lib/Driver/ToolChains/NaCl.cpp7
-rw-r--r--lib/Driver/ToolChains/NaCl.h7
-rw-r--r--lib/Driver/ToolChains/NetBSD.cpp37
-rw-r--r--lib/Driver/ToolChains/NetBSD.h7
-rw-r--r--lib/Driver/ToolChains/OpenBSD.cpp15
-rw-r--r--lib/Driver/ToolChains/OpenBSD.h7
-rw-r--r--lib/Driver/ToolChains/PPCLinux.cpp31
-rw-r--r--lib/Driver/ToolChains/PPCLinux.h33
-rw-r--r--lib/Driver/ToolChains/PS4CPU.cpp9
-rw-r--r--lib/Driver/ToolChains/PS4CPU.h7
-rw-r--r--lib/Driver/ToolChains/RISCVToolchain.cpp7
-rw-r--r--lib/Driver/ToolChains/RISCVToolchain.h7
-rw-r--r--lib/Driver/ToolChains/Solaris.cpp27
-rw-r--r--lib/Driver/ToolChains/Solaris.h7
-rw-r--r--lib/Driver/ToolChains/TCE.cpp7
-rw-r--r--lib/Driver/ToolChains/TCE.h7
-rw-r--r--lib/Driver/ToolChains/WebAssembly.cpp116
-rw-r--r--lib/Driver/ToolChains/WebAssembly.h17
-rw-r--r--lib/Driver/ToolChains/XCore.cpp7
-rw-r--r--lib/Driver/ToolChains/XCore.h7
-rw-r--r--lib/Driver/Types.cpp7
-rw-r--r--lib/Driver/XRayArgs.cpp7
-rw-r--r--lib/Edit/Commit.cpp7
-rw-r--r--lib/Edit/EditedSource.cpp9
-rw-r--r--lib/Edit/RewriteObjCFoundationAPI.cpp10
-rw-r--r--lib/Format/AffectedRangeManager.cpp7
-rw-r--r--lib/Format/AffectedRangeManager.h7
-rw-r--r--lib/Format/BreakableToken.cpp51
-rw-r--r--lib/Format/BreakableToken.h20
-rw-r--r--lib/Format/ContinuationIndenter.cpp120
-rw-r--r--lib/Format/ContinuationIndenter.h11
-rw-r--r--lib/Format/Encoding.h7
-rw-r--r--lib/Format/Format.cpp213
-rw-r--r--lib/Format/FormatInternal.h7
-rw-r--r--lib/Format/FormatToken.cpp7
-rw-r--r--lib/Format/FormatToken.h156
-rw-r--r--lib/Format/FormatTokenLexer.cpp140
-rw-r--r--lib/Format/FormatTokenLexer.h14
-rw-r--r--lib/Format/NamespaceEndCommentsFixer.cpp98
-rw-r--r--lib/Format/NamespaceEndCommentsFixer.h7
-rw-r--r--lib/Format/SortJavaScriptImports.cpp19
-rw-r--r--lib/Format/SortJavaScriptImports.h7
-rw-r--r--lib/Format/TokenAnalyzer.cpp7
-rw-r--r--lib/Format/TokenAnalyzer.h11
-rw-r--r--lib/Format/TokenAnnotator.cpp215
-rw-r--r--lib/Format/TokenAnnotator.h24
-rw-r--r--lib/Format/UnwrappedLineFormatter.cpp90
-rw-r--r--lib/Format/UnwrappedLineFormatter.h13
-rw-r--r--lib/Format/UnwrappedLineParser.cpp123
-rw-r--r--lib/Format/UnwrappedLineParser.h10
-rw-r--r--lib/Format/UsingDeclarationsSorter.cpp10
-rw-r--r--lib/Format/UsingDeclarationsSorter.h7
-rw-r--r--lib/Format/WhitespaceManager.cpp207
-rw-r--r--lib/Format/WhitespaceManager.h12
-rw-r--r--lib/Frontend/ASTConsumers.cpp30
-rw-r--r--lib/Frontend/ASTMerge.cpp21
-rw-r--r--lib/Frontend/ASTUnit.cpp202
-rw-r--r--lib/Frontend/ChainedDiagnosticConsumer.cpp7
-rw-r--r--lib/Frontend/ChainedIncludesSource.cpp15
-rw-r--r--lib/Frontend/CompilerInstance.cpp150
-rw-r--r--lib/Frontend/CompilerInvocation.cpp193
-rw-r--r--lib/Frontend/CreateInvocationFromCommandLine.cpp7
-rw-r--r--lib/Frontend/DependencyFile.cpp285
-rw-r--r--lib/Frontend/DependencyGraph.cpp7
-rw-r--r--lib/Frontend/DiagnosticRenderer.cpp7
-rw-r--r--lib/Frontend/FrontendAction.cpp30
-rw-r--r--lib/Frontend/FrontendActions.cpp85
-rw-r--r--lib/Frontend/FrontendOptions.cpp7
-rw-r--r--lib/Frontend/FrontendTiming.cpp9
-rw-r--r--lib/Frontend/HeaderIncludeGen.cpp9
-rw-r--r--lib/Frontend/InitHeaderSearch.cpp89
-rw-r--r--lib/Frontend/InitPreprocessor.cpp45
-rw-r--r--lib/Frontend/InterfaceStubFunctionsConsumer.cpp378
-rw-r--r--lib/Frontend/LangStandards.cpp7
-rw-r--r--lib/Frontend/LayoutOverrideSource.cpp7
-rw-r--r--lib/Frontend/LogDiagnosticPrinter.cpp7
-rw-r--r--lib/Frontend/ModuleDependencyCollector.cpp31
-rw-r--r--lib/Frontend/MultiplexConsumer.cpp13
-rw-r--r--lib/Frontend/PrecompiledPreamble.cpp70
-rw-r--r--lib/Frontend/PrintPreprocessedOutput.cpp45
-rw-r--r--lib/Frontend/Rewrite/FixItRewriter.cpp7
-rw-r--r--lib/Frontend/Rewrite/FrontendActions.cpp15
-rw-r--r--lib/Frontend/Rewrite/HTMLPrint.cpp7
-rw-r--r--lib/Frontend/Rewrite/InclusionRewriter.cpp9
-rw-r--r--lib/Frontend/Rewrite/RewriteMacros.cpp7
-rw-r--r--lib/Frontend/Rewrite/RewriteModernObjC.cpp73
-rw-r--r--lib/Frontend/Rewrite/RewriteObjC.cpp41
-rw-r--r--lib/Frontend/Rewrite/RewriteTest.cpp7
-rw-r--r--lib/Frontend/SerializedDiagnosticPrinter.cpp9
-rw-r--r--lib/Frontend/SerializedDiagnosticReader.cpp118
-rw-r--r--lib/Frontend/TestModuleFileExtension.cpp24
-rw-r--r--lib/Frontend/TestModuleFileExtension.h9
-rw-r--r--lib/Frontend/TextDiagnostic.cpp35
-rw-r--r--lib/Frontend/TextDiagnosticBuffer.cpp7
-rw-r--r--lib/Frontend/TextDiagnosticPrinter.cpp7
-rw-r--r--lib/Frontend/VerifyDiagnosticConsumer.cpp443
-rw-r--r--lib/FrontendTool/ExecuteCompilerInvocation.cpp49
-rw-r--r--lib/Headers/__clang_cuda_builtin_vars.h20
-rw-r--r--lib/Headers/__clang_cuda_cmath.h49
-rw-r--r--lib/Headers/__clang_cuda_complex_builtins.h20
-rw-r--r--lib/Headers/__clang_cuda_device_functions.h93
-rw-r--r--lib/Headers/__clang_cuda_intrinsics.h20
-rw-r--r--lib/Headers/__clang_cuda_libdevice_declares.h890
-rw-r--r--lib/Headers/__clang_cuda_math_forward_declares.h70
-rw-r--r--lib/Headers/__clang_cuda_runtime_wrapper.h32
-rw-r--r--lib/Headers/__stddef_max_align_t.h22
-rw-r--r--lib/Headers/__wmmintrin_aes.h20
-rw-r--r--lib/Headers/__wmmintrin_pclmul.h20
-rw-r--r--lib/Headers/adxintrin.h20
-rw-r--r--lib/Headers/altivec.h20
-rw-r--r--lib/Headers/ammintrin.h20
-rw-r--r--lib/Headers/arm64intr.h20
-rw-r--r--lib/Headers/arm_acle.h38
-rw-r--r--lib/Headers/armintr.h20
-rw-r--r--lib/Headers/avx2intrin.h32
-rw-r--r--lib/Headers/avx512bf16intrin.h279
-rw-r--r--lib/Headers/avx512bitalgintrin.h20
-rw-r--r--lib/Headers/avx512bwintrin.h44
-rw-r--r--lib/Headers/avx512cdintrin.h52
-rw-r--r--lib/Headers/avx512dqintrin.h20
-rw-r--r--lib/Headers/avx512erintrin.h20
-rw-r--r--lib/Headers/avx512fintrin.h115
-rw-r--r--lib/Headers/avx512ifmaintrin.h20
-rw-r--r--lib/Headers/avx512ifmavlintrin.h20
-rw-r--r--lib/Headers/avx512pfintrin.h20
-rw-r--r--lib/Headers/avx512vbmi2intrin.h20
-rw-r--r--lib/Headers/avx512vbmiintrin.h20
-rw-r--r--lib/Headers/avx512vbmivlintrin.h20
-rw-r--r--lib/Headers/avx512vlbf16intrin.h474
-rw-r--r--lib/Headers/avx512vlbitalgintrin.h20
-rw-r--r--lib/Headers/avx512vlbwintrin.h36
-rw-r--r--lib/Headers/avx512vlcdintrin.h86
-rw-r--r--lib/Headers/avx512vldqintrin.h52
-rw-r--r--lib/Headers/avx512vlintrin.h77
-rw-r--r--lib/Headers/avx512vlvbmi2intrin.h20
-rw-r--r--lib/Headers/avx512vlvnniintrin.h20
-rw-r--r--lib/Headers/avx512vlvp2intersectintrin.h121
-rw-r--r--lib/Headers/avx512vnniintrin.h20
-rw-r--r--lib/Headers/avx512vp2intersectintrin.h77
-rw-r--r--lib/Headers/avx512vpopcntdqintrin.h20
-rw-r--r--lib/Headers/avx512vpopcntdqvlintrin.h20
-rw-r--r--lib/Headers/avxintrin.h50
-rw-r--r--lib/Headers/bmi2intrin.h20
-rw-r--r--lib/Headers/bmiintrin.h20
-rw-r--r--lib/Headers/cetintrin.h20
-rw-r--r--lib/Headers/cldemoteintrin.h20
-rw-r--r--lib/Headers/clflushoptintrin.h20
-rw-r--r--lib/Headers/clwbintrin.h20
-rw-r--r--lib/Headers/clzerointrin.h20
-rw-r--r--lib/Headers/cpuid.h24
-rw-r--r--lib/Headers/emmintrin.h55
-rw-r--r--lib/Headers/enqcmdintrin.h63
-rw-r--r--lib/Headers/f16cintrin.h26
-rw-r--r--lib/Headers/float.h28
-rw-r--r--lib/Headers/fma4intrin.h20
-rw-r--r--lib/Headers/fmaintrin.h20
-rw-r--r--lib/Headers/fxsrintrin.h20
-rw-r--r--lib/Headers/gfniintrin.h20
-rw-r--r--lib/Headers/htmintrin.h20
-rw-r--r--lib/Headers/htmxlintrin.h20
-rw-r--r--lib/Headers/ia32intrin.h320
-rw-r--r--lib/Headers/immintrin.h62
-rw-r--r--lib/Headers/intrin.h46
-rw-r--r--lib/Headers/inttypes.h25
-rw-r--r--lib/Headers/invpcidintrin.h20
-rw-r--r--lib/Headers/iso646.h22
-rw-r--r--lib/Headers/limits.h22
-rw-r--r--lib/Headers/lwpintrin.h20
-rw-r--r--lib/Headers/lzcntintrin.h20
-rw-r--r--lib/Headers/mm3dnow.h20
-rw-r--r--lib/Headers/mm_malloc.h20
-rw-r--r--lib/Headers/mmintrin.h22
-rw-r--r--lib/Headers/module.modulemap21
-rw-r--r--lib/Headers/movdirintrin.h20
-rw-r--r--lib/Headers/msa.h20
-rw-r--r--lib/Headers/mwaitxintrin.h20
-rw-r--r--lib/Headers/nmmintrin.h20
-rw-r--r--lib/Headers/opencl-c-base.h578
-rw-r--r--lib/Headers/opencl-c.h696
-rw-r--r--lib/Headers/openmp_wrappers/__clang_openmp_math.h35
-rw-r--r--lib/Headers/openmp_wrappers/__clang_openmp_math_declares.h33
-rw-r--r--lib/Headers/openmp_wrappers/cmath16
-rw-r--r--lib/Headers/openmp_wrappers/math.h17
-rw-r--r--lib/Headers/pconfigintrin.h24
-rw-r--r--lib/Headers/pkuintrin.h20
-rw-r--r--lib/Headers/pmmintrin.h20
-rw-r--r--lib/Headers/popcntintrin.h52
-rw-r--r--lib/Headers/ppc_wrappers/emmintrin.h2318
-rw-r--r--lib/Headers/ppc_wrappers/mm_malloc.h44
-rw-r--r--lib/Headers/ppc_wrappers/mmintrin.h1443
-rw-r--r--lib/Headers/ppc_wrappers/xmmintrin.h1838
-rw-r--r--lib/Headers/prfchwintrin.h20
-rw-r--r--lib/Headers/ptwriteintrin.h20
-rw-r--r--lib/Headers/rdseedintrin.h20
-rw-r--r--lib/Headers/rtmintrin.h20
-rw-r--r--lib/Headers/s390intrin.h20
-rw-r--r--lib/Headers/sgxintrin.h24
-rw-r--r--lib/Headers/shaintrin.h20
-rw-r--r--lib/Headers/smmintrin.h20
-rw-r--r--lib/Headers/stdalign.h20
-rw-r--r--lib/Headers/stdarg.h22
-rw-r--r--lib/Headers/stdatomic.h20
-rw-r--r--lib/Headers/stdbool.h22
-rw-r--r--lib/Headers/stddef.h22
-rw-r--r--lib/Headers/stdint.h27
-rw-r--r--lib/Headers/stdnoreturn.h20
-rw-r--r--lib/Headers/tbmintrin.h20
-rw-r--r--lib/Headers/tgmath.h22
-rw-r--r--lib/Headers/tmmintrin.h20
-rw-r--r--lib/Headers/unwind.h24
-rw-r--r--lib/Headers/vadefs.h20
-rw-r--r--lib/Headers/vaesintrin.h20
-rw-r--r--lib/Headers/varargs.h20
-rw-r--r--lib/Headers/vecintrin.h426
-rw-r--r--lib/Headers/vpclmulqdqintrin.h20
-rw-r--r--lib/Headers/waitpkgintrin.h20
-rw-r--r--lib/Headers/wbnoinvdintrin.h20
-rw-r--r--lib/Headers/wmmintrin.h20
-rw-r--r--lib/Headers/x86intrin.h20
-rw-r--r--lib/Headers/xmmintrin.h40
-rw-r--r--lib/Headers/xopintrin.h20
-rw-r--r--lib/Headers/xsavecintrin.h20
-rw-r--r--lib/Headers/xsaveintrin.h39
-rw-r--r--lib/Headers/xsaveoptintrin.h20
-rw-r--r--lib/Headers/xsavesintrin.h20
-rw-r--r--lib/Headers/xtestintrin.h20
-rw-r--r--lib/Index/CodegenNameGenerator.cpp200
-rw-r--r--lib/Index/CommentToXML.cpp14
-rw-r--r--lib/Index/FileIndexRecord.cpp60
-rw-r--r--lib/Index/FileIndexRecord.h57
-rw-r--r--lib/Index/IndexBody.cpp7
-rw-r--r--lib/Index/IndexDecl.cpp35
-rw-r--r--lib/Index/IndexSymbol.cpp49
-rw-r--r--lib/Index/IndexTypeSourceInfo.cpp58
-rw-r--r--lib/Index/IndexingAction.cpp7
-rw-r--r--lib/Index/IndexingContext.cpp28
-rw-r--r--lib/Index/IndexingContext.h11
-rw-r--r--lib/Index/SimpleFormatContext.h7
-rw-r--r--lib/Index/USRGeneration.cpp16
-rw-r--r--lib/Lex/DependencyDirectivesSourceMinimizer.cpp763
-rw-r--r--lib/Lex/HeaderMap.cpp7
-rw-r--r--lib/Lex/HeaderSearch.cpp111
-rw-r--r--lib/Lex/Lexer.cpp16
-rw-r--r--lib/Lex/LiteralSupport.cpp19
-rw-r--r--lib/Lex/MacroArgs.cpp18
-rw-r--r--lib/Lex/MacroInfo.cpp7
-rw-r--r--lib/Lex/ModuleMap.cpp23
-rw-r--r--lib/Lex/PPCaching.cpp59
-rw-r--r--lib/Lex/PPCallbacks.cpp7
-rw-r--r--lib/Lex/PPConditionalDirectiveRecord.cpp17
-rw-r--r--lib/Lex/PPDirectives.cpp562
-rw-r--r--lib/Lex/PPExpressions.cpp27
-rw-r--r--lib/Lex/PPLexerChange.cpp34
-rw-r--r--lib/Lex/PPMacroExpansion.cpp112
-rw-r--r--lib/Lex/Pragma.cpp338
-rw-r--r--lib/Lex/PreprocessingRecord.cpp25
-rw-r--r--lib/Lex/Preprocessor.cpp412
-rw-r--r--lib/Lex/PreprocessorLexer.cpp15
-rw-r--r--lib/Lex/ScratchBuffer.cpp7
-rw-r--r--lib/Lex/TokenConcatenation.cpp12
-rw-r--r--lib/Lex/TokenLexer.cpp67
-rw-r--r--lib/Lex/UnicodeCharSets.h7
-rw-r--r--lib/Parse/ParseAST.cpp9
-rw-r--r--lib/Parse/ParseCXXInlineMethods.cpp24
-rw-r--r--lib/Parse/ParseDecl.cpp258
-rw-r--r--lib/Parse/ParseDeclCXX.cpp178
-rw-r--r--lib/Parse/ParseExpr.cpp182
-rw-r--r--lib/Parse/ParseExprCXX.cpp636
-rw-r--r--lib/Parse/ParseInit.cpp38
-rw-r--r--lib/Parse/ParseObjc.cpp65
-rw-r--r--lib/Parse/ParseOpenMP.cpp528
-rw-r--r--lib/Parse/ParsePragma.cpp225
-rw-r--r--lib/Parse/ParseStmt.cpp149
-rw-r--r--lib/Parse/ParseStmtAsm.cpp87
-rw-r--r--lib/Parse/ParseTemplate.cpp156
-rw-r--r--lib/Parse/ParseTentative.cpp247
-rw-r--r--lib/Parse/Parser.cpp291
-rw-r--r--lib/Rewrite/DeltaTree.cpp7
-rw-r--r--lib/Rewrite/HTMLRewrite.cpp58
-rw-r--r--lib/Rewrite/RewriteRope.cpp7
-rw-r--r--lib/Rewrite/Rewriter.cpp16
-rw-r--r--lib/Rewrite/TokenRewriter.cpp7
-rw-r--r--lib/Sema/AnalysisBasedWarnings.cpp118
-rw-r--r--lib/Sema/CodeCompleteConsumer.cpp7
-rw-r--r--lib/Sema/CoroutineStmtBuilder.h7
-rw-r--r--lib/Sema/DeclSpec.cpp83
-rw-r--r--lib/Sema/DelayedDiagnostic.cpp7
-rw-r--r--lib/Sema/IdentifierResolver.cpp7
-rw-r--r--lib/Sema/JumpDiagnostics.cpp121
-rw-r--r--lib/Sema/MultiplexExternalSemaSource.cpp7
-rw-r--r--lib/Sema/OpenCLBuiltins.td296
-rw-r--r--lib/Sema/ParsedAttr.cpp7
-rw-r--r--lib/Sema/Scope.cpp11
-rw-r--r--lib/Sema/ScopeInfo.cpp63
-rw-r--r--lib/Sema/Sema.cpp433
-rw-r--r--lib/Sema/SemaAccess.cpp10
-rw-r--r--lib/Sema/SemaAttr.cpp22
-rw-r--r--lib/Sema/SemaCUDA.cpp262
-rw-r--r--lib/Sema/SemaCXXScopeSpec.cpp45
-rw-r--r--lib/Sema/SemaCast.cpp219
-rw-r--r--lib/Sema/SemaChecking.cpp829
-rw-r--r--lib/Sema/SemaCodeComplete.cpp904
-rw-r--r--lib/Sema/SemaConsumer.cpp7
-rw-r--r--lib/Sema/SemaCoroutine.cpp102
-rw-r--r--lib/Sema/SemaDecl.cpp1331
-rw-r--r--lib/Sema/SemaDeclAttr.cpp700
-rw-r--r--lib/Sema/SemaDeclCXX.cpp623
-rw-r--r--lib/Sema/SemaDeclObjC.cpp121
-rw-r--r--lib/Sema/SemaExceptionSpec.cpp42
-rw-r--r--lib/Sema/SemaExpr.cpp2122
-rw-r--r--lib/Sema/SemaExprCXX.cpp368
-rw-r--r--lib/Sema/SemaExprMember.cpp101
-rw-r--r--lib/Sema/SemaExprObjC.cpp92
-rw-r--r--lib/Sema/SemaFixItUtils.cpp7
-rw-r--r--lib/Sema/SemaInit.cpp480
-rw-r--r--lib/Sema/SemaLambda.cpp407
-rw-r--r--lib/Sema/SemaLookup.cpp390
-rw-r--r--lib/Sema/SemaModule.cpp710
-rw-r--r--lib/Sema/SemaObjCProperty.cpp27
-rw-r--r--lib/Sema/SemaOpenMP.cpp2385
-rw-r--r--lib/Sema/SemaOverload.cpp762
-rw-r--r--lib/Sema/SemaPseudoObject.cpp44
-rw-r--r--lib/Sema/SemaStmt.cpp149
-rw-r--r--lib/Sema/SemaStmtAsm.cpp180
-rw-r--r--lib/Sema/SemaStmtAttr.cpp77
-rw-r--r--lib/Sema/SemaTemplate.cpp609
-rw-r--r--lib/Sema/SemaTemplateDeduction.cpp54
-rw-r--r--lib/Sema/SemaTemplateInstantiate.cpp129
-rw-r--r--lib/Sema/SemaTemplateInstantiateDecl.cpp578
-rw-r--r--lib/Sema/SemaTemplateVariadic.cpp69
-rw-r--r--lib/Sema/SemaType.cpp508
-rw-r--r--lib/Sema/TreeTransform.h733
-rw-r--r--lib/Sema/TypeLocBuilder.cpp7
-rw-r--r--lib/Sema/TypeLocBuilder.h7
-rw-r--r--lib/Serialization/ASTCommon.cpp10
-rw-r--r--lib/Serialization/ASTCommon.h23
-rw-r--r--lib/Serialization/ASTReader.cpp1030
-rw-r--r--lib/Serialization/ASTReaderDecl.cpp189
-rw-r--r--lib/Serialization/ASTReaderInternals.h7
-rw-r--r--lib/Serialization/ASTReaderStmt.cpp220
-rw-r--r--lib/Serialization/ASTWriter.cpp212
-rw-r--r--lib/Serialization/ASTWriterDecl.cpp110
-rw-r--r--lib/Serialization/ASTWriterStmt.cpp131
-rw-r--r--lib/Serialization/GeneratePCH.cpp24
-rw-r--r--lib/Serialization/GlobalModuleIndex.cpp166
-rw-r--r--lib/Serialization/InMemoryModuleCache.cpp80
-rw-r--r--lib/Serialization/Module.cpp7
-rw-r--r--lib/Serialization/ModuleFileExtension.cpp7
-rw-r--r--lib/Serialization/ModuleManager.cpp60
-rw-r--r--lib/Serialization/MultiOnDiskHashTable.h7
-rw-r--r--lib/Serialization/PCHContainerOperations.cpp9
-rw-r--r--lib/StaticAnalyzer/Checkers/AllocationState.h7
-rw-r--r--lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp15
-rw-r--r--lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp15
-rw-r--r--lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp38
-rw-r--r--lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp53
-rw-r--r--lib/StaticAnalyzer/Checkers/CStringChecker.cpp237
-rw-r--r--lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp21
-rw-r--r--lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp31
-rw-r--r--lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp45
-rw-r--r--lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp14
-rw-r--r--lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/CastValueChecker.cpp190
-rw-r--r--lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp17
-rw-r--r--lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp13
-rw-r--r--lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp124
-rw-r--r--lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp9
-rw-r--r--lib/StaticAnalyzer/Checkers/ChrootChecker.cpp70
-rw-r--r--lib/StaticAnalyzer/Checkers/CloneChecker.cpp49
-rw-r--r--lib/StaticAnalyzer/Checkers/ConversionChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp34
-rw-r--r--lib/StaticAnalyzer/Checkers/DebugCheckers.cpp129
-rw-r--r--lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp12
-rw-r--r--lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp13
-rw-r--r--lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp30
-rw-r--r--lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp17
-rw-r--r--lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp28
-rw-r--r--lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp20
-rw-r--r--lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp15
-rw-r--r--lib/StaticAnalyzer/Checkers/GTestChecker.cpp17
-rw-r--r--lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp403
-rw-r--r--lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp13
-rw-r--r--lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/InterCheckerAPI.h10
-rw-r--r--lib/StaticAnalyzer/Checkers/IteratorChecker.cpp609
-rw-r--r--lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp21
-rw-r--r--lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp29
-rw-r--r--lib/StaticAnalyzer/Checkers/MIGChecker.cpp295
-rw-r--r--lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.cpp7
-rw-r--r--lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h7
-rw-r--r--lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.h7
-rw-r--r--lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.cpp7
-rw-r--r--lib/StaticAnalyzer/Checkers/MPI-Checker/MPITypes.h7
-rw-r--r--lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/MallocChecker.cpp134
-rw-r--r--lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp14
-rw-r--r--lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp15
-rw-r--r--lib/StaticAnalyzer/Checkers/Move.h30
-rw-r--r--lib/StaticAnalyzer/Checkers/MoveChecker.cpp40
-rw-r--r--lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp14
-rw-r--r--lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp29
-rw-r--r--lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp34
-rw-r--r--lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp29
-rw-r--r--lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp13
-rw-r--r--lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp90
-rw-r--r--lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp14
-rw-r--r--lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp13
-rw-r--r--lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp10
-rw-r--r--lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp14
-rw-r--r--lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/PaddingChecker.cpp32
-rw-r--r--lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/PointerIterationChecker.cpp100
-rw-r--r--lib/StaticAnalyzer/Checkers/PointerSortingChecker.cpp113
-rw-r--r--lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp445
-rw-r--r--lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h63
-rw-r--r--lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp186
-rw-r--r--lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h52
-rw-r--r--lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp170
-rw-r--r--lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp13
-rw-r--r--lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp18
-rw-r--r--lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp72
-rw-r--r--lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp29
-rw-r--r--lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp21
-rw-r--r--lib/StaticAnalyzer/Checkers/StreamChecker.cpp22
-rw-r--r--lib/StaticAnalyzer/Checkers/Taint.cpp227
-rw-r--r--lib/StaticAnalyzer/Checkers/Taint.h102
-rw-r--r--lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp16
-rw-r--r--lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/TraversalChecker.cpp15
-rw-r--r--lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp16
-rw-r--r--lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObject.h37
-rw-r--r--lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp129
-rw-r--r--lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp7
-rw-r--r--lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp238
-rw-r--r--lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp15
-rw-r--r--lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp15
-rw-r--r--lib/StaticAnalyzer/Checkers/ValistChecker.cpp25
-rw-r--r--lib/StaticAnalyzer/Checkers/VforkChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp14
-rw-r--r--lib/StaticAnalyzer/Core/APSIntType.cpp7
-rw-r--r--lib/StaticAnalyzer/Core/AnalysisManager.cpp13
-rw-r--r--lib/StaticAnalyzer/Core/AnalyzerOptions.cpp119
-rw-r--r--lib/StaticAnalyzer/Core/BasicValueFactory.cpp17
-rw-r--r--lib/StaticAnalyzer/Core/BlockCounter.cpp7
-rw-r--r--lib/StaticAnalyzer/Core/BugReporter.cpp47
-rw-r--r--lib/StaticAnalyzer/Core/BugReporterVisitors.cpp1365
-rw-r--r--lib/StaticAnalyzer/Core/CallEvent.cpp37
-rw-r--r--lib/StaticAnalyzer/Core/Checker.cpp7
-rw-r--r--lib/StaticAnalyzer/Core/CheckerContext.cpp7
-rw-r--r--lib/StaticAnalyzer/Core/CheckerHelpers.cpp7
-rw-r--r--lib/StaticAnalyzer/Core/CheckerManager.cpp104
-rw-r--r--lib/StaticAnalyzer/Core/CommonBugCategories.cpp7
-rw-r--r--lib/StaticAnalyzer/Core/ConstraintManager.cpp7
-rw-r--r--lib/StaticAnalyzer/Core/CoreEngine.cpp63
-rw-r--r--lib/StaticAnalyzer/Core/DynamicTypeMap.cpp53
-rw-r--r--lib/StaticAnalyzer/Core/Environment.cpp87
-rw-r--r--lib/StaticAnalyzer/Core/ExplodedGraph.cpp7
-rw-r--r--lib/StaticAnalyzer/Core/ExprEngine.cpp282
-rw-r--r--lib/StaticAnalyzer/Core/ExprEngineC.cpp54
-rw-r--r--lib/StaticAnalyzer/Core/ExprEngineCXX.cpp55
-rw-r--r--lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp78
-rw-r--r--lib/StaticAnalyzer/Core/ExprEngineObjC.cpp7
-rw-r--r--lib/StaticAnalyzer/Core/FunctionSummary.cpp7
-rw-r--r--lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp133
-rw-r--r--lib/StaticAnalyzer/Core/IssueHash.cpp11
-rw-r--r--lib/StaticAnalyzer/Core/LoopUnrolling.cpp9
-rw-r--r--lib/StaticAnalyzer/Core/LoopWidening.cpp7
-rw-r--r--lib/StaticAnalyzer/Core/MemRegion.cpp10
-rw-r--r--lib/StaticAnalyzer/Core/PathDiagnostic.cpp70
-rw-r--r--lib/StaticAnalyzer/Core/PlistDiagnostics.cpp136
-rw-r--r--lib/StaticAnalyzer/Core/PrettyStackTraceLocationContext.h13
-rw-r--r--lib/StaticAnalyzer/Core/ProgramState.cpp229
-rw-r--r--lib/StaticAnalyzer/Core/RangeConstraintManager.cpp86
-rw-r--r--lib/StaticAnalyzer/Core/RangedConstraintManager.cpp7
-rw-r--r--lib/StaticAnalyzer/Core/RegionStore.cpp227
-rw-r--r--lib/StaticAnalyzer/Core/SMTConstraintManager.cpp18
-rw-r--r--lib/StaticAnalyzer/Core/SValBuilder.cpp7
-rw-r--r--lib/StaticAnalyzer/Core/SVals.cpp19
-rw-r--r--lib/StaticAnalyzer/Core/SarifDiagnostics.cpp24
-rw-r--r--lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp7
-rw-r--r--lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp19
-rw-r--r--lib/StaticAnalyzer/Core/Store.cpp7
-rw-r--r--lib/StaticAnalyzer/Core/SubEngine.cpp7
-rw-r--r--lib/StaticAnalyzer/Core/SymbolManager.cpp17
-rw-r--r--lib/StaticAnalyzer/Core/TaintManager.cpp23
-rw-r--r--lib/StaticAnalyzer/Core/WorkList.cpp7
-rw-r--r--lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp841
-rw-r--r--lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp94
-rw-r--r--lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp83
-rw-r--r--lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp659
-rw-r--r--lib/StaticAnalyzer/Frontend/FrontendActions.cpp7
-rw-r--r--lib/StaticAnalyzer/Frontend/ModelConsumer.cpp7
-rw-r--r--lib/StaticAnalyzer/Frontend/ModelInjector.cpp9
-rw-r--r--lib/StaticAnalyzer/Frontend/ModelInjector.h7
-rw-r--r--lib/Tooling/ASTDiff/ASTDiff.cpp11
-rw-r--r--lib/Tooling/AllTUsExecution.cpp28
-rw-r--r--lib/Tooling/ArgumentsAdjusters.cpp35
-rw-r--r--lib/Tooling/CommonOptionsParser.cpp11
-rw-r--r--lib/Tooling/CompilationDatabase.cpp7
-rw-r--r--lib/Tooling/Core/Diagnostic.cpp22
-rw-r--r--lib/Tooling/Core/Lookup.cpp92
-rw-r--r--lib/Tooling/Core/Replacement.cpp18
-rw-r--r--lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp149
-rw-r--r--lib/Tooling/Execution.cpp7
-rw-r--r--lib/Tooling/FileMatchTrie.cpp7
-rw-r--r--lib/Tooling/FixIt.cpp14
-rw-r--r--lib/Tooling/GuessTargetAndModeCompilationDatabase.cpp57
-rw-r--r--lib/Tooling/Inclusions/HeaderIncludes.cpp69
-rw-r--r--lib/Tooling/Inclusions/IncludeStyle.cpp7
-rw-r--r--lib/Tooling/InterpolatingCompilationDatabase.cpp28
-rw-r--r--lib/Tooling/JSONCompilationDatabase.cpp69
-rw-r--r--lib/Tooling/Refactoring.cpp7
-rw-r--r--lib/Tooling/Refactoring/ASTSelection.cpp7
-rw-r--r--lib/Tooling/Refactoring/ASTSelectionRequirements.cpp7
-rw-r--r--lib/Tooling/Refactoring/AtomicChange.cpp7
-rw-r--r--lib/Tooling/Refactoring/Extract/Extract.cpp7
-rw-r--r--lib/Tooling/Refactoring/Extract/SourceExtraction.cpp7
-rw-r--r--lib/Tooling/Refactoring/Extract/SourceExtraction.h7
-rw-r--r--lib/Tooling/Refactoring/RangeSelector.cpp296
-rw-r--r--lib/Tooling/Refactoring/RefactoringActions.cpp7
-rw-r--r--lib/Tooling/Refactoring/Rename/RenamingAction.cpp9
-rw-r--r--lib/Tooling/Refactoring/Rename/SymbolOccurrences.cpp7
-rw-r--r--lib/Tooling/Refactoring/Rename/USRFinder.cpp7
-rw-r--r--lib/Tooling/Refactoring/Rename/USRFindingAction.cpp7
-rw-r--r--lib/Tooling/Refactoring/Rename/USRLocFinder.cpp11
-rw-r--r--lib/Tooling/Refactoring/SourceCode.cpp31
-rw-r--r--lib/Tooling/Refactoring/Stencil.cpp175
-rw-r--r--lib/Tooling/Refactoring/Transformer.cpp263
-rw-r--r--lib/Tooling/RefactoringCallbacks.cpp7
-rw-r--r--lib/Tooling/StandaloneExecution.cpp7
-rw-r--r--lib/Tooling/Syntax/BuildTree.cpp273
-rw-r--r--lib/Tooling/Syntax/Nodes.cpp35
-rw-r--r--lib/Tooling/Syntax/Tokens.cpp618
-rw-r--r--lib/Tooling/Syntax/Tree.cpp149
-rw-r--r--lib/Tooling/Tooling.cpp18
947 files changed, 65563 insertions, 29717 deletions
diff --git a/lib/ARCMigrate/ARCMT.cpp b/lib/ARCMigrate/ARCMT.cpp
index 6da87903a488..568e06f21fba 100644
--- a/lib/ARCMigrate/ARCMT.cpp
+++ b/lib/ARCMigrate/ARCMT.cpp
@@ -1,9 +1,8 @@
//===--- ARCMT.cpp - Migration to ARC mode --------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -36,10 +35,10 @@ bool CapturedDiagList::clearDiagnostic(ArrayRef<unsigned> IDs,
while (I != List.end()) {
FullSourceLoc diagLoc = I->getLocation();
if ((IDs.empty() || // empty means clear all diagnostics in the range.
- std::find(IDs.begin(), IDs.end(), I->getID()) != IDs.end()) &&
+ llvm::is_contained(IDs, I->getID())) &&
!diagLoc.isBeforeInTranslationUnitThan(range.getBegin()) &&
(diagLoc == range.getEnd() ||
- diagLoc.isBeforeInTranslationUnitThan(range.getEnd()))) {
+ diagLoc.isBeforeInTranslationUnitThan(range.getEnd()))) {
cleared = true;
ListTy::iterator eraseS = I++;
if (eraseS->getLevel() != DiagnosticsEngine::Note)
@@ -65,10 +64,10 @@ bool CapturedDiagList::hasDiagnostic(ArrayRef<unsigned> IDs,
while (I != List.end()) {
FullSourceLoc diagLoc = I->getLocation();
if ((IDs.empty() || // empty means any diagnostic in the range.
- std::find(IDs.begin(), IDs.end(), I->getID()) != IDs.end()) &&
+ llvm::find(IDs, I->getID()) != IDs.end()) &&
!diagLoc.isBeforeInTranslationUnitThan(range.getBegin()) &&
(diagLoc == range.getEnd() ||
- diagLoc.isBeforeInTranslationUnitThan(range.getEnd()))) {
+ diagLoc.isBeforeInTranslationUnitThan(range.getEnd()))) {
return true;
}
@@ -515,7 +514,7 @@ MigrationProcess::MigrationProcess(
IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
new DiagnosticsEngine(DiagID, &CI.getDiagnosticOpts(),
DiagClient, /*ShouldOwnClient=*/false));
- Remapper.initFromDisk(outputDir, *Diags, /*ignoreIfFilesChanges=*/true);
+ Remapper.initFromDisk(outputDir, *Diags, /*ignoreIfFilesChanged=*/true);
}
}
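(Aside, not part of the imported patch: the two hunks above swap the manual
std::find membership test for LLVM's range helpers. A minimal sketch of the
equivalence, assuming only llvm/ADT/STLExtras.h; the hasID helper name is made
up for illustration.)

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/STLExtras.h"
    #include <algorithm>
    #include <cassert>

    static bool hasID(llvm::ArrayRef<unsigned> IDs, unsigned ID) {
      // Old idiom, as removed by the hunks above:
      bool OldForm = std::find(IDs.begin(), IDs.end(), ID) != IDs.end();
      // New idiom, as added; llvm::is_contained wraps the begin/end
      // boilerplate, and llvm::find does the same for the variant that
      // still needs the iterator.
      bool NewForm = llvm::is_contained(IDs, ID);
      assert(OldForm == NewForm && "both forms test the same membership");
      return NewForm;
    }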
diff --git a/lib/ARCMigrate/ARCMTActions.cpp b/lib/ARCMigrate/ARCMTActions.cpp
index 0a5473ab19ec..d72f53806e37 100644
--- a/lib/ARCMigrate/ARCMTActions.cpp
+++ b/lib/ARCMigrate/ARCMTActions.cpp
@@ -1,9 +1,8 @@
//===--- ARCMTActions.cpp - ARC Migrate Tool Frontend Actions ---*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/ARCMigrate/FileRemapper.cpp b/lib/ARCMigrate/FileRemapper.cpp
index 225f47119b00..1a4862d09aa6 100644
--- a/lib/ARCMigrate/FileRemapper.cpp
+++ b/lib/ARCMigrate/FileRemapper.cpp
@@ -1,9 +1,8 @@
//===--- FileRemapper.cpp - File Remapping Helper -------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/ARCMigrate/Internals.h b/lib/ARCMigrate/Internals.h
index 1a261c1e31aa..47fc09317500 100644
--- a/lib/ARCMigrate/Internals.h
+++ b/lib/ARCMigrate/Internals.h
@@ -1,9 +1,8 @@
//===-- Internals.h - Implementation Details---------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/ARCMigrate/ObjCMT.cpp b/lib/ARCMigrate/ObjCMT.cpp
index 6950ce0e12f3..7126a0873ea0 100644
--- a/lib/ARCMigrate/ObjCMT.cpp
+++ b/lib/ARCMigrate/ObjCMT.cpp
@@ -1,13 +1,13 @@
//===--- ObjCMT.cpp - ObjC Migrate Tool -----------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "Transforms.h"
+#include "clang/Analysis/RetainSummaryManager.h"
#include "clang/ARCMigrate/ARCMT.h"
#include "clang/ARCMigrate/ARCMTActions.h"
#include "clang/AST/ASTConsumer.h"
@@ -27,7 +27,6 @@
#include "clang/Lex/PPConditionalDirectiveRecord.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Rewrite/Core/Rewriter.h"
-#include "clang/StaticAnalyzer/Core/RetainSummaryManager.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Path.h"
@@ -65,9 +64,11 @@ class ObjCMigrateASTConsumer : public ASTConsumer {
ObjCInstanceTypeFamily OIT_Family = OIT_None);
void migrateCFAnnotation(ASTContext &Ctx, const Decl *Decl);
- void AddCFAnnotations(ASTContext &Ctx, const CallEffects &CE,
+ void AddCFAnnotations(ASTContext &Ctx,
+ const RetainSummary *RS,
const FunctionDecl *FuncDecl, bool ResultAnnotated);
- void AddCFAnnotations(ASTContext &Ctx, const CallEffects &CE,
+ void AddCFAnnotations(ASTContext &Ctx,
+ const RetainSummary *RS,
const ObjCMethodDecl *MethodDecl, bool ResultAnnotated);
void AnnotateImplicitBridging(ASTContext &Ctx);
@@ -85,6 +86,8 @@ class ObjCMigrateASTConsumer : public ASTConsumer {
bool InsertFoundation(ASTContext &Ctx, SourceLocation Loc);
+ std::unique_ptr<RetainSummaryManager> Summaries;
+
public:
std::string MigrateDir;
unsigned ASTMigrateActions;
@@ -103,6 +106,14 @@ public:
llvm::SmallVector<const Decl *, 8> CFFunctionIBCandidates;
llvm::StringSet<> WhiteListFilenames;
+ RetainSummaryManager &getSummaryManager(ASTContext &Ctx) {
+ if (!Summaries)
+ Summaries.reset(new RetainSummaryManager(Ctx,
+ /*TrackNSCFObjects=*/true,
+ /*trackOSObjects=*/false));
+ return *Summaries;
+ }
+
ObjCMigrateASTConsumer(StringRef migrateDir,
unsigned astMigrateActions,
FileRemapper &remapper,
@@ -205,7 +216,7 @@ ObjCMigrateAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
bool ObjCMigrateAction::BeginInvocation(CompilerInstance &CI) {
Remapper.initFromDisk(MigrateDir, CI.getDiagnostics(),
- /*ignoreIfFilesChanges=*/true);
+ /*ignoreIfFilesChanged=*/true);
CompInst = &CI;
CI.getDiagnostics().setIgnoreAllWarnings(true);
return true;
@@ -1453,12 +1464,12 @@ void ObjCMigrateASTConsumer::migrateCFAnnotation(ASTContext &Ctx, const Decl *De
}
void ObjCMigrateASTConsumer::AddCFAnnotations(ASTContext &Ctx,
- const CallEffects &CE,
+ const RetainSummary *RS,
const FunctionDecl *FuncDecl,
bool ResultAnnotated) {
// Annotate function.
if (!ResultAnnotated) {
- RetEffect Ret = CE.getReturnValue();
+ RetEffect Ret = RS->getRetEffect();
const char *AnnotationString = nullptr;
if (Ret.getObjKind() == ObjKind::CF) {
if (Ret.isOwned() && NSAPIObj->isMacroDefined("CF_RETURNS_RETAINED"))
@@ -1478,12 +1489,11 @@ void ObjCMigrateASTConsumer::AddCFAnnotations(ASTContext &Ctx,
Editor->commit(commit);
}
}
- ArrayRef<ArgEffect> AEArgs = CE.getArgs();
unsigned i = 0;
for (FunctionDecl::param_const_iterator pi = FuncDecl->param_begin(),
pe = FuncDecl->param_end(); pi != pe; ++pi, ++i) {
const ParmVarDecl *pd = *pi;
- ArgEffect AE = AEArgs[i];
+ ArgEffect AE = RS->getArg(i);
if (AE.getKind() == DecRef && AE.getObjKind() == ObjKind::CF &&
!pd->hasAttr<CFConsumedAttr>() &&
NSAPIObj->isMacroDefined("CF_CONSUMED")) {
@@ -1507,7 +1517,8 @@ ObjCMigrateASTConsumer::CF_BRIDGING_KIND
if (FuncDecl->hasBody())
return CF_BRIDGING_NONE;
- CallEffects CE = CallEffects::getEffect(FuncDecl);
+ const RetainSummary *RS =
+ getSummaryManager(Ctx).getSummary(AnyCall(FuncDecl));
bool FuncIsReturnAnnotated = (FuncDecl->hasAttr<CFReturnsRetainedAttr>() ||
FuncDecl->hasAttr<CFReturnsNotRetainedAttr>() ||
FuncDecl->hasAttr<NSReturnsRetainedAttr>() ||
@@ -1520,7 +1531,7 @@ ObjCMigrateASTConsumer::CF_BRIDGING_KIND
bool ReturnCFAudited = false;
if (!FuncIsReturnAnnotated) {
- RetEffect Ret = CE.getReturnValue();
+ RetEffect Ret = RS->getRetEffect();
if (Ret.getObjKind() == ObjKind::CF &&
(Ret.isOwned() || Ret.notOwned()))
ReturnCFAudited = true;
@@ -1529,14 +1540,12 @@ ObjCMigrateASTConsumer::CF_BRIDGING_KIND
}
// At this point result type is audited for potential inclusion.
- // Now, how about argument types.
- ArrayRef<ArgEffect> AEArgs = CE.getArgs();
unsigned i = 0;
bool ArgCFAudited = false;
for (FunctionDecl::param_const_iterator pi = FuncDecl->param_begin(),
pe = FuncDecl->param_end(); pi != pe; ++pi, ++i) {
const ParmVarDecl *pd = *pi;
- ArgEffect AE = AEArgs[i];
+ ArgEffect AE = RS->getArg(i);
if ((AE.getKind() == DecRef /*CFConsumed annotated*/ ||
AE.getKind() == IncRef) && AE.getObjKind() == ObjKind::CF) {
if (AE.getKind() == DecRef && !pd->hasAttr<CFConsumedAttr>())
@@ -1546,7 +1555,7 @@ ObjCMigrateASTConsumer::CF_BRIDGING_KIND
} else {
QualType AT = pd->getType();
if (!AuditedType(AT)) {
- AddCFAnnotations(Ctx, CE, FuncDecl, FuncIsReturnAnnotated);
+ AddCFAnnotations(Ctx, RS, FuncDecl, FuncIsReturnAnnotated);
return CF_BRIDGING_NONE;
}
}
@@ -1568,12 +1577,12 @@ void ObjCMigrateASTConsumer::migrateARCSafeAnnotation(ASTContext &Ctx,
}
void ObjCMigrateASTConsumer::AddCFAnnotations(ASTContext &Ctx,
- const CallEffects &CE,
+ const RetainSummary *RS,
const ObjCMethodDecl *MethodDecl,
bool ResultAnnotated) {
// Annotate function.
if (!ResultAnnotated) {
- RetEffect Ret = CE.getReturnValue();
+ RetEffect Ret = RS->getRetEffect();
const char *AnnotationString = nullptr;
if (Ret.getObjKind() == ObjKind::CF) {
if (Ret.isOwned() && NSAPIObj->isMacroDefined("CF_RETURNS_RETAINED"))
@@ -1605,12 +1614,11 @@ void ObjCMigrateASTConsumer::AddCFAnnotations(ASTContext &Ctx,
Editor->commit(commit);
}
}
- ArrayRef<ArgEffect> AEArgs = CE.getArgs();
unsigned i = 0;
for (ObjCMethodDecl::param_const_iterator pi = MethodDecl->param_begin(),
pe = MethodDecl->param_end(); pi != pe; ++pi, ++i) {
const ParmVarDecl *pd = *pi;
- ArgEffect AE = AEArgs[i];
+ ArgEffect AE = RS->getArg(i);
if (AE.getKind() == DecRef
&& AE.getObjKind() == ObjKind::CF
&& !pd->hasAttr<CFConsumedAttr>() &&
@@ -1628,7 +1636,9 @@ void ObjCMigrateASTConsumer::migrateAddMethodAnnotation(
if (MethodDecl->hasBody() || MethodDecl->isImplicit())
return;
- CallEffects CE = CallEffects::getEffect(MethodDecl);
+ const RetainSummary *RS =
+ getSummaryManager(Ctx).getSummary(AnyCall(MethodDecl));
+
bool MethodIsReturnAnnotated =
(MethodDecl->hasAttr<CFReturnsRetainedAttr>() ||
MethodDecl->hasAttr<CFReturnsNotRetainedAttr>() ||
@@ -1636,7 +1646,7 @@ void ObjCMigrateASTConsumer::migrateAddMethodAnnotation(
MethodDecl->hasAttr<NSReturnsNotRetainedAttr>() ||
MethodDecl->hasAttr<NSReturnsAutoreleasedAttr>());
- if (CE.getReceiver().getKind() == DecRef &&
+ if (RS->getReceiverEffect().getKind() == DecRef &&
!MethodDecl->hasAttr<NSConsumesSelfAttr>() &&
MethodDecl->getMethodFamily() != OMF_init &&
MethodDecl->getMethodFamily() != OMF_release &&
@@ -1652,27 +1662,25 @@ void ObjCMigrateASTConsumer::migrateAddMethodAnnotation(
return;
if (!MethodIsReturnAnnotated) {
- RetEffect Ret = CE.getReturnValue();
+ RetEffect Ret = RS->getRetEffect();
if ((Ret.getObjKind() == ObjKind::CF ||
Ret.getObjKind() == ObjKind::ObjC) &&
(Ret.isOwned() || Ret.notOwned())) {
- AddCFAnnotations(Ctx, CE, MethodDecl, false);
+ AddCFAnnotations(Ctx, RS, MethodDecl, false);
return;
} else if (!AuditedType(MethodDecl->getReturnType()))
return;
}
// At this point result type is either annotated or audited.
- // Now, how about argument types.
- ArrayRef<ArgEffect> AEArgs = CE.getArgs();
unsigned i = 0;
for (ObjCMethodDecl::param_const_iterator pi = MethodDecl->param_begin(),
pe = MethodDecl->param_end(); pi != pe; ++pi, ++i) {
const ParmVarDecl *pd = *pi;
- ArgEffect AE = AEArgs[i];
+ ArgEffect AE = RS->getArg(i);
if ((AE.getKind() == DecRef && !pd->hasAttr<CFConsumedAttr>()) ||
AE.getKind() == IncRef || !AuditedType(pd->getType())) {
- AddCFAnnotations(Ctx, CE, MethodDecl, MethodIsReturnAnnotated);
+ AddCFAnnotations(Ctx, RS, MethodDecl, MethodIsReturnAnnotated);
return;
}
}
diff --git a/lib/ARCMigrate/PlistReporter.cpp b/lib/ARCMigrate/PlistReporter.cpp
index 9e4cb3f4cd83..6d7fcb053b48 100644
--- a/lib/ARCMigrate/PlistReporter.cpp
+++ b/lib/ARCMigrate/PlistReporter.cpp
@@ -1,9 +1,8 @@
//===--- PlistReporter.cpp - ARC Migrate Tool Plist Reporter ----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -121,5 +120,5 @@ void arcmt::writeARCDiagsToPlist(const std::string &outPath,
o << " </array>\n";
// Finish.
- o << "</dict>\n</plist>";
+ o << "</dict>\n</plist>\n";
}
diff --git a/lib/ARCMigrate/TransAPIUses.cpp b/lib/ARCMigrate/TransAPIUses.cpp
index 6146e07b1d2a..638850dcf9ec 100644
--- a/lib/ARCMigrate/TransAPIUses.cpp
+++ b/lib/ARCMigrate/TransAPIUses.cpp
@@ -1,9 +1,8 @@
//===--- TransAPIUses.cpp - Transformations to ARC mode -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/ARCMigrate/TransARCAssign.cpp b/lib/ARCMigrate/TransARCAssign.cpp
index d2b4de4891c3..d1d5b9e014b1 100644
--- a/lib/ARCMigrate/TransARCAssign.cpp
+++ b/lib/ARCMigrate/TransARCAssign.cpp
@@ -1,9 +1,8 @@
//===--- TransARCAssign.cpp - Transformations to ARC mode -----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/ARCMigrate/TransAutoreleasePool.cpp b/lib/ARCMigrate/TransAutoreleasePool.cpp
index 9d20774a89a6..393adcd85a3f 100644
--- a/lib/ARCMigrate/TransAutoreleasePool.cpp
+++ b/lib/ARCMigrate/TransAutoreleasePool.cpp
@@ -1,9 +1,8 @@
//===--- TransAutoreleasePool.cpp - Transformations to ARC mode -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/ARCMigrate/TransBlockObjCVariable.cpp b/lib/ARCMigrate/TransBlockObjCVariable.cpp
index 85bdabb50461..1e4db33135b6 100644
--- a/lib/ARCMigrate/TransBlockObjCVariable.cpp
+++ b/lib/ARCMigrate/TransBlockObjCVariable.cpp
@@ -1,9 +1,8 @@
//===--- TransBlockObjCVariable.cpp - Transformations to ARC mode ---------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp b/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
index 0327b0def1e6..5d0cfb8a8b9c 100644
--- a/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
+++ b/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
@@ -1,9 +1,8 @@
-//===--- TransEmptyStatements.cpp - Transformations to ARC mode -----------===//
+//===-- TransEmptyStatementsAndDealloc.cpp - Transformations to ARC mode --===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -43,9 +42,8 @@ static bool isEmptyARCMTMacroStatement(NullStmt *S,
return false;
SourceManager &SM = Ctx.getSourceManager();
- std::vector<SourceLocation>::iterator
- I = std::upper_bound(MacroLocs.begin(), MacroLocs.end(), SemiLoc,
- BeforeThanCompare<SourceLocation>(SM));
+ std::vector<SourceLocation>::iterator I = llvm::upper_bound(
+ MacroLocs, SemiLoc, BeforeThanCompare<SourceLocation>(SM));
--I;
SourceLocation
AfterMacroLoc = I->getLocWithOffset(getARCMTMacroName().size());
diff --git a/lib/ARCMigrate/TransGCAttrs.cpp b/lib/ARCMigrate/TransGCAttrs.cpp
index 7697d3f048e6..5e3162197ed1 100644
--- a/lib/ARCMigrate/TransGCAttrs.cpp
+++ b/lib/ARCMigrate/TransGCAttrs.cpp
@@ -1,9 +1,8 @@
//===--- TransGCAttrs.cpp - Transformations to ARC mode --------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -69,6 +68,9 @@ public:
if (handleAttr(Attr, D))
break;
TL = Attr.getModifiedLoc();
+ } else if (MacroQualifiedTypeLoc MDTL =
+ TL.getAs<MacroQualifiedTypeLoc>()) {
+ TL = MDTL.getInnerLoc();
} else if (ArrayTypeLoc Arr = TL.getAs<ArrayTypeLoc>()) {
TL = Arr.getElementLoc();
} else if (PointerTypeLoc PT = TL.getAs<PointerTypeLoc>()) {
@@ -267,7 +269,7 @@ static void checkAllAtProps(MigrationContext &MigrateCtx,
StringRef toAttr = "strong";
if (hasWeak) {
if (canApplyWeak(MigrateCtx.Pass.Ctx, IndProps.front()->getType(),
- /*AllowOnUnkwownClass=*/true))
+ /*AllowOnUnknownClass=*/true))
toAttr = "weak";
else
toAttr = "unsafe_unretained";
diff --git a/lib/ARCMigrate/TransGCCalls.cpp b/lib/ARCMigrate/TransGCCalls.cpp
index eff142ba3922..43233e2d0b45 100644
--- a/lib/ARCMigrate/TransGCCalls.cpp
+++ b/lib/ARCMigrate/TransGCCalls.cpp
@@ -1,9 +1,8 @@
//===--- TransGCCalls.cpp - Transformations to ARC mode -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/ARCMigrate/TransProperties.cpp b/lib/ARCMigrate/TransProperties.cpp
index 912f77aeb789..0675fb0baeb8 100644
--- a/lib/ARCMigrate/TransProperties.cpp
+++ b/lib/ARCMigrate/TransProperties.cpp
@@ -1,9 +1,8 @@
//===--- TransProperties.cpp - Transformations to ARC mode ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/ARCMigrate/TransProtectedScope.cpp b/lib/ARCMigrate/TransProtectedScope.cpp
index bfc542e7497c..9e9e9cb7a96d 100644
--- a/lib/ARCMigrate/TransProtectedScope.cpp
+++ b/lib/ARCMigrate/TransProtectedScope.cpp
@@ -1,9 +1,8 @@
//===--- TransProtectedScope.cpp - Transformations to ARC mode ------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/ARCMigrate/TransRetainReleaseDealloc.cpp b/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
index d199bb936547..b76fc65574dd 100644
--- a/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
+++ b/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
@@ -1,9 +1,8 @@
//===--- TransRetainReleaseDealloc.cpp - Transformations to ARC mode ------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -270,8 +269,8 @@ private:
if (prevChildS != childE) {
prevStmt = *prevChildS;
- if (prevStmt)
- prevStmt = prevStmt->IgnoreImplicit();
+ if (auto *E = dyn_cast_or_null<Expr>(prevStmt))
+ prevStmt = E->IgnoreImplicit();
}
if (currChildS == childE)
@@ -281,8 +280,8 @@ private:
return std::make_pair(prevStmt, nextStmt);
nextStmt = *currChildS;
- if (nextStmt)
- nextStmt = nextStmt->IgnoreImplicit();
+ if (auto *E = dyn_cast_or_null<Expr>(nextStmt))
+ nextStmt = E->IgnoreImplicit();
return std::make_pair(prevStmt, nextStmt);
}
diff --git a/lib/ARCMigrate/TransUnbridgedCasts.cpp b/lib/ARCMigrate/TransUnbridgedCasts.cpp
index 9d46d8c5fcae..e767ad5346c3 100644
--- a/lib/ARCMigrate/TransUnbridgedCasts.cpp
+++ b/lib/ARCMigrate/TransUnbridgedCasts.cpp
@@ -1,9 +1,8 @@
//===--- TransUnbridgedCasts.cpp - Transformations to ARC mode ------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/ARCMigrate/TransUnusedInitDelegate.cpp b/lib/ARCMigrate/TransUnusedInitDelegate.cpp
index 70370ecc4e90..bac8dfac9b41 100644
--- a/lib/ARCMigrate/TransUnusedInitDelegate.cpp
+++ b/lib/ARCMigrate/TransUnusedInitDelegate.cpp
@@ -1,9 +1,8 @@
//===--- TransUnusedInitDelegate.cpp - Transformations to ARC mode --------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Transformations:
diff --git a/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp b/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
index 220102ec49ce..d28bd378acc1 100644
--- a/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
+++ b/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
@@ -1,9 +1,8 @@
//===--- TransZeroOutPropsInDealloc.cpp - Transformations to ARC mode -----===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/ARCMigrate/TransformActions.cpp b/lib/ARCMigrate/TransformActions.cpp
index d1768bc56cfc..2c48e479dce8 100644
--- a/lib/ARCMigrate/TransformActions.cpp
+++ b/lib/ARCMigrate/TransformActions.cpp
@@ -1,9 +1,8 @@
-//===--- ARCMT.cpp - Migration to ARC mode --------------------------------===//
+//===-- TransformActions.cpp - Migration to ARC mode ----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -314,7 +313,9 @@ void TransformActionsImpl::removeStmt(Stmt *S) {
assert(IsInTransaction && "Actions only allowed during a transaction");
ActionData data;
data.Kind = Act_RemoveStmt;
- data.S = S->IgnoreImplicit(); // important for uniquing
+ if (auto *E = dyn_cast<Expr>(S))
+ S = E->IgnoreImplicit(); // important for uniquing
+ data.S = S;
CachedActions.push_back(data);
}
diff --git a/lib/ARCMigrate/Transforms.cpp b/lib/ARCMigrate/Transforms.cpp
index 8bd2b407aee9..59b80a917e56 100644
--- a/lib/ARCMigrate/Transforms.cpp
+++ b/lib/ARCMigrate/Transforms.cpp
@@ -1,9 +1,8 @@
//===--- Transforms.cpp - Transformations to ARC mode ---------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -287,10 +286,11 @@ private:
void mark(Stmt *S) {
if (!S) return;
- while (LabelStmt *Label = dyn_cast<LabelStmt>(S))
+ while (auto *Label = dyn_cast<LabelStmt>(S))
S = Label->getSubStmt();
- S = S->IgnoreImplicit();
- if (Expr *E = dyn_cast<Expr>(S))
+ if (auto *E = dyn_cast<Expr>(S))
+ S = E->IgnoreImplicit();
+ if (auto *E = dyn_cast<Expr>(S))
Removables.insert(E);
}
};
diff --git a/lib/ARCMigrate/Transforms.h b/lib/ARCMigrate/Transforms.h
index bafe9fc52a27..e087136f0e2c 100644
--- a/lib/ARCMigrate/Transforms.h
+++ b/lib/ARCMigrate/Transforms.h
@@ -1,9 +1,8 @@
//===-- Transforms.h - Transformations to ARC mode --------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/AST/APValue.cpp b/lib/AST/APValue.cpp
index c05b160b8e3d..1993bba9bd1a 100644
--- a/lib/AST/APValue.cpp
+++ b/lib/AST/APValue.cpp
@@ -1,9 +1,8 @@
//===--- APValue.cpp - Union class for APFloat/APSInt/Complex -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -21,6 +20,61 @@
#include "llvm/Support/raw_ostream.h"
using namespace clang;
+/// The identity of a type_info object depends on the canonical unqualified
+/// type only.
+TypeInfoLValue::TypeInfoLValue(const Type *T)
+ : T(T->getCanonicalTypeUnqualified().getTypePtr()) {}
+
+void TypeInfoLValue::print(llvm::raw_ostream &Out,
+ const PrintingPolicy &Policy) const {
+ Out << "typeid(";
+ QualType(getType(), 0).print(Out, Policy);
+ Out << ")";
+}
+
+static_assert(
+ 1 << llvm::PointerLikeTypeTraits<TypeInfoLValue>::NumLowBitsAvailable <=
+ alignof(Type),
+ "Type is insufficiently aligned");
+
+APValue::LValueBase::LValueBase(const ValueDecl *P, unsigned I, unsigned V)
+ : Ptr(P), Local{I, V} {}
+APValue::LValueBase::LValueBase(const Expr *P, unsigned I, unsigned V)
+ : Ptr(P), Local{I, V} {}
+
+APValue::LValueBase APValue::LValueBase::getTypeInfo(TypeInfoLValue LV,
+ QualType TypeInfo) {
+ LValueBase Base;
+ Base.Ptr = LV;
+ Base.TypeInfoType = TypeInfo.getAsOpaquePtr();
+ return Base;
+}
+
+unsigned APValue::LValueBase::getCallIndex() const {
+ return is<TypeInfoLValue>() ? 0 : Local.CallIndex;
+}
+
+unsigned APValue::LValueBase::getVersion() const {
+ return is<TypeInfoLValue>() ? 0 : Local.Version;
+}
+
+QualType APValue::LValueBase::getTypeInfoType() const {
+ assert(is<TypeInfoLValue>() && "not a type_info lvalue");
+ return QualType::getFromOpaquePtr(TypeInfoType);
+}
+
+namespace clang {
+bool operator==(const APValue::LValueBase &LHS,
+ const APValue::LValueBase &RHS) {
+ if (LHS.Ptr != RHS.Ptr)
+ return false;
+ if (LHS.is<TypeInfoLValue>())
+ return true;
+ return LHS.Local.CallIndex == RHS.Local.CallIndex &&
+ LHS.Local.Version == RHS.Local.Version;
+}
+}
+
namespace {
struct LVBase {
APValue::LValueBase Base;
@@ -46,26 +100,27 @@ APValue::LValueBase::operator bool () const {
clang::APValue::LValueBase
llvm::DenseMapInfo<clang::APValue::LValueBase>::getEmptyKey() {
return clang::APValue::LValueBase(
- DenseMapInfo<clang::APValue::LValueBase::PtrTy>::getEmptyKey(),
- DenseMapInfo<unsigned>::getEmptyKey(),
- DenseMapInfo<unsigned>::getEmptyKey());
+ DenseMapInfo<const ValueDecl*>::getEmptyKey());
}
clang::APValue::LValueBase
llvm::DenseMapInfo<clang::APValue::LValueBase>::getTombstoneKey() {
return clang::APValue::LValueBase(
- DenseMapInfo<clang::APValue::LValueBase::PtrTy>::getTombstoneKey(),
- DenseMapInfo<unsigned>::getTombstoneKey(),
- DenseMapInfo<unsigned>::getTombstoneKey());
+ DenseMapInfo<const ValueDecl*>::getTombstoneKey());
+}
+
+namespace clang {
+llvm::hash_code hash_value(const APValue::LValueBase &Base) {
+ if (Base.is<TypeInfoLValue>())
+ return llvm::hash_value(Base.getOpaqueValue());
+ return llvm::hash_combine(Base.getOpaqueValue(), Base.getCallIndex(),
+ Base.getVersion());
+}
}
unsigned llvm::DenseMapInfo<clang::APValue::LValueBase>::getHashValue(
const clang::APValue::LValueBase &Base) {
- llvm::FoldingSetNodeID ID;
- ID.AddPointer(Base.getOpaqueValue());
- ID.AddInteger(Base.getCallIndex());
- ID.AddInteger(Base.getVersion());
- return ID.ComputeHash();
+ return hash_value(Base);
}
bool llvm::DenseMapInfo<clang::APValue::LValueBase>::isEqual(
@@ -164,9 +219,11 @@ APValue::UnionData::~UnionData () {
delete Value;
}
-APValue::APValue(const APValue &RHS) : Kind(Uninitialized) {
+APValue::APValue(const APValue &RHS) : Kind(None) {
switch (RHS.getKind()) {
- case Uninitialized:
+ case None:
+ case Indeterminate:
+ Kind = RHS.getKind();
break;
case Int:
MakeInt();
@@ -176,6 +233,11 @@ APValue::APValue(const APValue &RHS) : Kind(Uninitialized) {
MakeFloat();
setFloat(RHS.getFloat());
break;
+ case FixedPoint: {
+ APFixedPoint FXCopy = RHS.getFixedPoint();
+ MakeFixedPoint(std::move(FXCopy));
+ break;
+ }
case Vector:
MakeVector();
setVector(((const Vec *)(const char *)RHS.Data.buffer)->Elts,
@@ -233,6 +295,8 @@ void APValue::DestroyDataAndMakeUninit() {
((APSInt*)(char*)Data.buffer)->~APSInt();
else if (Kind == Float)
((APFloat*)(char*)Data.buffer)->~APFloat();
+ else if (Kind == FixedPoint)
+ ((APFixedPoint *)(char *)Data.buffer)->~APFixedPoint();
else if (Kind == Vector)
((Vec*)(char*)Data.buffer)->~Vec();
else if (Kind == ComplexInt)
@@ -251,12 +315,13 @@ void APValue::DestroyDataAndMakeUninit() {
((MemberPointerData*)(char*)Data.buffer)->~MemberPointerData();
else if (Kind == AddrLabelDiff)
((AddrLabelDiffData*)(char*)Data.buffer)->~AddrLabelDiffData();
- Kind = Uninitialized;
+ Kind = None;
}
bool APValue::needsCleanup() const {
switch (getKind()) {
- case Uninitialized:
+ case None:
+ case Indeterminate:
case AddrLabelDiff:
return false;
case Struct:
@@ -268,6 +333,8 @@ bool APValue::needsCleanup() const {
return getInt().needsCleanup();
case Float:
return getFloat().needsCleanup();
+ case FixedPoint:
+ return getFixedPoint().getValue().needsCleanup();
case ComplexFloat:
assert(getComplexFloatImag().needsCleanup() ==
getComplexFloatReal().needsCleanup() &&
@@ -312,8 +379,11 @@ static double GetApproxValue(const llvm::APFloat &F) {
void APValue::dump(raw_ostream &OS) const {
switch (getKind()) {
- case Uninitialized:
- OS << "Uninitialized";
+ case None:
+ OS << "None";
+ return;
+ case Indeterminate:
+ OS << "Indeterminate";
return;
case Int:
OS << "Int: " << getInt();
@@ -321,6 +391,9 @@ void APValue::dump(raw_ostream &OS) const {
case Float:
OS << "Float: " << GetApproxValue(getFloat());
return;
+ case FixedPoint:
+ OS << "FixedPoint : " << getFixedPoint();
+ return;
case Vector:
OS << "Vector: ";
getVectorElt(0).dump(OS);
@@ -383,9 +456,13 @@ void APValue::dump(raw_ostream &OS) const {
llvm_unreachable("Unknown APValue kind!");
}
-void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{
+void APValue::printPretty(raw_ostream &Out, const ASTContext &Ctx,
+ QualType Ty) const {
switch (getKind()) {
- case APValue::Uninitialized:
+ case APValue::None:
+ Out << "<out of lifetime>";
+ return;
+ case APValue::Indeterminate:
Out << "<uninitialized>";
return;
case APValue::Int:
@@ -397,6 +474,9 @@ void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{
case APValue::Float:
Out << GetApproxValue(getFloat());
return;
+ case APValue::FixedPoint:
+ Out << getFixedPoint();
+ return;
case APValue::Vector: {
Out << '{';
QualType ElemTy = Ty->getAs<VectorType>()->getElementType();
@@ -453,7 +533,9 @@ void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{
if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>())
Out << *VD;
- else {
+ else if (TypeInfoLValue TI = Base.dyn_cast<TypeInfoLValue>()) {
+ TI.print(Out, Ctx.getPrintingPolicy());
+ } else {
assert(Base.get<const Expr *>() != nullptr &&
"Expecting non-null Expr");
Base.get<const Expr*>()->printPretty(Out, nullptr,
@@ -478,6 +560,9 @@ void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{
if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) {
Out << *VD;
ElemTy = VD->getType();
+ } else if (TypeInfoLValue TI = Base.dyn_cast<TypeInfoLValue>()) {
+ TI.print(Out, Ctx.getPrintingPolicy());
+ ElemTy = Base.getTypeInfoType();
} else {
const Expr *E = Base.get<const Expr*>();
assert(E != nullptr && "Expecting non-null Expr");
@@ -491,8 +576,7 @@ void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{
if (ElemTy->getAs<RecordType>()) {
// The lvalue refers to a class type, so the next path entry is a base
// or member.
- const Decl *BaseOrMember =
- BaseOrMemberType::getFromOpaqueValue(Path[I].BaseOrMember).getPointer();
+ const Decl *BaseOrMember = Path[I].getAsBaseOrMember().getPointer();
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(BaseOrMember)) {
CastToBase = RD;
ElemTy = Ctx.getRecordType(RD);
@@ -506,7 +590,7 @@ void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{
}
} else {
// The lvalue must refer to an array.
- Out << '[' << Path[I].ArrayIndex << ']';
+ Out << '[' << Path[I].getAsArrayIndex() << ']';
ElemTy = Ctx.getAsArrayType(ElemTy)->getElementType();
}
}
@@ -592,7 +676,7 @@ void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{
llvm_unreachable("Unknown APValue kind!");
}
-std::string APValue::getAsString(ASTContext &Ctx, QualType Ty) const {
+std::string APValue::getAsString(const ASTContext &Ctx, QualType Ty) const {
std::string Result;
llvm::raw_string_ostream Out(Result);
printPretty(Out, Ctx, Ty);
@@ -600,6 +684,26 @@ std::string APValue::getAsString(ASTContext &Ctx, QualType Ty) const {
return Result;
}
+bool APValue::toIntegralConstant(APSInt &Result, QualType SrcTy,
+ const ASTContext &Ctx) const {
+ if (isInt()) {
+ Result = getInt();
+ return true;
+ }
+
+ if (isLValue() && isNullPointer()) {
+ Result = Ctx.MakeIntValue(Ctx.getTargetNullPointerValue(SrcTy), SrcTy);
+ return true;
+ }
+
+ if (isLValue() && !getLValueBase()) {
+ Result = Ctx.MakeIntValue(getLValueOffset().getQuantity(), SrcTy);
+ return true;
+ }
+
+ return false;
+}
+
const APValue::LValueBase APValue::getLValueBase() const {
assert(isLValue() && "Invalid accessor");
return ((const LV*)(const void*)Data.buffer)->Base;
@@ -687,21 +791,21 @@ ArrayRef<const CXXRecordDecl*> APValue::getMemberPointerPath() const {
}
void APValue::MakeLValue() {
- assert(isUninit() && "Bad state change");
+ assert(isAbsent() && "Bad state change");
static_assert(sizeof(LV) <= DataSize, "LV too big");
new ((void*)(char*)Data.buffer) LV();
Kind = LValue;
}
void APValue::MakeArray(unsigned InitElts, unsigned Size) {
- assert(isUninit() && "Bad state change");
+ assert(isAbsent() && "Bad state change");
new ((void*)(char*)Data.buffer) Arr(InitElts, Size);
Kind = Array;
}
void APValue::MakeMemberPointer(const ValueDecl *Member, bool IsDerivedMember,
ArrayRef<const CXXRecordDecl*> Path) {
- assert(isUninit() && "Bad state change");
+ assert(isAbsent() && "Bad state change");
MemberPointerData *MPD = new ((void*)(char*)Data.buffer) MemberPointerData;
Kind = MemberPointer;
MPD->MemberAndIsDerivedMember.setPointer(Member);
diff --git a/lib/AST/ASTConsumer.cpp b/lib/AST/ASTConsumer.cpp
index 55033b238c66..6bba8f1f80a9 100644
--- a/lib/AST/ASTConsumer.cpp
+++ b/lib/AST/ASTConsumer.cpp
@@ -1,9 +1,8 @@
//===--- ASTConsumer.cpp - Abstract interface for reading ASTs --*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
index 21b6f36e9aa7..0d69eb90abaf 100644
--- a/lib/AST/ASTContext.cpp
+++ b/lib/AST/ASTContext.cpp
@@ -1,9 +1,8 @@
//===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -95,38 +94,18 @@
using namespace clang;
-unsigned ASTContext::NumImplicitDefaultConstructors;
-unsigned ASTContext::NumImplicitDefaultConstructorsDeclared;
-unsigned ASTContext::NumImplicitCopyConstructors;
-unsigned ASTContext::NumImplicitCopyConstructorsDeclared;
-unsigned ASTContext::NumImplicitMoveConstructors;
-unsigned ASTContext::NumImplicitMoveConstructorsDeclared;
-unsigned ASTContext::NumImplicitCopyAssignmentOperators;
-unsigned ASTContext::NumImplicitCopyAssignmentOperatorsDeclared;
-unsigned ASTContext::NumImplicitMoveAssignmentOperators;
-unsigned ASTContext::NumImplicitMoveAssignmentOperatorsDeclared;
-unsigned ASTContext::NumImplicitDestructors;
-unsigned ASTContext::NumImplicitDestructorsDeclared;
-
enum FloatingRank {
Float16Rank, HalfRank, FloatRank, DoubleRank, LongDoubleRank, Float128Rank
};
RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
- if (!CommentsLoaded && ExternalSource) {
- ExternalSource->ReadComments();
-
-#ifndef NDEBUG
- ArrayRef<RawComment *> RawComments = Comments.getComments();
- assert(std::is_sorted(RawComments.begin(), RawComments.end(),
- BeforeThanCompare<RawComment>(SourceMgr)));
-#endif
-
- CommentsLoaded = true;
- }
-
assert(D);
+ // If we already tried to load comments but there are none,
+ // we won't find anything.
+ if (CommentsLoaded && Comments.getComments().empty())
+ return nullptr;
+
// User can not attach documentation to implicit declarations.
if (D->isImplicit())
return nullptr;
@@ -176,12 +155,6 @@ RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
isa<TemplateTemplateParmDecl>(D))
return nullptr;
- ArrayRef<RawComment *> RawComments = Comments.getComments();
-
- // If there are no comments anywhere, we won't find anything.
- if (RawComments.empty())
- return nullptr;
-
// Find declaration location.
// For Objective-C declarations we generally don't expect to have multiple
// declarators, thus use declaration starting location as the "declaration
@@ -220,6 +193,23 @@ RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
return nullptr;
+ if (!CommentsLoaded && ExternalSource) {
+ ExternalSource->ReadComments();
+
+#ifndef NDEBUG
+ ArrayRef<RawComment *> RawComments = Comments.getComments();
+ assert(std::is_sorted(RawComments.begin(), RawComments.end(),
+ BeforeThanCompare<RawComment>(SourceMgr)));
+#endif
+
+ CommentsLoaded = true;
+ }
+
+ ArrayRef<RawComment *> RawComments = Comments.getComments();
+ // If there are no comments anywhere, we won't find anything.
+ if (RawComments.empty())
+ return nullptr;
+
// Find the comment that occurs just after this declaration.
ArrayRef<RawComment *>::iterator Comment;
{
@@ -238,12 +228,11 @@ RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
if (Found) {
Comment = MaybeBeforeDecl + 1;
- assert(Comment == std::lower_bound(RawComments.begin(), RawComments.end(),
- &CommentAtDeclLoc, Compare));
+ assert(Comment ==
+ llvm::lower_bound(RawComments, &CommentAtDeclLoc, Compare));
} else {
// Slow path.
- Comment = std::lower_bound(RawComments.begin(), RawComments.end(),
- &CommentAtDeclLoc, Compare);
+ Comment = llvm::lower_bound(RawComments, &CommentAtDeclLoc, Compare);
}
}
@@ -265,6 +254,7 @@ RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second)
== SourceMgr.getLineNumber(CommentBeginDecomp.first,
CommentBeginDecomp.second)) {
+ (**Comment).setAttached();
return *Comment;
}
}
@@ -306,6 +296,7 @@ RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
if (Text.find_first_of(";{}#@") != StringRef::npos)
return nullptr;
+ (**Comment).setAttached();
return *Comment;
}
@@ -835,6 +826,9 @@ ASTContext::~ASTContext() {
for (const auto &Value : ModuleInitializers)
Value.second->~PerModuleInitializers();
+
+ for (APValue *Value : APValueCleanups)
+ Value->~APValue();
}
class ASTContext::ParentMap {
@@ -913,7 +907,7 @@ void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
Parents.reset();
}
-void ASTContext::AddDeallocation(void (*Callback)(void*), void *Data) {
+void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
Deallocations.push_back({Callback, Data});
}
@@ -1391,24 +1385,6 @@ ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
TemplateOrInstantiation[Inst] = TSI;
}
-FunctionDecl *ASTContext::getClassScopeSpecializationPattern(
- const FunctionDecl *FD){
- assert(FD && "Specialization is 0");
- llvm::DenseMap<const FunctionDecl*, FunctionDecl *>::const_iterator Pos
- = ClassScopeSpecializationPattern.find(FD);
- if (Pos == ClassScopeSpecializationPattern.end())
- return nullptr;
-
- return Pos->second;
-}
-
-void ASTContext::setClassScopeSpecializationPattern(FunctionDecl *FD,
- FunctionDecl *Pattern) {
- assert(FD && "Specialization is 0");
- assert(Pattern && "Class scope specialization pattern is 0");
- ClassScopeSpecializationPattern[FD] = Pattern;
-}
-
NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
auto Pos = InstantiatedFromUsingDecl.find(UUD);
@@ -1548,8 +1524,14 @@ const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
return Target->getHalfFormat();
case BuiltinType::Float: return Target->getFloatFormat();
case BuiltinType::Double: return Target->getDoubleFormat();
- case BuiltinType::LongDouble: return Target->getLongDoubleFormat();
- case BuiltinType::Float128: return Target->getFloat128Format();
+ case BuiltinType::LongDouble:
+ if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
+ return AuxTarget->getLongDoubleFormat();
+ return Target->getLongDoubleFormat();
+ case BuiltinType::Float128:
+ if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
+ return AuxTarget->getFloat128Format();
+ return Target->getFloat128Format();
}
}
@@ -1610,8 +1592,10 @@ CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
if (BaseT.getQualifiers().hasUnaligned())
Align = Target->getCharWidth();
if (const auto *VD = dyn_cast<VarDecl>(D)) {
- if (VD->hasGlobalStorage() && !ForAlignof)
- Align = std::max(Align, getTargetInfo().getMinGlobalAlign());
+ if (VD->hasGlobalStorage() && !ForAlignof) {
+ uint64_t TypeSize = getTypeSize(T.getTypePtr());
+ Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize));
+ }
}
}
@@ -1916,8 +1900,16 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
break;
case BuiltinType::Float16:
case BuiltinType::Half:
- Width = Target->getHalfWidth();
- Align = Target->getHalfAlign();
+ if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
+ !getLangOpts().OpenMPIsDevice) {
+ Width = Target->getHalfWidth();
+ Align = Target->getHalfAlign();
+ } else {
+ assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
+ "Expected OpenMP device compilation.");
+ Width = AuxTarget->getHalfWidth();
+ Align = AuxTarget->getHalfAlign();
+ }
break;
case BuiltinType::Float:
Width = Target->getFloatWidth();
@@ -1928,12 +1920,27 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Align = Target->getDoubleAlign();
break;
case BuiltinType::LongDouble:
- Width = Target->getLongDoubleWidth();
- Align = Target->getLongDoubleAlign();
+ if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
+ (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() ||
+ Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) {
+ Width = AuxTarget->getLongDoubleWidth();
+ Align = AuxTarget->getLongDoubleAlign();
+ } else {
+ Width = Target->getLongDoubleWidth();
+ Align = Target->getLongDoubleAlign();
+ }
break;
case BuiltinType::Float128:
- Width = Target->getFloat128Width();
- Align = Target->getFloat128Align();
+ if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
+ !getLangOpts().OpenMPIsDevice) {
+ Width = Target->getFloat128Width();
+ Align = Target->getFloat128Align();
+ } else {
+ assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
+ "Expected OpenMP device compilation.");
+ Width = AuxTarget->getFloat128Width();
+ Align = AuxTarget->getFloat128Align();
+ }
break;
case BuiltinType::NullPtr:
Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t)
@@ -2057,6 +2064,10 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
case Type::Paren:
return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
+ case Type::MacroQualified:
+ return getTypeInfo(
+ cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr());
+
case Type::ObjCTypeParam:
return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr());
@@ -2134,7 +2145,7 @@ unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
} else {
- UnadjustedAlign = getTypeAlign(T);
+ UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
}
MemoizedUnadjustedAlign[T] = UnadjustedAlign;
@@ -2233,7 +2244,8 @@ unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
/// getAlignOfGlobalVar - Return the alignment in bits that should be given
/// to a global variable of the specified type.
unsigned ASTContext::getAlignOfGlobalVar(QualType T) const {
- return std::max(getTypeAlign(T), getTargetInfo().getMinGlobalAlign());
+ uint64_t TypeSize = getTypeSize(T.getTypePtr());
+ return std::max(getTypeAlign(T), getTargetInfo().getMinGlobalAlign(TypeSize));
}
/// getAlignOfGlobalVarInChars - Return the alignment in characters that
@@ -2574,7 +2586,7 @@ ASTContext::getBlockVarCopyInit(const VarDecl*VD) const {
return {nullptr, false};
}
-/// Set the copy inialization expression of a block var decl.
+/// Set the copy initialization expression of a block var decl.
void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr,
bool CanThrow) {
assert(VD && CopyExpr && "Passed null params");
@@ -2763,6 +2775,12 @@ QualType ASTContext::getFunctionTypeWithExceptionSpec(
return getParenType(
getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI));
+ // Might be wrapped in a macro qualified type.
+ if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig))
+ return getMacroQualifiedType(
+ getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI),
+ MQT->getMacroIdentifier());
+
// Might have a calling-convention attribute.
if (const auto *AT = dyn_cast<AttributedType>(Orig))
return getAttributedType(
@@ -2772,7 +2790,7 @@ QualType ASTContext::getFunctionTypeWithExceptionSpec(
// Anything else must be a function type. Rebuild it with the new exception
// specification.
- const auto *Proto = cast<FunctionProtoType>(Orig);
+ const auto *Proto = Orig->getAs<FunctionProtoType>();
return getFunctionType(
Proto->getReturnType(), Proto->getParamTypes(),
Proto->getExtProtoInfo().withExceptionSpec(ESI));
@@ -3739,7 +3757,10 @@ QualType ASTContext::getFunctionTypeInternal(
break;
}
- case EST_DynamicNone: case EST_BasicNoexcept: case EST_NoexceptTrue:
+ case EST_DynamicNone:
+ case EST_BasicNoexcept:
+ case EST_NoexceptTrue:
+ case EST_NoThrow:
CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
break;
@@ -3938,7 +3959,7 @@ QualType ASTContext::getAttributedType(attr::Kind attrKind,
QualType canon = getCanonicalType(equivalentType);
type = new (*this, TypeAlignment)
- AttributedType(canon, attrKind, modifiedType, equivalentType);
+ AttributedType(canon, attrKind, modifiedType, equivalentType);
Types.push_back(type);
AttributedTypes.InsertNode(type, insertPos);
@@ -4219,6 +4240,19 @@ ASTContext::getParenType(QualType InnerType) const {
return QualType(T, 0);
}
+QualType
+ASTContext::getMacroQualifiedType(QualType UnderlyingTy,
+ const IdentifierInfo *MacroII) const {
+ QualType Canon = UnderlyingTy;
+ if (!Canon.isCanonical())
+ Canon = getCanonicalType(UnderlyingTy);
+
+ auto *newType = new (*this, TypeAlignment)
+ MacroQualifiedType(UnderlyingTy, Canon, MacroII);
+ Types.push_back(newType);
+ return QualType(newType, 0);
+}
+
QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
NestedNameSpecifier *NNS,
const IdentifierInfo *Name,
@@ -4356,7 +4390,13 @@ QualType ASTContext::getPackExpansionType(QualType Pattern,
llvm::FoldingSetNodeID ID;
PackExpansionType::Profile(ID, Pattern, NumExpansions);
- assert(Pattern->containsUnexpandedParameterPack() &&
+ // A deduced type can deduce to a pack, eg
+ // auto ...x = some_pack;
+ // That declaration isn't (yet) valid, but is created as part of building an
+ // init-capture pack:
+ // [...x = some_pack] {}
+ assert((Pattern->containsUnexpandedParameterPack() ||
+ Pattern->getContainedDeducedType()) &&
"Pack expansions must expand one or more parameter packs");
void *InsertPos = nullptr;
PackExpansionType *T
@@ -4856,19 +4896,20 @@ QualType ASTContext::getUnaryTransformType(QualType BaseType,
/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
/// canonical deduced-but-dependent 'auto' type.
QualType ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
- bool IsDependent) const {
+ bool IsDependent, bool IsPack) const {
+ assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack");
if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && !IsDependent)
return getAutoDeductType();
// Look in the folding set for an existing type.
void *InsertPos = nullptr;
llvm::FoldingSetNodeID ID;
- AutoType::Profile(ID, DeducedType, Keyword, IsDependent);
+ AutoType::Profile(ID, DeducedType, Keyword, IsDependent, IsPack);
if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(AT, 0);
auto *AT = new (*this, TypeAlignment)
- AutoType(DeducedType, Keyword, IsDependent);
+ AutoType(DeducedType, Keyword, IsDependent, IsPack);
Types.push_back(AT);
if (InsertPos)
AutoTypes.InsertNode(AT, InsertPos);
@@ -4930,7 +4971,7 @@ QualType ASTContext::getAutoDeductType() const {
if (AutoDeductTy.isNull())
AutoDeductTy = QualType(
new (*this, TypeAlignment) AutoType(QualType(), AutoTypeKeyword::Auto,
- /*dependent*/false),
+ /*dependent*/false, /*pack*/false),
0);
return AutoDeductTy;
}
@@ -5218,6 +5259,11 @@ ASTContext::getNameForTemplate(TemplateName Name,
return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
}
+ case TemplateName::AssumedTemplate: {
+ AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName();
+ return DeclarationNameInfo(Storage->getDeclName(), NameLoc);
+ }
+
case TemplateName::DependentTemplate: {
DependentTemplateName *DTN = Name.getAsDependentTemplateName();
DeclarationName DName;
@@ -5265,7 +5311,8 @@ TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const {
}
case TemplateName::OverloadedTemplate:
- llvm_unreachable("cannot canonicalize overloaded template");
+ case TemplateName::AssumedTemplate:
+ llvm_unreachable("cannot canonicalize unresolved template");
case TemplateName::DependentTemplate: {
DependentTemplateName *DTN = Name.getAsDependentTemplateName();
@@ -5609,6 +5656,12 @@ int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const {
return -1;
}
+int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const {
+ if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS))
+ return 0;
+ return getFloatingTypeOrder(LHS, RHS);
+}
+
/// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
/// routine will assert if passed a built-in type that isn't an integer or enum,
/// or if it is not canonicalized.
@@ -6283,12 +6336,13 @@ void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
// Encode type qualifer, 'in', 'inout', etc. for the parameter.
getObjCEncodingForTypeQualifier(QT, S);
// Encode parameter type.
- getObjCEncodingForTypeImpl(T, S, true, true, nullptr,
- true /*OutermostType*/,
- false /*EncodingProperty*/,
- false /*StructField*/,
- Extended /*EncodeBlockParameters*/,
- Extended /*EncodeClassNames*/);
+ ObjCEncOptions Options = ObjCEncOptions()
+ .setExpandPointedToStructures()
+ .setExpandStructures()
+ .setIsOutermostType();
+ if (Extended)
+ Options.setEncodeBlockParameters().setEncodeClassNames();
+ getObjCEncodingForTypeImpl(T, S, Options, /*Field=*/nullptr);
}
/// getObjCEncodingForMethodDecl - Return the encoded type for this method
@@ -6480,9 +6534,12 @@ void ASTContext::getObjCEncodingForType(QualType T, std::string& S,
// directly pointed to, and expanding embedded structures. Note that
// these rules are sufficient to prevent recursive encoding of the
// same type.
- getObjCEncodingForTypeImpl(T, S, true, true, Field,
- true /* outermost type */, false, false,
- false, false, false, NotEncodedT);
+ getObjCEncodingForTypeImpl(T, S,
+ ObjCEncOptions()
+ .setExpandPointedToStructures()
+ .setExpandStructures()
+ .setIsOutermostType(),
+ Field, NotEncodedT);
}
void ASTContext::getObjCEncodingForPropertyType(QualType T,
@@ -6490,9 +6547,13 @@ void ASTContext::getObjCEncodingForPropertyType(QualType T,
// Encode result type.
// GCC has some special rules regarding encoding of properties which
// closely resembles encoding of ivars.
- getObjCEncodingForTypeImpl(T, S, true, true, nullptr,
- true /* outermost type */,
- true /* encoding property */);
+ getObjCEncodingForTypeImpl(T, S,
+ ObjCEncOptions()
+ .setExpandPointedToStructures()
+ .setExpandStructures()
+ .setIsOutermostType()
+ .setEncodingProperty(),
+ /*Field=*/nullptr);
}
static char getObjCEncodingForPrimitiveKind(const ASTContext *C,
@@ -6639,16 +6700,9 @@ static void EncodeBitField(const ASTContext *Ctx, std::string& S,
}
// FIXME: Use SmallString for accumulating string.
-void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
- bool ExpandPointedToStructures,
- bool ExpandStructures,
+void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
+ const ObjCEncOptions Options,
const FieldDecl *FD,
- bool OutermostType,
- bool EncodingProperty,
- bool StructField,
- bool EncodeBlockParameters,
- bool EncodeClassNames,
- bool EncodePointerToObjCTypedef,
QualType *NotEncodedT) const {
CanQualType CT = getCanonicalType(T);
switch (CT->getTypeClass()) {
@@ -6665,14 +6719,16 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
case Type::Complex: {
const auto *CT = T->castAs<ComplexType>();
S += 'j';
- getObjCEncodingForTypeImpl(CT->getElementType(), S, false, false, nullptr);
+ getObjCEncodingForTypeImpl(CT->getElementType(), S, ObjCEncOptions(),
+ /*Field=*/nullptr);
return;
}
case Type::Atomic: {
const auto *AT = T->castAs<AtomicType>();
S += 'A';
- getObjCEncodingForTypeImpl(AT->getValueType(), S, false, false, nullptr);
+ getObjCEncodingForTypeImpl(AT->getValueType(), S, ObjCEncOptions(),
+ /*Field=*/nullptr);
return;
}
@@ -6698,11 +6754,11 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
// the pointer itself gets ignored, _unless_ we are looking at a typedef!
// Also, do not emit the 'r' for anything but the outermost type!
if (isa<TypedefType>(T.getTypePtr())) {
- if (OutermostType && T.isConstQualified()) {
+ if (Options.IsOutermostType() && T.isConstQualified()) {
isReadOnly = true;
S += 'r';
}
- } else if (OutermostType) {
+ } else if (Options.IsOutermostType()) {
QualType P = PointeeTy;
while (P->getAs<PointerType>())
P = P->getAs<PointerType>()->getPointeeType();
@@ -6742,9 +6798,11 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
S += '^';
getLegacyIntegralTypeEncoding(PointeeTy);
- getObjCEncodingForTypeImpl(PointeeTy, S, false, ExpandPointedToStructures,
- nullptr, false, false, false, false, false, false,
- NotEncodedT);
+ ObjCEncOptions NewOptions;
+ if (Options.ExpandPointedToStructures())
+ NewOptions.setExpandStructures();
+ getObjCEncodingForTypeImpl(PointeeTy, S, NewOptions,
+ /*Field=*/nullptr, NotEncodedT);
return;
}
@@ -6753,12 +6811,13 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
case Type::VariableArray: {
const auto *AT = cast<ArrayType>(CT);
- if (isa<IncompleteArrayType>(AT) && !StructField) {
+ if (isa<IncompleteArrayType>(AT) && !Options.IsStructField()) {
// Incomplete arrays are encoded as a pointer to the array element.
S += '^';
- getObjCEncodingForTypeImpl(AT->getElementType(), S,
- false, ExpandStructures, FD);
+ getObjCEncodingForTypeImpl(
+ AT->getElementType(), S,
+ Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD);
} else {
S += '[';
@@ -6771,10 +6830,10 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
S += '0';
}
- getObjCEncodingForTypeImpl(AT->getElementType(), S,
- false, ExpandStructures, FD,
- false, false, false, false, false, false,
- NotEncodedT);
+ getObjCEncodingForTypeImpl(
+ AT->getElementType(), S,
+ Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD,
+ NotEncodedT);
S += ']';
}
return;
@@ -6800,7 +6859,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
} else {
S += '?';
}
- if (ExpandStructures) {
+ if (Options.ExpandStructures()) {
S += '=';
if (!RDecl->isUnion()) {
getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT);
@@ -6814,16 +6873,16 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
// Special case bit-fields.
if (Field->isBitField()) {
- getObjCEncodingForTypeImpl(Field->getType(), S, false, true,
+ getObjCEncodingForTypeImpl(Field->getType(), S,
+ ObjCEncOptions().setExpandStructures(),
Field);
} else {
QualType qt = Field->getType();
getLegacyIntegralTypeEncoding(qt);
- getObjCEncodingForTypeImpl(qt, S, false, true,
- FD, /*OutermostType*/false,
- /*EncodingProperty*/false,
- /*StructField*/true,
- false, false, false, NotEncodedT);
+ getObjCEncodingForTypeImpl(
+ qt, S,
+ ObjCEncOptions().setExpandStructures().setIsStructField(), FD,
+ NotEncodedT);
}
}
}
@@ -6835,26 +6894,20 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
case Type::BlockPointer: {
const auto *BT = T->castAs<BlockPointerType>();
S += "@?"; // Unlike a pointer-to-function, which is "^?".
- if (EncodeBlockParameters) {
+ if (Options.EncodeBlockParameters()) {
const auto *FT = BT->getPointeeType()->castAs<FunctionType>();
S += '<';
// Block return type
- getObjCEncodingForTypeImpl(
- FT->getReturnType(), S, ExpandPointedToStructures, ExpandStructures,
- FD, false /* OutermostType */, EncodingProperty,
- false /* StructField */, EncodeBlockParameters, EncodeClassNames, false,
- NotEncodedT);
+ getObjCEncodingForTypeImpl(FT->getReturnType(), S,
+ Options.forComponentType(), FD, NotEncodedT);
// Block self
S += "@?";
// Block parameters
if (const auto *FPT = dyn_cast<FunctionProtoType>(FT)) {
for (const auto &I : FPT->param_types())
- getObjCEncodingForTypeImpl(
- I, S, ExpandPointedToStructures, ExpandStructures, FD,
- false /* OutermostType */, EncodingProperty,
- false /* StructField */, EncodeBlockParameters, EncodeClassNames,
- false, NotEncodedT);
+ getObjCEncodingForTypeImpl(I, S, Options.forComponentType(), FD,
+ NotEncodedT);
}
S += '>';
}
@@ -6882,18 +6935,19 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface();
S += '{';
S += OI->getObjCRuntimeNameAsString();
- if (ExpandStructures) {
+ if (Options.ExpandStructures()) {
S += '=';
SmallVector<const ObjCIvarDecl*, 32> Ivars;
DeepCollectObjCIvars(OI, true, Ivars);
for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
const FieldDecl *Field = Ivars[i];
if (Field->isBitField())
- getObjCEncodingForTypeImpl(Field->getType(), S, false, true, Field);
+ getObjCEncodingForTypeImpl(Field->getType(), S,
+ ObjCEncOptions().setExpandStructures(),
+ Field);
else
- getObjCEncodingForTypeImpl(Field->getType(), S, false, true, FD,
- false, false, false, false, false,
- EncodePointerToObjCTypedef,
+ getObjCEncodingForTypeImpl(Field->getType(), S,
+ ObjCEncOptions().setExpandStructures(), FD,
NotEncodedT);
}
}
@@ -6910,17 +6964,20 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) {
// FIXME: Consider if we need to output qualifiers for 'Class<p>'.
- // Since this is a binary compatibility issue, need to consult with runtime
- // folks. Fortunately, this is a *very* obscure construct.
+ // Since this is a binary compatibility issue, need to consult with
+ // runtime folks. Fortunately, this is a *very* obscure construct.
S += '#';
return;
}
if (OPT->isObjCQualifiedIdType()) {
- getObjCEncodingForTypeImpl(getObjCIdType(), S,
- ExpandPointedToStructures,
- ExpandStructures, FD);
- if (FD || EncodingProperty || EncodeClassNames) {
+ getObjCEncodingForTypeImpl(
+ getObjCIdType(), S,
+ Options.keepingOnly(ObjCEncOptions()
+ .setExpandPointedToStructures()
+ .setExpandStructures()),
+ FD);
+ if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) {
// Note that we do extended encoding of protocol qualifer list
// Only when doing ivar or property encoding.
S += '"';
@@ -6934,39 +6991,9 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
return;
}
- QualType PointeeTy = OPT->getPointeeType();
- if (!EncodingProperty &&
- isa<TypedefType>(PointeeTy.getTypePtr()) &&
- !EncodePointerToObjCTypedef) {
- // Another historical/compatibility reason.
- // We encode the underlying type which comes out as
- // {...};
- S += '^';
- if (FD && OPT->getInterfaceDecl()) {
- // Prevent recursive encoding of fields in some rare cases.
- ObjCInterfaceDecl *OI = OPT->getInterfaceDecl();
- SmallVector<const ObjCIvarDecl*, 32> Ivars;
- DeepCollectObjCIvars(OI, true, Ivars);
- for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
- if (Ivars[i] == FD) {
- S += '{';
- S += OI->getObjCRuntimeNameAsString();
- S += '}';
- return;
- }
- }
- }
- getObjCEncodingForTypeImpl(PointeeTy, S,
- false, ExpandPointedToStructures,
- nullptr,
- false, false, false, false, false,
- /*EncodePointerToObjCTypedef*/true);
- return;
- }
-
S += '@';
if (OPT->getInterfaceDecl() &&
- (FD || EncodingProperty || EncodeClassNames)) {
+ (FD || Options.EncodingProperty() || Options.EncodeClassNames())) {
S += '"';
S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString();
for (const auto *I : OPT->quals()) {
@@ -6980,7 +7007,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
}
// gcc just blithely ignores member pointers.
- // FIXME: we shoul do better than that. 'M' is available.
+ // FIXME: we should do better than that. 'M' is available.
case Type::MemberPointer:
// This matches gcc's encoding, even though technically it is insufficient.
//FIXME. We should do a better job than gcc.
@@ -7142,11 +7169,9 @@ void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
} else {
QualType qt = field->getType();
getLegacyIntegralTypeEncoding(qt);
- getObjCEncodingForTypeImpl(qt, S, false, true, FD,
- /*OutermostType*/false,
- /*EncodingProperty*/false,
- /*StructField*/true,
- false, false, false, NotEncodedT);
+ getObjCEncodingForTypeImpl(
+ qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(),
+ FD, NotEncodedT);
#ifndef NDEBUG
CurOffs += getTypeSize(field->getType());
#endif
@@ -7606,6 +7631,13 @@ ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin,
return TemplateName(OT);
}
+/// Retrieve a template name representing an unqualified-id that has been
+/// assumed to name a template for ADL purposes.
+TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const {
+ auto *OT = new (*this) AssumedTemplateStorage(Name);
+ return TemplateName(OT);
+}
+
/// Retrieve the template name that represents a qualified
/// template name such as \c std::vector.
TemplateName
@@ -8581,7 +8613,7 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
if (lproto->isVariadic() != rproto->isVariadic())
return {};
- if (lproto->getTypeQuals() != rproto->getTypeQuals())
+ if (lproto->getMethodQuals() != rproto->getMethodQuals())
return {};
SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos;
@@ -9246,7 +9278,7 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
// Read the prefixed modifiers first.
bool Done = false;
#ifndef NDEBUG
- bool IsSpecialLong = false;
+ bool IsSpecial = false;
#endif
while (!Done) {
switch (*Str++) {
@@ -9265,26 +9297,26 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
Unsigned = true;
break;
case 'L':
- assert(!IsSpecialLong && "Can't use 'L' with 'W' or 'N' modifiers");
+ assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers");
assert(HowLong <= 2 && "Can't have LLLL modifier");
++HowLong;
break;
case 'N':
// 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise.
- assert(!IsSpecialLong && "Can't use two 'N' or 'W' modifiers!");
+ assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!");
#ifndef NDEBUG
- IsSpecialLong = true;
+ IsSpecial = true;
#endif
if (Context.getTargetInfo().getLongWidth() == 32)
++HowLong;
break;
case 'W':
// This modifier represents int64 type.
- assert(!IsSpecialLong && "Can't use two 'N' or 'W' modifiers!");
+ assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!");
#ifndef NDEBUG
- IsSpecialLong = true;
+ IsSpecial = true;
#endif
switch (Context.getTargetInfo().getInt64Type()) {
default:
@@ -9297,6 +9329,38 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
break;
}
break;
+ case 'Z':
+ // This modifier represents int32 type.
+ assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
+ assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!");
+ #ifndef NDEBUG
+ IsSpecial = true;
+ #endif
+ switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) {
+ default:
+ llvm_unreachable("Unexpected integer type");
+ case TargetInfo::SignedInt:
+ HowLong = 0;
+ break;
+ case TargetInfo::SignedLong:
+ HowLong = 1;
+ break;
+ case TargetInfo::SignedLongLong:
+ HowLong = 2;
+ break;
+ }
+ break;
+ case 'O':
+ assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
+ assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!");
+ #ifndef NDEBUG
+ IsSpecial = true;
+ #endif
+ if (Context.getLangOpts().OpenCL)
+ HowLong = 1;
+ else
+ HowLong = 2;
+ break;
}
}
@@ -9518,6 +9582,10 @@ QualType ASTContext::GetBuiltinType(unsigned Id,
GetBuiltinTypeError &Error,
unsigned *IntegerConstantArgs) const {
const char *TypeStr = BuiltinInfo.getTypeString(Id);
+ if (TypeStr[0] == '\0') {
+ Error = GE_Missing_type;
+ return {};
+ }
SmallVector<QualType, 8> ArgTypes;
@@ -9553,10 +9621,12 @@ QualType ASTContext::GetBuiltinType(unsigned Id,
assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
"'.' should only occur at end of builtin type list!");
- FunctionType::ExtInfo EI(CC_C);
+ bool Variadic = (TypeStr[0] == '.');
+
+ FunctionType::ExtInfo EI(getDefaultCallingConvention(
+ Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true));
if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true);
- bool Variadic = (TypeStr[0] == '.');
// We really shouldn't be making a no-proto type here.
if (ArgTypes.empty() && Variadic && !getLangOpts().CPlusPlus)
@@ -9784,12 +9854,12 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
return false;
} else if (isa<PragmaCommentDecl>(D))
return true;
- else if (isa<OMPThreadPrivateDecl>(D))
- return true;
else if (isa<PragmaDetectMismatchDecl>(D))
return true;
else if (isa<OMPThreadPrivateDecl>(D))
return !D->getDeclContext()->isDependentContext();
+ else if (isa<OMPAllocateDecl>(D))
+ return !D->getDeclContext()->isDependentContext();
else if (isa<OMPDeclareReductionDecl>(D))
return !D->getDeclContext()->isDependentContext();
else if (isa<ImportDecl>(D))
@@ -9931,34 +10001,39 @@ void ASTContext::forEachMultiversionedFunctionVersion(
}
CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
- bool IsCXXMethod) const {
+ bool IsCXXMethod,
+ bool IsBuiltin) const {
// Pass through to the C++ ABI object
if (IsCXXMethod)
return ABI->getDefaultMethodCallConv(IsVariadic);
- switch (LangOpts.getDefaultCallingConv()) {
- case LangOptions::DCC_None:
- break;
- case LangOptions::DCC_CDecl:
- return CC_C;
- case LangOptions::DCC_FastCall:
- if (getTargetInfo().hasFeature("sse2") && !IsVariadic)
- return CC_X86FastCall;
- break;
- case LangOptions::DCC_StdCall:
- if (!IsVariadic)
- return CC_X86StdCall;
- break;
- case LangOptions::DCC_VectorCall:
- // __vectorcall cannot be applied to variadic functions.
- if (!IsVariadic)
- return CC_X86VectorCall;
- break;
- case LangOptions::DCC_RegCall:
- // __regcall cannot be applied to variadic functions.
- if (!IsVariadic)
- return CC_X86RegCall;
- break;
+ // Builtins ignore user-specified default calling convention and remain the
+ // Target's default calling convention.
+ if (!IsBuiltin) {
+ switch (LangOpts.getDefaultCallingConv()) {
+ case LangOptions::DCC_None:
+ break;
+ case LangOptions::DCC_CDecl:
+ return CC_C;
+ case LangOptions::DCC_FastCall:
+ if (getTargetInfo().hasFeature("sse2") && !IsVariadic)
+ return CC_X86FastCall;
+ break;
+ case LangOptions::DCC_StdCall:
+ if (!IsVariadic)
+ return CC_X86StdCall;
+ break;
+ case LangOptions::DCC_VectorCall:
+ // __vectorcall cannot be applied to variadic functions.
+ if (!IsVariadic)
+ return CC_X86VectorCall;
+ break;
+ case LangOptions::DCC_RegCall:
+ // __regcall cannot be applied to variadic functions.
+ if (!IsVariadic)
+ return CC_X86RegCall;
+ break;
+ }
}
return Target->getDefaultCallingConv(TargetInfo::CCMT_Unknown);
}
@@ -9978,8 +10053,10 @@ VTableContextBase *ASTContext::getVTableContext() {
return VTContext.get();
}
-MangleContext *ASTContext::createMangleContext() {
- switch (Target->getCXXABI().getKind()) {
+MangleContext *ASTContext::createMangleContext(const TargetInfo *T) {
+ if (!T)
+ T = Target;
+ switch (T->getCXXABI().getKind()) {
case TargetCXXABI::GenericAArch64:
case TargetCXXABI::GenericItanium:
case TargetCXXABI::GenericARM:
@@ -10010,8 +10087,7 @@ size_t ASTContext::getSideTableAllocatedMemory() const {
llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) +
llvm::capacity_in_bytes(OverriddenMethods) +
llvm::capacity_in_bytes(Types) +
- llvm::capacity_in_bytes(VariableArrayTypes) +
- llvm::capacity_in_bytes(ClassScopeSpecializationPattern);
+ llvm::capacity_in_bytes(VariableArrayTypes);
}
/// getIntTypeForBitwidth -
@@ -10140,6 +10216,31 @@ ASTContext::getMaterializedTemporaryValue(const MaterializeTemporaryExpr *E,
return MaterializedTemporaryValues.lookup(E);
}
+QualType ASTContext::getStringLiteralArrayType(QualType EltTy,
+ unsigned Length) const {
+ // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
+ if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
+ EltTy = EltTy.withConst();
+
+ EltTy = adjustStringLiteralBaseType(EltTy);
+
+ // Get an array type for the string, according to C99 6.4.5. This includes
+ // the null terminator character.
+ return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1),
+ ArrayType::Normal, /*IndexTypeQuals*/ 0);
+}
+
+StringLiteral *
+ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
+ StringLiteral *&Result = StringLiteralCache[Key];
+ if (!Result)
+ Result = StringLiteral::Create(
+ *this, Key, StringLiteral::Ascii,
+ /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()),
+ SourceLocation());
+ return Result;
+}
+
bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
const llvm::Triple &T = getTargetInfo().getTriple();
if (!T.isOSDarwin())
@@ -10485,7 +10586,13 @@ unsigned char ASTContext::getFixedPointIBits(QualType Ty) const {
}
FixedPointSemantics ASTContext::getFixedPointSemantics(QualType Ty) const {
- assert(Ty->isFixedPointType());
+ assert((Ty->isFixedPointType() || Ty->isIntegerType()) &&
+ "Can only get the fixed point semantics for a "
+ "fixed point or integer type.");
+ if (Ty->isIntegerType())
+ return FixedPointSemantics::GetIntegerSemantics(getIntWidth(Ty),
+ Ty->isSignedIntegerType());
+
bool isSigned = Ty->isSignedFixedPointType();
return FixedPointSemantics(
static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned,
@@ -10502,3 +10609,38 @@ APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const {
assert(Ty->isFixedPointType());
return APFixedPoint::getMin(getFixedPointSemantics(Ty));
}
+
+QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const {
+ assert(Ty->isUnsignedFixedPointType() &&
+ "Expected unsigned fixed point type");
+ const auto *BTy = Ty->getAs<BuiltinType>();
+
+ switch (BTy->getKind()) {
+ case BuiltinType::UShortAccum:
+ return ShortAccumTy;
+ case BuiltinType::UAccum:
+ return AccumTy;
+ case BuiltinType::ULongAccum:
+ return LongAccumTy;
+ case BuiltinType::SatUShortAccum:
+ return SatShortAccumTy;
+ case BuiltinType::SatUAccum:
+ return SatAccumTy;
+ case BuiltinType::SatULongAccum:
+ return SatLongAccumTy;
+ case BuiltinType::UShortFract:
+ return ShortFractTy;
+ case BuiltinType::UFract:
+ return FractTy;
+ case BuiltinType::ULongFract:
+ return LongFractTy;
+ case BuiltinType::SatUShortFract:
+ return SatShortFractTy;
+ case BuiltinType::SatUFract:
+ return SatFractTy;
+ case BuiltinType::SatULongFract:
+ return SatLongFractTy;
+ default:
+ llvm_unreachable("Unexpected unsigned fixed point type");
+ }
+}
diff --git a/lib/AST/ASTDiagnostic.cpp b/lib/AST/ASTDiagnostic.cpp
index dd0585558572..15df86585294 100644
--- a/lib/AST/ASTDiagnostic.cpp
+++ b/lib/AST/ASTDiagnostic.cpp
@@ -1,9 +1,8 @@
//===--- ASTDiagnostic.cpp - Diagnostic Printing Hooks for AST Nodes ------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -42,6 +41,11 @@ static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) {
QT = PT->desugar();
continue;
}
+ // ... or a macro defined type ...
+ if (const MacroQualifiedType *MDT = dyn_cast<MacroQualifiedType>(Ty)) {
+ QT = MDT->desugar();
+ continue;
+ }
// ...or a substituted template type parameter ...
if (const SubstTemplateTypeParmType *ST =
dyn_cast<SubstTemplateTypeParmType>(Ty)) {
diff --git a/lib/AST/ASTDumper.cpp b/lib/AST/ASTDumper.cpp
index b52ec21943e6..22196a1a2600 100644
--- a/lib/AST/ASTDumper.cpp
+++ b/lib/AST/ASTDumper.cpp
@@ -1,9 +1,8 @@
//===--- ASTDumper.cpp - Dumping implementation for ASTs ------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -12,21 +11,10 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTDumper.h"
#include "clang/AST/ASTContext.h"
-#include "clang/AST/ASTDumperUtils.h"
-#include "clang/AST/Attr.h"
-#include "clang/AST/AttrVisitor.h"
-#include "clang/AST/CommentVisitor.h"
-#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclLookups.h"
-#include "clang/AST/DeclObjC.h"
-#include "clang/AST/DeclOpenMP.h"
-#include "clang/AST/DeclVisitor.h"
-#include "clang/AST/LocInfoType.h"
-#include "clang/AST/StmtVisitor.h"
-#include "clang/AST/TemplateArgumentVisitor.h"
-#include "clang/AST/TextNodeDumper.h"
-#include "clang/AST/TypeVisitor.h"
+#include "clang/AST/JSONNodeDumper.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
@@ -34,345 +22,8 @@
using namespace clang;
using namespace clang::comments;
-//===----------------------------------------------------------------------===//
-// ASTDumper Visitor
-//===----------------------------------------------------------------------===//
-
-namespace {
-
- class ASTDumper
- : public ConstDeclVisitor<ASTDumper>,
- public ConstStmtVisitor<ASTDumper>,
- public ConstCommentVisitor<ASTDumper, void, const FullComment *>,
- public TypeVisitor<ASTDumper>,
- public ConstAttrVisitor<ASTDumper>,
- public ConstTemplateArgumentVisitor<ASTDumper> {
-
- TextNodeDumper NodeDumper;
-
- raw_ostream &OS;
-
- /// The policy to use for printing; can be defaulted.
- PrintingPolicy PrintPolicy;
-
- /// Indicates whether we should trigger deserialization of nodes that had
- /// not already been loaded.
- bool Deserialize = false;
-
- const bool ShowColors;
-
- /// Dump a child of the current node.
- template<typename Fn> void dumpChild(Fn DoDumpChild) {
- NodeDumper.AddChild(DoDumpChild);
- }
- template <typename Fn> void dumpChild(StringRef Label, Fn DoDumpChild) {
- NodeDumper.AddChild(Label, DoDumpChild);
- }
-
- public:
- ASTDumper(raw_ostream &OS, const CommandTraits *Traits,
- const SourceManager *SM)
- : ASTDumper(OS, Traits, SM,
- SM && SM->getDiagnostics().getShowColors()) {}
-
- ASTDumper(raw_ostream &OS, const CommandTraits *Traits,
- const SourceManager *SM, bool ShowColors)
- : ASTDumper(OS, Traits, SM, ShowColors, LangOptions()) {}
- ASTDumper(raw_ostream &OS, const CommandTraits *Traits,
- const SourceManager *SM, bool ShowColors,
- const PrintingPolicy &PrintPolicy)
- : NodeDumper(OS, ShowColors, SM, PrintPolicy, Traits), OS(OS),
- PrintPolicy(PrintPolicy), ShowColors(ShowColors) {}
-
- void setDeserialize(bool D) { Deserialize = D; }
-
- void dumpDecl(const Decl *D);
- void dumpStmt(const Stmt *S, StringRef Label = {});
-
- // Utilities
- void dumpTypeAsChild(QualType T);
- void dumpTypeAsChild(const Type *T);
- void dumpDeclContext(const DeclContext *DC);
- void dumpLookups(const DeclContext *DC, bool DumpDecls);
- void dumpAttr(const Attr *A);
-
- // C++ Utilities
- void dumpCXXCtorInitializer(const CXXCtorInitializer *Init);
- void dumpTemplateParameters(const TemplateParameterList *TPL);
- void dumpTemplateArgumentListInfo(const TemplateArgumentListInfo &TALI);
- void dumpTemplateArgumentLoc(const TemplateArgumentLoc &A,
- const Decl *From = nullptr,
- const char *Label = nullptr);
- void dumpTemplateArgumentList(const TemplateArgumentList &TAL);
- void dumpTemplateArgument(const TemplateArgument &A,
- SourceRange R = SourceRange(),
- const Decl *From = nullptr,
- const char *Label = nullptr);
- template <typename SpecializationDecl>
- void dumpTemplateDeclSpecialization(const SpecializationDecl *D,
- bool DumpExplicitInst,
- bool DumpRefOnly);
- template <typename TemplateDecl>
- void dumpTemplateDecl(const TemplateDecl *D, bool DumpExplicitInst);
-
- // Objective-C utilities.
- void dumpObjCTypeParamList(const ObjCTypeParamList *typeParams);
-
- // Types
- void VisitComplexType(const ComplexType *T) {
- dumpTypeAsChild(T->getElementType());
- }
- void VisitLocInfoType(const LocInfoType *T) {
- dumpTypeAsChild(T->getTypeSourceInfo()->getType());
- }
- void VisitPointerType(const PointerType *T) {
- dumpTypeAsChild(T->getPointeeType());
- }
- void VisitBlockPointerType(const BlockPointerType *T) {
- dumpTypeAsChild(T->getPointeeType());
- }
- void VisitReferenceType(const ReferenceType *T) {
- dumpTypeAsChild(T->getPointeeType());
- }
- void VisitMemberPointerType(const MemberPointerType *T) {
- dumpTypeAsChild(T->getClass());
- dumpTypeAsChild(T->getPointeeType());
- }
- void VisitArrayType(const ArrayType *T) {
- dumpTypeAsChild(T->getElementType());
- }
- void VisitVariableArrayType(const VariableArrayType *T) {
- VisitArrayType(T);
- dumpStmt(T->getSizeExpr());
- }
- void VisitDependentSizedArrayType(const DependentSizedArrayType *T) {
- dumpTypeAsChild(T->getElementType());
- dumpStmt(T->getSizeExpr());
- }
- void VisitDependentSizedExtVectorType(
- const DependentSizedExtVectorType *T) {
- dumpTypeAsChild(T->getElementType());
- dumpStmt(T->getSizeExpr());
- }
- void VisitVectorType(const VectorType *T) {
- dumpTypeAsChild(T->getElementType());
- }
- void VisitFunctionType(const FunctionType *T) {
- dumpTypeAsChild(T->getReturnType());
- }
- void VisitFunctionProtoType(const FunctionProtoType *T) {
- VisitFunctionType(T);
- for (QualType PT : T->getParamTypes())
- dumpTypeAsChild(PT);
- if (T->getExtProtoInfo().Variadic)
- dumpChild([=] { OS << "..."; });
- }
- void VisitTypeOfExprType(const TypeOfExprType *T) {
- dumpStmt(T->getUnderlyingExpr());
- }
- void VisitDecltypeType(const DecltypeType *T) {
- dumpStmt(T->getUnderlyingExpr());
- }
- void VisitUnaryTransformType(const UnaryTransformType *T) {
- dumpTypeAsChild(T->getBaseType());
- }
- void VisitAttributedType(const AttributedType *T) {
- // FIXME: AttrKind
- dumpTypeAsChild(T->getModifiedType());
- }
- void VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) {
- dumpTypeAsChild(T->getReplacedParameter());
- }
- void VisitSubstTemplateTypeParmPackType(
- const SubstTemplateTypeParmPackType *T) {
- dumpTypeAsChild(T->getReplacedParameter());
- dumpTemplateArgument(T->getArgumentPack());
- }
- void VisitTemplateSpecializationType(const TemplateSpecializationType *T) {
- for (auto &Arg : *T)
- dumpTemplateArgument(Arg);
- if (T->isTypeAlias())
- dumpTypeAsChild(T->getAliasedType());
- }
- void VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
- dumpTypeAsChild(T->getPointeeType());
- }
- void VisitAtomicType(const AtomicType *T) {
- dumpTypeAsChild(T->getValueType());
- }
- void VisitPipeType(const PipeType *T) {
- dumpTypeAsChild(T->getElementType());
- }
- void VisitAdjustedType(const AdjustedType *T) {
- dumpTypeAsChild(T->getOriginalType());
- }
- void VisitPackExpansionType(const PackExpansionType *T) {
- if (!T->isSugared())
- dumpTypeAsChild(T->getPattern());
- }
- // FIXME: ElaboratedType, DependentNameType,
- // DependentTemplateSpecializationType, ObjCObjectType
-
- // Decls
- void VisitLabelDecl(const LabelDecl *D);
- void VisitTypedefDecl(const TypedefDecl *D);
- void VisitEnumDecl(const EnumDecl *D);
- void VisitRecordDecl(const RecordDecl *D);
- void VisitEnumConstantDecl(const EnumConstantDecl *D);
- void VisitIndirectFieldDecl(const IndirectFieldDecl *D);
- void VisitFunctionDecl(const FunctionDecl *D);
- void VisitFieldDecl(const FieldDecl *D);
- void VisitVarDecl(const VarDecl *D);
- void VisitDecompositionDecl(const DecompositionDecl *D);
- void VisitBindingDecl(const BindingDecl *D);
- void VisitFileScopeAsmDecl(const FileScopeAsmDecl *D);
- void VisitImportDecl(const ImportDecl *D);
- void VisitPragmaCommentDecl(const PragmaCommentDecl *D);
- void VisitPragmaDetectMismatchDecl(const PragmaDetectMismatchDecl *D);
- void VisitCapturedDecl(const CapturedDecl *D);
-
- // OpenMP decls
- void VisitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D);
- void VisitOMPDeclareReductionDecl(const OMPDeclareReductionDecl *D);
- void VisitOMPRequiresDecl(const OMPRequiresDecl *D);
- void VisitOMPCapturedExprDecl(const OMPCapturedExprDecl *D);
-
- // C++ Decls
- void VisitNamespaceDecl(const NamespaceDecl *D);
- void VisitUsingDirectiveDecl(const UsingDirectiveDecl *D);
- void VisitNamespaceAliasDecl(const NamespaceAliasDecl *D);
- void VisitTypeAliasDecl(const TypeAliasDecl *D);
- void VisitTypeAliasTemplateDecl(const TypeAliasTemplateDecl *D);
- void VisitCXXRecordDecl(const CXXRecordDecl *D);
- void VisitStaticAssertDecl(const StaticAssertDecl *D);
- void VisitFunctionTemplateDecl(const FunctionTemplateDecl *D);
- void VisitClassTemplateDecl(const ClassTemplateDecl *D);
- void VisitClassTemplateSpecializationDecl(
- const ClassTemplateSpecializationDecl *D);
- void VisitClassTemplatePartialSpecializationDecl(
- const ClassTemplatePartialSpecializationDecl *D);
- void VisitClassScopeFunctionSpecializationDecl(
- const ClassScopeFunctionSpecializationDecl *D);
- void VisitBuiltinTemplateDecl(const BuiltinTemplateDecl *D);
- void VisitVarTemplateDecl(const VarTemplateDecl *D);
- void VisitVarTemplateSpecializationDecl(
- const VarTemplateSpecializationDecl *D);
- void VisitVarTemplatePartialSpecializationDecl(
- const VarTemplatePartialSpecializationDecl *D);
- void VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D);
- void VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *D);
- void VisitTemplateTemplateParmDecl(const TemplateTemplateParmDecl *D);
- void VisitUsingDecl(const UsingDecl *D);
- void VisitUnresolvedUsingTypenameDecl(const UnresolvedUsingTypenameDecl *D);
- void VisitUnresolvedUsingValueDecl(const UnresolvedUsingValueDecl *D);
- void VisitUsingShadowDecl(const UsingShadowDecl *D);
- void VisitConstructorUsingShadowDecl(const ConstructorUsingShadowDecl *D);
- void VisitLinkageSpecDecl(const LinkageSpecDecl *D);
- void VisitAccessSpecDecl(const AccessSpecDecl *D);
- void VisitFriendDecl(const FriendDecl *D);
-
- // ObjC Decls
- void VisitObjCIvarDecl(const ObjCIvarDecl *D);
- void VisitObjCMethodDecl(const ObjCMethodDecl *D);
- void VisitObjCTypeParamDecl(const ObjCTypeParamDecl *D);
- void VisitObjCCategoryDecl(const ObjCCategoryDecl *D);
- void VisitObjCCategoryImplDecl(const ObjCCategoryImplDecl *D);
- void VisitObjCProtocolDecl(const ObjCProtocolDecl *D);
- void VisitObjCInterfaceDecl(const ObjCInterfaceDecl *D);
- void VisitObjCImplementationDecl(const ObjCImplementationDecl *D);
- void VisitObjCCompatibleAliasDecl(const ObjCCompatibleAliasDecl *D);
- void VisitObjCPropertyDecl(const ObjCPropertyDecl *D);
- void VisitObjCPropertyImplDecl(const ObjCPropertyImplDecl *D);
- void Visit(const BlockDecl::Capture &C);
- void VisitBlockDecl(const BlockDecl *D);
-
- // Stmts.
- void VisitDeclStmt(const DeclStmt *Node);
- void VisitAttributedStmt(const AttributedStmt *Node);
- void VisitCXXCatchStmt(const CXXCatchStmt *Node);
- void VisitCapturedStmt(const CapturedStmt *Node);
-
- // OpenMP
- void Visit(const OMPClause *C);
- void VisitOMPExecutableDirective(const OMPExecutableDirective *Node);
-
- // Exprs
- void VisitInitListExpr(const InitListExpr *ILE);
- void VisitBlockExpr(const BlockExpr *Node);
- void VisitOpaqueValueExpr(const OpaqueValueExpr *Node);
- void VisitGenericSelectionExpr(const GenericSelectionExpr *E);
-
- // C++
- void VisitLambdaExpr(const LambdaExpr *Node) {
- dumpDecl(Node->getLambdaClass());
- }
- void VisitSizeOfPackExpr(const SizeOfPackExpr *Node);
-
- // ObjC
- void VisitObjCAtCatchStmt(const ObjCAtCatchStmt *Node);
-
- // Comments.
- void dumpComment(const Comment *C, const FullComment *FC);
-
- void VisitExpressionTemplateArgument(const TemplateArgument &TA) {
- dumpStmt(TA.getAsExpr());
- }
- void VisitPackTemplateArgument(const TemplateArgument &TA) {
- for (const auto &TArg : TA.pack_elements())
- dumpTemplateArgument(TArg);
- }
-
-// Implements Visit methods for Attrs.
-#include "clang/AST/AttrNodeTraverse.inc"
- };
-}
-
-//===----------------------------------------------------------------------===//
-// Utilities
-//===----------------------------------------------------------------------===//
-
-void ASTDumper::dumpTypeAsChild(QualType T) {
- SplitQualType SQT = T.split();
- if (!SQT.Quals.hasQualifiers())
- return dumpTypeAsChild(SQT.Ty);
-
- dumpChild([=] {
- NodeDumper.Visit(T);
- dumpTypeAsChild(T.split().Ty);
- });
-}
-
-void ASTDumper::dumpTypeAsChild(const Type *T) {
- dumpChild([=] {
- NodeDumper.Visit(T);
- if (!T)
- return;
- TypeVisitor<ASTDumper>::Visit(T);
-
- QualType SingleStepDesugar =
- T->getLocallyUnqualifiedSingleStepDesugaredType();
- if (SingleStepDesugar != QualType(T, 0))
- dumpTypeAsChild(SingleStepDesugar);
- });
-}
-
-void ASTDumper::dumpDeclContext(const DeclContext *DC) {
- if (!DC)
- return;
-
- for (auto *D : (Deserialize ? DC->decls() : DC->noload_decls()))
- dumpDecl(D);
-
- if (DC->hasExternalLexicalStorage()) {
- dumpChild([=] {
- ColorScope Color(OS, ShowColors, UndeserializedColor);
- OS << "<undeserialized declarations>";
- });
- }
-}
-
void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
- dumpChild([=] {
+ NodeDumper.AddChild([=] {
OS << "StoredDeclsMap ";
NodeDumper.dumpBareDeclRef(cast<Decl>(DC));
@@ -384,14 +35,14 @@ void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
bool HasUndeserializedLookups = Primary->hasExternalVisibleStorage();
- auto Range = Deserialize
+ auto Range = getDeserialize()
? Primary->lookups()
: Primary->noload_lookups(/*PreserveInternalState=*/true);
for (auto I = Range.begin(), E = Range.end(); I != E; ++I) {
DeclarationName Name = I.getLookupName();
DeclContextLookupResult R = *I;
- dumpChild([=] {
+ NodeDumper.AddChild([=] {
OS << "DeclarationName ";
{
ColorScope Color(OS, ShowColors, DeclNameColor);
@@ -400,7 +51,7 @@ void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
for (DeclContextLookupResult::iterator RI = R.begin(), RE = R.end();
RI != RE; ++RI) {
- dumpChild([=] {
+ NodeDumper.AddChild([=] {
NodeDumper.dumpBareDeclRef(*RI);
if ((*RI)->isHidden())
@@ -412,7 +63,7 @@ void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
std::function<void(Decl *)> DumpWithPrev = [&](Decl *D) {
if (Decl *Prev = D->getPreviousDecl())
DumpWithPrev(Prev);
- dumpDecl(D);
+ Visit(D);
};
DumpWithPrev(*RI);
}
@@ -422,7 +73,7 @@ void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
}
if (HasUndeserializedLookups) {
- dumpChild([=] {
+ NodeDumper.AddChild([=] {
ColorScope Color(OS, ShowColors, UndeserializedColor);
OS << "<undeserialized lookups>";
});
@@ -430,560 +81,12 @@ void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
});
}
-void ASTDumper::dumpAttr(const Attr *A) {
- dumpChild([=] {
- NodeDumper.Visit(A);
- ConstAttrVisitor<ASTDumper>::Visit(A);
- });
-}
-
-//===----------------------------------------------------------------------===//
-// C++ Utilities
-//===----------------------------------------------------------------------===//
-
-void ASTDumper::dumpCXXCtorInitializer(const CXXCtorInitializer *Init) {
- dumpChild([=] {
- NodeDumper.Visit(Init);
- dumpStmt(Init->getInit());
- });
-}
-
-void ASTDumper::dumpTemplateParameters(const TemplateParameterList *TPL) {
- if (!TPL)
- return;
-
- for (TemplateParameterList::const_iterator I = TPL->begin(), E = TPL->end();
- I != E; ++I)
- dumpDecl(*I);
-}
-
-void ASTDumper::dumpTemplateArgumentListInfo(
- const TemplateArgumentListInfo &TALI) {
- for (unsigned i = 0, e = TALI.size(); i < e; ++i)
- dumpTemplateArgumentLoc(TALI[i]);
-}
-
-void ASTDumper::dumpTemplateArgumentLoc(const TemplateArgumentLoc &A,
- const Decl *From, const char *Label) {
- dumpTemplateArgument(A.getArgument(), A.getSourceRange(), From, Label);
-}
-
-void ASTDumper::dumpTemplateArgumentList(const TemplateArgumentList &TAL) {
- for (unsigned i = 0, e = TAL.size(); i < e; ++i)
- dumpTemplateArgument(TAL[i]);
-}
-
-void ASTDumper::dumpTemplateArgument(const TemplateArgument &A, SourceRange R,
- const Decl *From, const char *Label) {
- dumpChild([=] {
- NodeDumper.Visit(A, R, From, Label);
- ConstTemplateArgumentVisitor<ASTDumper>::Visit(A);
- });
-}
-
-//===----------------------------------------------------------------------===//
-// Objective-C Utilities
-//===----------------------------------------------------------------------===//
-void ASTDumper::dumpObjCTypeParamList(const ObjCTypeParamList *typeParams) {
- if (!typeParams)
- return;
-
- for (auto typeParam : *typeParams) {
- dumpDecl(typeParam);
- }
-}
-
-//===----------------------------------------------------------------------===//
-// Decl dumping methods.
-//===----------------------------------------------------------------------===//
-
-void ASTDumper::dumpDecl(const Decl *D) {
- dumpChild([=] {
- NodeDumper.Visit(D);
- if (!D)
- return;
-
- ConstDeclVisitor<ASTDumper>::Visit(D);
-
- for (Decl::attr_iterator I = D->attr_begin(), E = D->attr_end(); I != E;
- ++I)
- dumpAttr(*I);
-
- if (const FullComment *Comment =
- D->getASTContext().getLocalCommentForDeclUncached(D))
- dumpComment(Comment, Comment);
-
- // Decls within functions are visited by the body.
- if (!isa<FunctionDecl>(*D) && !isa<ObjCMethodDecl>(*D)) {
- auto DC = dyn_cast<DeclContext>(D);
- if (DC &&
- (DC->hasExternalLexicalStorage() ||
- (Deserialize ? DC->decls_begin() != DC->decls_end()
- : DC->noload_decls_begin() != DC->noload_decls_end())))
- dumpDeclContext(DC);
- }
- });
-}
-
-void ASTDumper::VisitLabelDecl(const LabelDecl *D) { NodeDumper.dumpName(D); }
-
-void ASTDumper::VisitTypedefDecl(const TypedefDecl *D) {
- NodeDumper.dumpName(D);
- NodeDumper.dumpType(D->getUnderlyingType());
- if (D->isModulePrivate())
- OS << " __module_private__";
- dumpTypeAsChild(D->getUnderlyingType());
-}
-
-void ASTDumper::VisitEnumDecl(const EnumDecl *D) {
- if (D->isScoped()) {
- if (D->isScopedUsingClassTag())
- OS << " class";
- else
- OS << " struct";
- }
- NodeDumper.dumpName(D);
- if (D->isModulePrivate())
- OS << " __module_private__";
- if (D->isFixed())
- NodeDumper.dumpType(D->getIntegerType());
-}
-
-void ASTDumper::VisitRecordDecl(const RecordDecl *D) {
- OS << ' ' << D->getKindName();
- NodeDumper.dumpName(D);
- if (D->isModulePrivate())
- OS << " __module_private__";
- if (D->isCompleteDefinition())
- OS << " definition";
-}
-
-void ASTDumper::VisitEnumConstantDecl(const EnumConstantDecl *D) {
- NodeDumper.dumpName(D);
- NodeDumper.dumpType(D->getType());
- if (const Expr *Init = D->getInitExpr())
- dumpStmt(Init);
-}
-
-void ASTDumper::VisitIndirectFieldDecl(const IndirectFieldDecl *D) {
- NodeDumper.dumpName(D);
- NodeDumper.dumpType(D->getType());
-
- for (auto *Child : D->chain())
- NodeDumper.dumpDeclRef(Child);
-}
-
-void ASTDumper::VisitFunctionDecl(const FunctionDecl *D) {
- NodeDumper.dumpName(D);
- NodeDumper.dumpType(D->getType());
-
- StorageClass SC = D->getStorageClass();
- if (SC != SC_None)
- OS << ' ' << VarDecl::getStorageClassSpecifierString(SC);
- if (D->isInlineSpecified())
- OS << " inline";
- if (D->isVirtualAsWritten())
- OS << " virtual";
- if (D->isModulePrivate())
- OS << " __module_private__";
-
- if (D->isPure())
- OS << " pure";
- if (D->isDefaulted()) {
- OS << " default";
- if (D->isDeleted())
- OS << "_delete";
- }
- if (D->isDeletedAsWritten())
- OS << " delete";
- if (D->isTrivial())
- OS << " trivial";
-
- if (const auto *FPT = D->getType()->getAs<FunctionProtoType>()) {
- FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
- switch (EPI.ExceptionSpec.Type) {
- default: break;
- case EST_Unevaluated:
- OS << " noexcept-unevaluated " << EPI.ExceptionSpec.SourceDecl;
- break;
- case EST_Uninstantiated:
- OS << " noexcept-uninstantiated " << EPI.ExceptionSpec.SourceTemplate;
- break;
- }
- }
-
- if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) {
- if (MD->size_overridden_methods() != 0) {
- auto dumpOverride = [=](const CXXMethodDecl *D) {
- SplitQualType T_split = D->getType().split();
- OS << D << " " << D->getParent()->getName()
- << "::" << D->getNameAsString() << " '"
- << QualType::getAsString(T_split, PrintPolicy) << "'";
- };
-
- dumpChild([=] {
- auto Overrides = MD->overridden_methods();
- OS << "Overrides: [ ";
- dumpOverride(*Overrides.begin());
- for (const auto *Override :
- llvm::make_range(Overrides.begin() + 1, Overrides.end())) {
- OS << ", ";
- dumpOverride(Override);
- }
- OS << " ]";
- });
- }
- }
-
- if (const auto *FTSI = D->getTemplateSpecializationInfo())
- dumpTemplateArgumentList(*FTSI->TemplateArguments);
-
- if (!D->param_begin() && D->getNumParams())
- dumpChild([=] { OS << "<<NULL params x " << D->getNumParams() << ">>"; });
- else
- for (const ParmVarDecl *Parameter : D->parameters())
- dumpDecl(Parameter);
-
- if (const auto *C = dyn_cast<CXXConstructorDecl>(D))
- for (const auto *I : C->inits())
- dumpCXXCtorInitializer(I);
-
- if (D->doesThisDeclarationHaveABody())
- dumpStmt(D->getBody());
-}
-
-void ASTDumper::VisitFieldDecl(const FieldDecl *D) {
- NodeDumper.dumpName(D);
- NodeDumper.dumpType(D->getType());
- if (D->isMutable())
- OS << " mutable";
- if (D->isModulePrivate())
- OS << " __module_private__";
-
- if (D->isBitField())
- dumpStmt(D->getBitWidth());
- if (Expr *Init = D->getInClassInitializer())
- dumpStmt(Init);
-}
-
-void ASTDumper::VisitVarDecl(const VarDecl *D) {
- NodeDumper.dumpName(D);
- NodeDumper.dumpType(D->getType());
- StorageClass SC = D->getStorageClass();
- if (SC != SC_None)
- OS << ' ' << VarDecl::getStorageClassSpecifierString(SC);
- switch (D->getTLSKind()) {
- case VarDecl::TLS_None: break;
- case VarDecl::TLS_Static: OS << " tls"; break;
- case VarDecl::TLS_Dynamic: OS << " tls_dynamic"; break;
- }
- if (D->isModulePrivate())
- OS << " __module_private__";
- if (D->isNRVOVariable())
- OS << " nrvo";
- if (D->isInline())
- OS << " inline";
- if (D->isConstexpr())
- OS << " constexpr";
- if (D->hasInit()) {
- switch (D->getInitStyle()) {
- case VarDecl::CInit: OS << " cinit"; break;
- case VarDecl::CallInit: OS << " callinit"; break;
- case VarDecl::ListInit: OS << " listinit"; break;
- }
- dumpStmt(D->getInit());
- }
-}
-
-void ASTDumper::VisitDecompositionDecl(const DecompositionDecl *D) {
- VisitVarDecl(D);
- for (auto *B : D->bindings())
- dumpDecl(B);
-}
-
-void ASTDumper::VisitBindingDecl(const BindingDecl *D) {
- NodeDumper.dumpName(D);
- NodeDumper.dumpType(D->getType());
- if (auto *E = D->getBinding())
- dumpStmt(E);
-}
-
-void ASTDumper::VisitFileScopeAsmDecl(const FileScopeAsmDecl *D) {
- dumpStmt(D->getAsmString());
-}
-
-void ASTDumper::VisitImportDecl(const ImportDecl *D) {
- OS << ' ' << D->getImportedModule()->getFullModuleName();
-}
-
-void ASTDumper::VisitPragmaCommentDecl(const PragmaCommentDecl *D) {
- OS << ' ';
- switch (D->getCommentKind()) {
- case PCK_Unknown: llvm_unreachable("unexpected pragma comment kind");
- case PCK_Compiler: OS << "compiler"; break;
- case PCK_ExeStr: OS << "exestr"; break;
- case PCK_Lib: OS << "lib"; break;
- case PCK_Linker: OS << "linker"; break;
- case PCK_User: OS << "user"; break;
- }
- StringRef Arg = D->getArg();
- if (!Arg.empty())
- OS << " \"" << Arg << "\"";
-}
-
-void ASTDumper::VisitPragmaDetectMismatchDecl(
- const PragmaDetectMismatchDecl *D) {
- OS << " \"" << D->getName() << "\" \"" << D->getValue() << "\"";
-}
-
-void ASTDumper::VisitCapturedDecl(const CapturedDecl *D) {
- dumpStmt(D->getBody());
-}
-
-//===----------------------------------------------------------------------===//
-// OpenMP Declarations
-//===----------------------------------------------------------------------===//
-
-void ASTDumper::VisitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
- for (auto *E : D->varlists())
- dumpStmt(E);
-}
-
-void ASTDumper::VisitOMPDeclareReductionDecl(const OMPDeclareReductionDecl *D) {
- NodeDumper.dumpName(D);
- NodeDumper.dumpType(D->getType());
- OS << " combiner";
- NodeDumper.dumpPointer(D->getCombiner());
- if (const auto *Initializer = D->getInitializer()) {
- OS << " initializer";
- NodeDumper.dumpPointer(Initializer);
- switch (D->getInitializerKind()) {
- case OMPDeclareReductionDecl::DirectInit:
- OS << " omp_priv = ";
- break;
- case OMPDeclareReductionDecl::CopyInit:
- OS << " omp_priv ()";
- break;
- case OMPDeclareReductionDecl::CallInit:
- break;
- }
- }
-
- dumpStmt(D->getCombiner());
- if (const auto *Initializer = D->getInitializer())
- dumpStmt(Initializer);
-}
-
-void ASTDumper::VisitOMPRequiresDecl(const OMPRequiresDecl *D) {
- for (auto *C : D->clauselists()) {
- dumpChild([=] {
- if (!C) {
- ColorScope Color(OS, ShowColors, NullColor);
- OS << "<<<NULL>>> OMPClause";
- return;
- }
- {
- ColorScope Color(OS, ShowColors, AttrColor);
- StringRef ClauseName(getOpenMPClauseName(C->getClauseKind()));
- OS << "OMP" << ClauseName.substr(/*Start=*/0, /*N=*/1).upper()
- << ClauseName.drop_front() << "Clause";
- }
- NodeDumper.dumpPointer(C);
- NodeDumper.dumpSourceRange(SourceRange(C->getBeginLoc(), C->getEndLoc()));
- });
- }
-}
-
-void ASTDumper::VisitOMPCapturedExprDecl(const OMPCapturedExprDecl *D) {
- NodeDumper.dumpName(D);
- NodeDumper.dumpType(D->getType());
- dumpStmt(D->getInit());
-}
-
-//===----------------------------------------------------------------------===//
-// C++ Declarations
-//===----------------------------------------------------------------------===//
-
-void ASTDumper::VisitNamespaceDecl(const NamespaceDecl *D) {
- NodeDumper.dumpName(D);
- if (D->isInline())
- OS << " inline";
- if (!D->isOriginalNamespace())
- NodeDumper.dumpDeclRef(D->getOriginalNamespace(), "original");
-}
-
-void ASTDumper::VisitUsingDirectiveDecl(const UsingDirectiveDecl *D) {
- OS << ' ';
- NodeDumper.dumpBareDeclRef(D->getNominatedNamespace());
-}
-
-void ASTDumper::VisitNamespaceAliasDecl(const NamespaceAliasDecl *D) {
- NodeDumper.dumpName(D);
- NodeDumper.dumpDeclRef(D->getAliasedNamespace());
-}
-
-void ASTDumper::VisitTypeAliasDecl(const TypeAliasDecl *D) {
- NodeDumper.dumpName(D);
- NodeDumper.dumpType(D->getUnderlyingType());
- dumpTypeAsChild(D->getUnderlyingType());
-}
-
-void ASTDumper::VisitTypeAliasTemplateDecl(const TypeAliasTemplateDecl *D) {
- NodeDumper.dumpName(D);
- dumpTemplateParameters(D->getTemplateParameters());
- dumpDecl(D->getTemplatedDecl());
-}
-
-void ASTDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
- VisitRecordDecl(D);
- if (!D->isCompleteDefinition())
- return;
-
- dumpChild([=] {
- {
- ColorScope Color(OS, ShowColors, DeclKindNameColor);
- OS << "DefinitionData";
- }
-#define FLAG(fn, name) if (D->fn()) OS << " " #name;
- FLAG(isParsingBaseSpecifiers, parsing_base_specifiers);
-
- FLAG(isGenericLambda, generic);
- FLAG(isLambda, lambda);
-
- FLAG(canPassInRegisters, pass_in_registers);
- FLAG(isEmpty, empty);
- FLAG(isAggregate, aggregate);
- FLAG(isStandardLayout, standard_layout);
- FLAG(isTriviallyCopyable, trivially_copyable);
- FLAG(isPOD, pod);
- FLAG(isTrivial, trivial);
- FLAG(isPolymorphic, polymorphic);
- FLAG(isAbstract, abstract);
- FLAG(isLiteral, literal);
-
- FLAG(hasUserDeclaredConstructor, has_user_declared_ctor);
- FLAG(hasConstexprNonCopyMoveConstructor, has_constexpr_non_copy_move_ctor);
- FLAG(hasMutableFields, has_mutable_fields);
- FLAG(hasVariantMembers, has_variant_members);
- FLAG(allowConstDefaultInit, can_const_default_init);
-
- dumpChild([=] {
- {
- ColorScope Color(OS, ShowColors, DeclKindNameColor);
- OS << "DefaultConstructor";
- }
- FLAG(hasDefaultConstructor, exists);
- FLAG(hasTrivialDefaultConstructor, trivial);
- FLAG(hasNonTrivialDefaultConstructor, non_trivial);
- FLAG(hasUserProvidedDefaultConstructor, user_provided);
- FLAG(hasConstexprDefaultConstructor, constexpr);
- FLAG(needsImplicitDefaultConstructor, needs_implicit);
- FLAG(defaultedDefaultConstructorIsConstexpr, defaulted_is_constexpr);
- });
-
- dumpChild([=] {
- {
- ColorScope Color(OS, ShowColors, DeclKindNameColor);
- OS << "CopyConstructor";
- }
- FLAG(hasSimpleCopyConstructor, simple);
- FLAG(hasTrivialCopyConstructor, trivial);
- FLAG(hasNonTrivialCopyConstructor, non_trivial);
- FLAG(hasUserDeclaredCopyConstructor, user_declared);
- FLAG(hasCopyConstructorWithConstParam, has_const_param);
- FLAG(needsImplicitCopyConstructor, needs_implicit);
- FLAG(needsOverloadResolutionForCopyConstructor,
- needs_overload_resolution);
- if (!D->needsOverloadResolutionForCopyConstructor())
- FLAG(defaultedCopyConstructorIsDeleted, defaulted_is_deleted);
- FLAG(implicitCopyConstructorHasConstParam, implicit_has_const_param);
- });
-
- dumpChild([=] {
- {
- ColorScope Color(OS, ShowColors, DeclKindNameColor);
- OS << "MoveConstructor";
- }
- FLAG(hasMoveConstructor, exists);
- FLAG(hasSimpleMoveConstructor, simple);
- FLAG(hasTrivialMoveConstructor, trivial);
- FLAG(hasNonTrivialMoveConstructor, non_trivial);
- FLAG(hasUserDeclaredMoveConstructor, user_declared);
- FLAG(needsImplicitMoveConstructor, needs_implicit);
- FLAG(needsOverloadResolutionForMoveConstructor,
- needs_overload_resolution);
- if (!D->needsOverloadResolutionForMoveConstructor())
- FLAG(defaultedMoveConstructorIsDeleted, defaulted_is_deleted);
- });
-
- dumpChild([=] {
- {
- ColorScope Color(OS, ShowColors, DeclKindNameColor);
- OS << "CopyAssignment";
- }
- FLAG(hasTrivialCopyAssignment, trivial);
- FLAG(hasNonTrivialCopyAssignment, non_trivial);
- FLAG(hasCopyAssignmentWithConstParam, has_const_param);
- FLAG(hasUserDeclaredCopyAssignment, user_declared);
- FLAG(needsImplicitCopyAssignment, needs_implicit);
- FLAG(needsOverloadResolutionForCopyAssignment, needs_overload_resolution);
- FLAG(implicitCopyAssignmentHasConstParam, implicit_has_const_param);
- });
-
- dumpChild([=] {
- {
- ColorScope Color(OS, ShowColors, DeclKindNameColor);
- OS << "MoveAssignment";
- }
- FLAG(hasMoveAssignment, exists);
- FLAG(hasSimpleMoveAssignment, simple);
- FLAG(hasTrivialMoveAssignment, trivial);
- FLAG(hasNonTrivialMoveAssignment, non_trivial);
- FLAG(hasUserDeclaredMoveAssignment, user_declared);
- FLAG(needsImplicitMoveAssignment, needs_implicit);
- FLAG(needsOverloadResolutionForMoveAssignment, needs_overload_resolution);
- });
-
- dumpChild([=] {
- {
- ColorScope Color(OS, ShowColors, DeclKindNameColor);
- OS << "Destructor";
- }
- FLAG(hasSimpleDestructor, simple);
- FLAG(hasIrrelevantDestructor, irrelevant);
- FLAG(hasTrivialDestructor, trivial);
- FLAG(hasNonTrivialDestructor, non_trivial);
- FLAG(hasUserDeclaredDestructor, user_declared);
- FLAG(needsImplicitDestructor, needs_implicit);
- FLAG(needsOverloadResolutionForDestructor, needs_overload_resolution);
- if (!D->needsOverloadResolutionForDestructor())
- FLAG(defaultedDestructorIsDeleted, defaulted_is_deleted);
- });
- });
-
- for (const auto &I : D->bases()) {
- dumpChild([=] {
- if (I.isVirtual())
- OS << "virtual ";
- NodeDumper.dumpAccessSpecifier(I.getAccessSpecifier());
- NodeDumper.dumpType(I.getType());
- if (I.isPackExpansion())
- OS << "...";
- });
- }
-}
-
-void ASTDumper::VisitStaticAssertDecl(const StaticAssertDecl *D) {
- dumpStmt(D->getAssertExpr());
- dumpStmt(D->getMessage());
-}
-
template <typename SpecializationDecl>
void ASTDumper::dumpTemplateDeclSpecialization(const SpecializationDecl *D,
bool DumpExplicitInst,
bool DumpRefOnly) {
bool DumpedAny = false;
- for (auto *RedeclWithBadType : D->redecls()) {
+ for (const auto *RedeclWithBadType : D->redecls()) {
// FIXME: The redecls() range sometimes has elements of a less-specific
// type. (In particular, ClassTemplateSpecializationDecl::redecls() gives
// us TagDecls, and should give CXXRecordDecls).
@@ -1007,7 +110,7 @@ void ASTDumper::dumpTemplateDeclSpecialization(const SpecializationDecl *D,
if (DumpRefOnly)
NodeDumper.dumpDeclRef(Redecl);
else
- dumpDecl(Redecl);
+ Visit(Redecl);
DumpedAny = true;
break;
case TSK_ExplicitSpecialization:
@@ -1022,12 +125,11 @@ void ASTDumper::dumpTemplateDeclSpecialization(const SpecializationDecl *D,
template <typename TemplateDecl>
void ASTDumper::dumpTemplateDecl(const TemplateDecl *D, bool DumpExplicitInst) {
- NodeDumper.dumpName(D);
dumpTemplateParameters(D->getTemplateParameters());
- dumpDecl(D->getTemplatedDecl());
+ Visit(D->getTemplatedDecl());
- for (auto *Child : D->specializations())
+ for (const auto *Child : D->specializations())
dumpTemplateDeclSpecialization(Child, DumpExplicitInst,
!D->isCanonicalDecl());
}
@@ -1043,500 +145,10 @@ void ASTDumper::VisitClassTemplateDecl(const ClassTemplateDecl *D) {
dumpTemplateDecl(D, false);
}
-void ASTDumper::VisitClassTemplateSpecializationDecl(
- const ClassTemplateSpecializationDecl *D) {
- VisitCXXRecordDecl(D);
- dumpTemplateArgumentList(D->getTemplateArgs());
-}
-
-void ASTDumper::VisitClassTemplatePartialSpecializationDecl(
- const ClassTemplatePartialSpecializationDecl *D) {
- VisitClassTemplateSpecializationDecl(D);
- dumpTemplateParameters(D->getTemplateParameters());
-}
-
-void ASTDumper::VisitClassScopeFunctionSpecializationDecl(
- const ClassScopeFunctionSpecializationDecl *D) {
- dumpDecl(D->getSpecialization());
- if (D->hasExplicitTemplateArgs())
- dumpTemplateArgumentListInfo(D->templateArgs());
-}
-
void ASTDumper::VisitVarTemplateDecl(const VarTemplateDecl *D) {
dumpTemplateDecl(D, false);
}
-void ASTDumper::VisitBuiltinTemplateDecl(const BuiltinTemplateDecl *D) {
- NodeDumper.dumpName(D);
- dumpTemplateParameters(D->getTemplateParameters());
-}
-
-void ASTDumper::VisitVarTemplateSpecializationDecl(
- const VarTemplateSpecializationDecl *D) {
- dumpTemplateArgumentList(D->getTemplateArgs());
- VisitVarDecl(D);
-}
-
-void ASTDumper::VisitVarTemplatePartialSpecializationDecl(
- const VarTemplatePartialSpecializationDecl *D) {
- dumpTemplateParameters(D->getTemplateParameters());
- VisitVarTemplateSpecializationDecl(D);
-}
-
-void ASTDumper::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) {
- if (D->wasDeclaredWithTypename())
- OS << " typename";
- else
- OS << " class";
- OS << " depth " << D->getDepth() << " index " << D->getIndex();
- if (D->isParameterPack())
- OS << " ...";
- NodeDumper.dumpName(D);
- if (D->hasDefaultArgument())
- dumpTemplateArgument(D->getDefaultArgument(), SourceRange(),
- D->getDefaultArgStorage().getInheritedFrom(),
- D->defaultArgumentWasInherited() ? "inherited from"
- : "previous");
-}
-
-void ASTDumper::VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *D) {
- NodeDumper.dumpType(D->getType());
- OS << " depth " << D->getDepth() << " index " << D->getIndex();
- if (D->isParameterPack())
- OS << " ...";
- NodeDumper.dumpName(D);
- if (D->hasDefaultArgument())
- dumpTemplateArgument(D->getDefaultArgument(), SourceRange(),
- D->getDefaultArgStorage().getInheritedFrom(),
- D->defaultArgumentWasInherited() ? "inherited from"
- : "previous");
-}
-
-void ASTDumper::VisitTemplateTemplateParmDecl(
- const TemplateTemplateParmDecl *D) {
- OS << " depth " << D->getDepth() << " index " << D->getIndex();
- if (D->isParameterPack())
- OS << " ...";
- NodeDumper.dumpName(D);
- dumpTemplateParameters(D->getTemplateParameters());
- if (D->hasDefaultArgument())
- dumpTemplateArgumentLoc(
- D->getDefaultArgument(), D->getDefaultArgStorage().getInheritedFrom(),
- D->defaultArgumentWasInherited() ? "inherited from" : "previous");
-}
-
-void ASTDumper::VisitUsingDecl(const UsingDecl *D) {
- OS << ' ';
- if (D->getQualifier())
- D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
- OS << D->getNameAsString();
-}
-
-void ASTDumper::VisitUnresolvedUsingTypenameDecl(
- const UnresolvedUsingTypenameDecl *D) {
- OS << ' ';
- if (D->getQualifier())
- D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
- OS << D->getNameAsString();
-}
-
-void ASTDumper::VisitUnresolvedUsingValueDecl(const UnresolvedUsingValueDecl *D) {
- OS << ' ';
- if (D->getQualifier())
- D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
- OS << D->getNameAsString();
- NodeDumper.dumpType(D->getType());
-}
-
-void ASTDumper::VisitUsingShadowDecl(const UsingShadowDecl *D) {
- OS << ' ';
- NodeDumper.dumpBareDeclRef(D->getTargetDecl());
- if (auto *TD = dyn_cast<TypeDecl>(D->getUnderlyingDecl()))
- dumpTypeAsChild(TD->getTypeForDecl());
-}
-
-void ASTDumper::VisitConstructorUsingShadowDecl(
- const ConstructorUsingShadowDecl *D) {
- if (D->constructsVirtualBase())
- OS << " virtual";
-
- dumpChild([=] {
- OS << "target ";
- NodeDumper.dumpBareDeclRef(D->getTargetDecl());
- });
-
- dumpChild([=] {
- OS << "nominated ";
- NodeDumper.dumpBareDeclRef(D->getNominatedBaseClass());
- OS << ' ';
- NodeDumper.dumpBareDeclRef(D->getNominatedBaseClassShadowDecl());
- });
-
- dumpChild([=] {
- OS << "constructed ";
- NodeDumper.dumpBareDeclRef(D->getConstructedBaseClass());
- OS << ' ';
- NodeDumper.dumpBareDeclRef(D->getConstructedBaseClassShadowDecl());
- });
-}
-
-void ASTDumper::VisitLinkageSpecDecl(const LinkageSpecDecl *D) {
- switch (D->getLanguage()) {
- case LinkageSpecDecl::lang_c: OS << " C"; break;
- case LinkageSpecDecl::lang_cxx: OS << " C++"; break;
- }
-}
-
-void ASTDumper::VisitAccessSpecDecl(const AccessSpecDecl *D) {
- OS << ' ';
- NodeDumper.dumpAccessSpecifier(D->getAccess());
-}
-
-void ASTDumper::VisitFriendDecl(const FriendDecl *D) {
- if (TypeSourceInfo *T = D->getFriendType())
- NodeDumper.dumpType(T->getType());
- else
- dumpDecl(D->getFriendDecl());
-}
-
-//===----------------------------------------------------------------------===//
-// Obj-C Declarations
-//===----------------------------------------------------------------------===//
-
-void ASTDumper::VisitObjCIvarDecl(const ObjCIvarDecl *D) {
- NodeDumper.dumpName(D);
- NodeDumper.dumpType(D->getType());
- if (D->getSynthesize())
- OS << " synthesize";
-
- switch (D->getAccessControl()) {
- case ObjCIvarDecl::None:
- OS << " none";
- break;
- case ObjCIvarDecl::Private:
- OS << " private";
- break;
- case ObjCIvarDecl::Protected:
- OS << " protected";
- break;
- case ObjCIvarDecl::Public:
- OS << " public";
- break;
- case ObjCIvarDecl::Package:
- OS << " package";
- break;
- }
-}
-
-void ASTDumper::VisitObjCMethodDecl(const ObjCMethodDecl *D) {
- if (D->isInstanceMethod())
- OS << " -";
- else
- OS << " +";
- NodeDumper.dumpName(D);
- NodeDumper.dumpType(D->getReturnType());
-
- if (D->isThisDeclarationADefinition()) {
- dumpDeclContext(D);
- } else {
- for (const ParmVarDecl *Parameter : D->parameters())
- dumpDecl(Parameter);
- }
-
- if (D->isVariadic())
- dumpChild([=] { OS << "..."; });
-
- if (D->hasBody())
- dumpStmt(D->getBody());
-}
-
-void ASTDumper::VisitObjCTypeParamDecl(const ObjCTypeParamDecl *D) {
- NodeDumper.dumpName(D);
- switch (D->getVariance()) {
- case ObjCTypeParamVariance::Invariant:
- break;
-
- case ObjCTypeParamVariance::Covariant:
- OS << " covariant";
- break;
-
- case ObjCTypeParamVariance::Contravariant:
- OS << " contravariant";
- break;
- }
-
- if (D->hasExplicitBound())
- OS << " bounded";
- NodeDumper.dumpType(D->getUnderlyingType());
-}
-
-void ASTDumper::VisitObjCCategoryDecl(const ObjCCategoryDecl *D) {
- NodeDumper.dumpName(D);
- NodeDumper.dumpDeclRef(D->getClassInterface());
- NodeDumper.dumpDeclRef(D->getImplementation());
- for (ObjCCategoryDecl::protocol_iterator I = D->protocol_begin(),
- E = D->protocol_end();
- I != E; ++I)
- NodeDumper.dumpDeclRef(*I);
- dumpObjCTypeParamList(D->getTypeParamList());
-}
-
-void ASTDumper::VisitObjCCategoryImplDecl(const ObjCCategoryImplDecl *D) {
- NodeDumper.dumpName(D);
- NodeDumper.dumpDeclRef(D->getClassInterface());
- NodeDumper.dumpDeclRef(D->getCategoryDecl());
-}
-
-void ASTDumper::VisitObjCProtocolDecl(const ObjCProtocolDecl *D) {
- NodeDumper.dumpName(D);
-
- for (auto *Child : D->protocols())
- NodeDumper.dumpDeclRef(Child);
-}
-
-void ASTDumper::VisitObjCInterfaceDecl(const ObjCInterfaceDecl *D) {
- NodeDumper.dumpName(D);
- NodeDumper.dumpDeclRef(D->getSuperClass(), "super");
-
- NodeDumper.dumpDeclRef(D->getImplementation());
- for (auto *Child : D->protocols())
- NodeDumper.dumpDeclRef(Child);
- dumpObjCTypeParamList(D->getTypeParamListAsWritten());
-}
-
-void ASTDumper::VisitObjCImplementationDecl(const ObjCImplementationDecl *D) {
- NodeDumper.dumpName(D);
- NodeDumper.dumpDeclRef(D->getSuperClass(), "super");
- NodeDumper.dumpDeclRef(D->getClassInterface());
- for (ObjCImplementationDecl::init_const_iterator I = D->init_begin(),
- E = D->init_end();
- I != E; ++I)
- dumpCXXCtorInitializer(*I);
-}
-
-void ASTDumper::VisitObjCCompatibleAliasDecl(const ObjCCompatibleAliasDecl *D) {
- NodeDumper.dumpName(D);
- NodeDumper.dumpDeclRef(D->getClassInterface());
-}
-
-void ASTDumper::VisitObjCPropertyDecl(const ObjCPropertyDecl *D) {
- NodeDumper.dumpName(D);
- NodeDumper.dumpType(D->getType());
-
- if (D->getPropertyImplementation() == ObjCPropertyDecl::Required)
- OS << " required";
- else if (D->getPropertyImplementation() == ObjCPropertyDecl::Optional)
- OS << " optional";
-
- ObjCPropertyDecl::PropertyAttributeKind Attrs = D->getPropertyAttributes();
- if (Attrs != ObjCPropertyDecl::OBJC_PR_noattr) {
- if (Attrs & ObjCPropertyDecl::OBJC_PR_readonly)
- OS << " readonly";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_assign)
- OS << " assign";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_readwrite)
- OS << " readwrite";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_retain)
- OS << " retain";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_copy)
- OS << " copy";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_nonatomic)
- OS << " nonatomic";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_atomic)
- OS << " atomic";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_weak)
- OS << " weak";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_strong)
- OS << " strong";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_unsafe_unretained)
- OS << " unsafe_unretained";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_class)
- OS << " class";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_getter)
- NodeDumper.dumpDeclRef(D->getGetterMethodDecl(), "getter");
- if (Attrs & ObjCPropertyDecl::OBJC_PR_setter)
- NodeDumper.dumpDeclRef(D->getSetterMethodDecl(), "setter");
- }
-}
-
-void ASTDumper::VisitObjCPropertyImplDecl(const ObjCPropertyImplDecl *D) {
- NodeDumper.dumpName(D->getPropertyDecl());
- if (D->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize)
- OS << " synthesize";
- else
- OS << " dynamic";
- NodeDumper.dumpDeclRef(D->getPropertyDecl());
- NodeDumper.dumpDeclRef(D->getPropertyIvarDecl());
-}
-
-void ASTDumper::Visit(const BlockDecl::Capture &C) {
- dumpChild([=] {
- NodeDumper.Visit(C);
- if (C.hasCopyExpr())
- dumpStmt(C.getCopyExpr());
- });
-}
-
-void ASTDumper::VisitBlockDecl(const BlockDecl *D) {
- for (auto I : D->parameters())
- dumpDecl(I);
-
- if (D->isVariadic())
- dumpChild([=]{ OS << "..."; });
-
- if (D->capturesCXXThis())
- dumpChild([=]{ OS << "capture this"; });
-
- for (const auto &I : D->captures())
- Visit(I);
- dumpStmt(D->getBody());
-}
-
-//===----------------------------------------------------------------------===//
-// Stmt dumping methods.
-//===----------------------------------------------------------------------===//
-
-void ASTDumper::dumpStmt(const Stmt *S, StringRef Label) {
- dumpChild(Label, [=] {
- NodeDumper.Visit(S);
-
- if (!S) {
- return;
- }
-
- ConstStmtVisitor<ASTDumper>::Visit(S);
-
- // Some statements have custom mechanisms for dumping their children.
- if (isa<DeclStmt>(S) || isa<GenericSelectionExpr>(S)) {
- return;
- }
-
- for (const Stmt *SubStmt : S->children())
- dumpStmt(SubStmt);
- });
-}
-
-void ASTDumper::VisitDeclStmt(const DeclStmt *Node) {
- for (DeclStmt::const_decl_iterator I = Node->decl_begin(),
- E = Node->decl_end();
- I != E; ++I)
- dumpDecl(*I);
-}
-
-void ASTDumper::VisitAttributedStmt(const AttributedStmt *Node) {
- for (ArrayRef<const Attr *>::iterator I = Node->getAttrs().begin(),
- E = Node->getAttrs().end();
- I != E; ++I)
- dumpAttr(*I);
-}
-
-void ASTDumper::VisitCXXCatchStmt(const CXXCatchStmt *Node) {
- dumpDecl(Node->getExceptionDecl());
-}
-
-void ASTDumper::VisitCapturedStmt(const CapturedStmt *Node) {
- dumpDecl(Node->getCapturedDecl());
-}
-
-//===----------------------------------------------------------------------===//
-// OpenMP dumping methods.
-//===----------------------------------------------------------------------===//
-
-void ASTDumper::Visit(const OMPClause *C) {
- dumpChild([=] {
- NodeDumper.Visit(C);
- for (auto *S : C->children())
- dumpStmt(S);
- });
-}
-
-void ASTDumper::VisitOMPExecutableDirective(
- const OMPExecutableDirective *Node) {
- for (const auto *C : Node->clauses())
- Visit(C);
-}
-
-//===----------------------------------------------------------------------===//
-// Expr dumping methods.
-//===----------------------------------------------------------------------===//
-
-
-void ASTDumper::VisitInitListExpr(const InitListExpr *ILE) {
- if (auto *Filler = ILE->getArrayFiller()) {
- dumpStmt(Filler, "array_filler");
- }
-}
-
-void ASTDumper::VisitBlockExpr(const BlockExpr *Node) {
- dumpDecl(Node->getBlockDecl());
-}
-
-void ASTDumper::VisitOpaqueValueExpr(const OpaqueValueExpr *Node) {
- if (Expr *Source = Node->getSourceExpr())
- dumpStmt(Source);
-}
-
-void ASTDumper::VisitGenericSelectionExpr(const GenericSelectionExpr *E) {
- if (E->isResultDependent())
- OS << " result_dependent";
- dumpStmt(E->getControllingExpr());
- dumpTypeAsChild(E->getControllingExpr()->getType()); // FIXME: remove
-
- for (unsigned I = 0, N = E->getNumAssocs(); I != N; ++I) {
- dumpChild([=] {
- if (const TypeSourceInfo *TSI = E->getAssocTypeSourceInfo(I)) {
- OS << "case ";
- NodeDumper.dumpType(TSI->getType());
- } else {
- OS << "default";
- }
-
- if (!E->isResultDependent() && E->getResultIndex() == I)
- OS << " selected";
-
- if (const TypeSourceInfo *TSI = E->getAssocTypeSourceInfo(I))
- dumpTypeAsChild(TSI->getType());
- dumpStmt(E->getAssocExpr(I));
- });
- }
-}
-
-//===----------------------------------------------------------------------===//
-// C++ Expressions
-//===----------------------------------------------------------------------===//
-
-void ASTDumper::VisitSizeOfPackExpr(const SizeOfPackExpr *Node) {
- if (Node->isPartiallySubstituted())
- for (const auto &A : Node->getPartialArguments())
- dumpTemplateArgument(A);
-}
-
-//===----------------------------------------------------------------------===//
-// Obj-C Expressions
-//===----------------------------------------------------------------------===//
-
-void ASTDumper::VisitObjCAtCatchStmt(const ObjCAtCatchStmt *Node) {
- if (const VarDecl *CatchParam = Node->getCatchParamDecl())
- dumpDecl(CatchParam);
-}
-
-//===----------------------------------------------------------------------===//
-// Comments
-//===----------------------------------------------------------------------===//
-
-void ASTDumper::dumpComment(const Comment *C, const FullComment *FC) {
- dumpChild([=] {
- NodeDumper.Visit(C, FC);
- if (!C) {
- return;
- }
- ConstCommentVisitor<ASTDumper, void, const FullComment *>::visit(C, FC);
- for (Comment::child_iterator I = C->child_begin(), E = C->child_end();
- I != E; ++I)
- dumpComment(*I, FC);
- });
-}
-
//===----------------------------------------------------------------------===//
// Type method implementations
//===----------------------------------------------------------------------===//
@@ -1551,7 +163,7 @@ LLVM_DUMP_METHOD void QualType::dump() const { dump(llvm::errs()); }
LLVM_DUMP_METHOD void QualType::dump(llvm::raw_ostream &OS) const {
ASTDumper Dumper(OS, nullptr, nullptr);
- Dumper.dumpTypeAsChild(*this);
+ Dumper.Visit(*this);
}
LLVM_DUMP_METHOD void Type::dump() const { dump(llvm::errs()); }
@@ -1566,13 +178,22 @@ LLVM_DUMP_METHOD void Type::dump(llvm::raw_ostream &OS) const {
LLVM_DUMP_METHOD void Decl::dump() const { dump(llvm::errs()); }
-LLVM_DUMP_METHOD void Decl::dump(raw_ostream &OS, bool Deserialize) const {
- const ASTContext &Ctx = getASTContext();
+LLVM_DUMP_METHOD void Decl::dump(raw_ostream &OS, bool Deserialize,
+ ASTDumpOutputFormat Format) const {
+ ASTContext &Ctx = getASTContext();
const SourceManager &SM = Ctx.getSourceManager();
- ASTDumper P(OS, &Ctx.getCommentCommandTraits(), &SM,
- SM.getDiagnostics().getShowColors(), Ctx.getPrintingPolicy());
- P.setDeserialize(Deserialize);
- P.dumpDecl(this);
+
+ if (ADOF_JSON == Format) {
+ JSONDumper P(OS, SM, Ctx, Ctx.getPrintingPolicy(),
+ &Ctx.getCommentCommandTraits());
+ (void)Deserialize; // FIXME?
+ P.Visit(this);
+ } else {
+ ASTDumper P(OS, &Ctx.getCommentCommandTraits(), &SM,
+ SM.getDiagnostics().getShowColors(), Ctx.getPrintingPolicy());
+ P.setDeserialize(Deserialize);
+ P.Visit(this);
+ }
}
LLVM_DUMP_METHOD void Decl::dumpColor() const {
@@ -1580,7 +201,7 @@ LLVM_DUMP_METHOD void Decl::dumpColor() const {
ASTDumper P(llvm::errs(), &Ctx.getCommentCommandTraits(),
&Ctx.getSourceManager(), /*ShowColors*/ true,
Ctx.getPrintingPolicy());
- P.dumpDecl(this);
+ P.Visit(this);
}
LLVM_DUMP_METHOD void DeclContext::dumpLookups() const {
@@ -1611,22 +232,22 @@ LLVM_DUMP_METHOD void Stmt::dump(SourceManager &SM) const {
LLVM_DUMP_METHOD void Stmt::dump(raw_ostream &OS, SourceManager &SM) const {
ASTDumper P(OS, nullptr, &SM);
- P.dumpStmt(this);
+ P.Visit(this);
}
LLVM_DUMP_METHOD void Stmt::dump(raw_ostream &OS) const {
ASTDumper P(OS, nullptr, nullptr);
- P.dumpStmt(this);
+ P.Visit(this);
}
LLVM_DUMP_METHOD void Stmt::dump() const {
ASTDumper P(llvm::errs(), nullptr, nullptr);
- P.dumpStmt(this);
+ P.Visit(this);
}
LLVM_DUMP_METHOD void Stmt::dumpColor() const {
ASTDumper P(llvm::errs(), nullptr, nullptr, /*ShowColors*/true);
- P.dumpStmt(this);
+ P.Visit(this);
}
//===----------------------------------------------------------------------===//
@@ -1648,7 +269,7 @@ void Comment::dump(raw_ostream &OS, const CommandTraits *Traits,
if (!FC)
return;
ASTDumper D(OS, Traits, SM);
- D.dumpComment(FC, FC);
+ D.Visit(FC, FC);
}
LLVM_DUMP_METHOD void Comment::dumpColor() const {
@@ -1656,5 +277,5 @@ LLVM_DUMP_METHOD void Comment::dumpColor() const {
if (!FC)
return;
ASTDumper D(llvm::errs(), nullptr, nullptr, /*ShowColors*/true);
- D.dumpComment(FC, FC);
+ D.Visit(FC, FC);
}
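As a usage illustration of the format selection added above (a minimal sketch, not part of the patch; it assumes ASTDumpOutputFormat and ADOF_JSON are visible through clang/AST/ASTDumperUtils.h):

#include "clang/AST/Decl.h"
#include "clang/AST/ASTDumperUtils.h"
#include "llvm/Support/raw_ostream.h"

// Dump a declaration through the JSON path that Decl::dump now exposes; any
// other format value falls back to the classic tree-style ASTDumper, as the
// else branch above shows.
void dumpDeclAsJSON(const clang::Decl *D) {
  D->dump(llvm::errs(), /*Deserialize=*/false, clang::ADOF_JSON);
}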
diff --git a/lib/AST/ASTImporter.cpp b/lib/AST/ASTImporter.cpp
index 44832557e97b..9d5dd84161de 100644
--- a/lib/AST/ASTImporter.cpp
+++ b/lib/AST/ASTImporter.cpp
@@ -1,9 +1,8 @@
//===- ASTImporter.cpp - Importing ASTs from other Contexts ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -13,7 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTImporter.h"
-#include "clang/AST/ASTImporterLookupTable.h"
+#include "clang/AST/ASTImporterSharedState.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/ASTStructuralEquivalence.h"
@@ -58,6 +57,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
@@ -135,25 +135,6 @@ namespace clang {
To->setIsUsed();
}
- // FIXME: Temporary until every import returns Expected.
- template <>
- LLVM_NODISCARD Error
- ASTImporter::importInto(SourceLocation &To, const SourceLocation &From) {
- To = Import(From);
- if (From.isValid() && To.isInvalid())
- return llvm::make_error<ImportError>();
- return Error::success();
- }
- // FIXME: Temporary until every import returns Expected.
- template <>
- LLVM_NODISCARD Error
- ASTImporter::importInto(QualType &To, const QualType &From) {
- To = Import(From);
- if (!From.isNull() && To.isNull())
- return llvm::make_error<ImportError>();
- return Error::success();
- }
-
class ASTNodeImporter : public TypeVisitor<ASTNodeImporter, ExpectedType>,
public DeclVisitor<ASTNodeImporter, ExpectedDecl>,
public StmtVisitor<ASTNodeImporter, ExpectedStmt> {
@@ -168,32 +149,20 @@ namespace clang {
// Use this to import pointers of specific type.
template <typename ImportT>
LLVM_NODISCARD Error importInto(ImportT *&To, ImportT *From) {
- auto ToI = Importer.Import(From);
- if (!ToI && From)
- return make_error<ImportError>();
- To = cast_or_null<ImportT>(ToI);
- return Error::success();
- // FIXME: This should be the final code.
- //auto ToOrErr = Importer.Import(From);
- //if (ToOrErr) {
- // To = cast_or_null<ImportT>(*ToOrErr);
- //}
- //return ToOrErr.takeError();
+ auto ToOrErr = Importer.Import(From);
+ if (ToOrErr)
+ To = cast_or_null<ImportT>(*ToOrErr);
+ return ToOrErr.takeError();
}
// Call the import function of ASTImporter for a baseclass of type `T` and
// cast the return value to `T`.
template <typename T>
Expected<T *> import(T *From) {
- auto *To = Importer.Import(From);
- if (!To && From)
- return make_error<ImportError>();
- return cast_or_null<T>(To);
- // FIXME: This should be the final code.
- //auto ToOrErr = Importer.Import(From);
- //if (!ToOrErr)
- // return ToOrErr.takeError();
- //return cast_or_null<T>(*ToOrErr);
+ auto ToOrErr = Importer.Import(From);
+ if (!ToOrErr)
+ return ToOrErr.takeError();
+ return cast_or_null<T>(*ToOrErr);
}
template <typename T>
@@ -204,13 +173,15 @@ namespace clang {
// Call the import function of ASTImporter for type `T`.
template <typename T>
Expected<T> import(const T &From) {
- T To = Importer.Import(From);
- T DefaultT;
- if (To == DefaultT && !(From == DefaultT))
- return make_error<ImportError>();
- return To;
- // FIXME: This should be the final code.
- //return Importer.Import(From);
+ return Importer.Import(From);
+ }
+
+ // Import an Optional<T> by importing the contained T, if any.
+ template<typename T>
+ Expected<Optional<T>> import(Optional<T> From) {
+ if (!From)
+ return Optional<T>();
+ return import(*From);
}
template <class T>
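The rewritten importInto()/import() helpers above establish the Expected-based convention used throughout the rest of this patch: unwrap the value when the import succeeds, otherwise propagate the failure with takeError() rather than collapsing it to a null result. A minimal self-contained sketch of that convention against llvm/Support/Error.h (importWidth and importAll are hypothetical stand-ins, not ASTImporter APIs):

#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical import step: yields a value on success, an llvm::Error otherwise.
static llvm::Expected<int> importWidth(bool Ok) {
  if (!Ok)
    return llvm::createStringError(llvm::inconvertibleErrorCode(),
                                   "width not importable");
  return 42;
}

// Caller-side pattern mirrored by importInto()/import(): check, then either
// dereference the Expected or hand the error upwards.
static llvm::Error importAll(bool Ok) {
  llvm::Expected<int> WidthOrErr = importWidth(Ok);
  if (!WidthOrErr)
    return WidthOrErr.takeError();
  int Width = *WidthOrErr; // Safe only after the check above.
  (void)Width;
  return llvm::Error::success();
}

int main() {
  if (llvm::Error E = importAll(/*Ok=*/false)) {
    llvm::logAllUnhandledErrors(std::move(E), llvm::errs(), "import: ");
    return 1;
  }
  return 0;
}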
@@ -283,18 +254,16 @@ namespace clang {
LLVM_NODISCARD bool
GetImportedOrCreateSpecialDecl(ToDeclT *&ToD, CreateFunT CreateFun,
FromDeclT *FromD, Args &&... args) {
- // FIXME: This code is needed later.
- //if (Importer.getImportDeclErrorIfAny(FromD)) {
- // ToD = nullptr;
- // return true; // Already imported but with error.
- //}
+ if (Importer.getImportDeclErrorIfAny(FromD)) {
+ ToD = nullptr;
+ return true; // Already imported but with error.
+ }
ToD = cast_or_null<ToDeclT>(Importer.GetAlreadyImportedOrNull(FromD));
if (ToD)
return true; // Already imported.
ToD = CreateFun(std::forward<Args>(args)...);
// Keep track of imported Decls.
- Importer.MapImported(FromD, ToD);
- Importer.AddToLookupTable(ToD);
+ Importer.RegisterImportedDecl(FromD, ToD);
InitializeImportedDecl(FromD, ToD);
return false; // A new Decl is created.
}
@@ -302,14 +271,31 @@ namespace clang {
void InitializeImportedDecl(Decl *FromD, Decl *ToD) {
ToD->IdentifierNamespace = FromD->IdentifierNamespace;
if (FromD->hasAttrs())
- for (const Attr *FromAttr : FromD->getAttrs())
- ToD->addAttr(Importer.Import(FromAttr));
+ for (const Attr *FromAttr : FromD->getAttrs()) {
+ // FIXME: Returning the error here is not possible until storing of
+ // import errors is implemented.
+ auto ToAttrOrErr = import(FromAttr);
+ if (ToAttrOrErr)
+ ToD->addAttr(*ToAttrOrErr);
+ else
+ llvm::consumeError(ToAttrOrErr.takeError());
+ }
if (FromD->isUsed())
ToD->setIsUsed();
if (FromD->isImplicit())
ToD->setImplicit();
}
+ // Check if we have found an existing definition. Returns with that
+ // definition if yes, otherwise returns null.
+ Decl *FindAndMapDefinition(FunctionDecl *D, FunctionDecl *FoundFunction) {
+ const FunctionDecl *Definition = nullptr;
+ if (D->doesThisDeclarationHaveABody() &&
+ FoundFunction->hasBody(Definition))
+ return Importer.MapImported(D, const_cast<FunctionDecl *>(Definition));
+ return nullptr;
+ }
+
public:
explicit ASTNodeImporter(ASTImporter &Importer) : Importer(Importer) {}
@@ -412,8 +398,6 @@ namespace clang {
Error ImportDefinition(
ObjCProtocolDecl *From, ObjCProtocolDecl *To,
ImportDefinitionKind Kind = IDK_Default);
- Expected<TemplateParameterList *> ImportTemplateParameterList(
- TemplateParameterList *Params);
Error ImportTemplateArguments(
const TemplateArgument *FromArgs, unsigned NumFromArgs,
SmallVectorImpl<TemplateArgument> &ToArgs);
@@ -435,9 +419,16 @@ namespace clang {
Expected<FunctionTemplateAndArgsTy>
ImportFunctionTemplateWithTemplateArgsFromSpecialization(
FunctionDecl *FromFD);
+ Error ImportTemplateParameterLists(const DeclaratorDecl *FromD,
+ DeclaratorDecl *ToD);
Error ImportTemplateInformation(FunctionDecl *FromFD, FunctionDecl *ToFD);
+ Error ImportFunctionDeclBody(FunctionDecl *FromFD, FunctionDecl *ToFD);
+
+ template <typename T>
+ bool hasSameVisibilityContext(T *Found, T *From);
+
bool IsStructuralMatch(Decl *From, Decl *To, bool Complain);
bool IsStructuralMatch(RecordDecl *FromRecord, RecordDecl *ToRecord,
bool Complain = true);
@@ -548,6 +539,7 @@ namespace clang {
// Importing expressions
ExpectedStmt VisitExpr(Expr *E);
ExpectedStmt VisitVAArgExpr(VAArgExpr *E);
+ ExpectedStmt VisitChooseExpr(ChooseExpr *E);
ExpectedStmt VisitGNUNullExpr(GNUNullExpr *E);
ExpectedStmt VisitPredefinedExpr(PredefinedExpr *E);
ExpectedStmt VisitDeclRefExpr(DeclRefExpr *E);
@@ -649,15 +641,6 @@ namespace clang {
FunctionDecl *FromFD);
};
-// FIXME: Temporary until every import returns Expected.
-template <>
-Expected<TemplateName> ASTNodeImporter::import(const TemplateName &From) {
- TemplateName To = Importer.Import(From);
- if (To.isNull() && !From.isNull())
- return make_error<ImportError>();
- return To;
-}
-
template <typename InContainerTy>
Error ASTNodeImporter::ImportTemplateArgumentListInfo(
SourceLocation FromLAngleLoc, SourceLocation FromRAngleLoc,
@@ -1649,16 +1632,32 @@ ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
auto ToDCOrErr = Importer.ImportContext(FromDC);
return ToDCOrErr.takeError();
}
+
+ // We use strict error handling in case of records and enums, but not
+ // with e.g. namespaces.
+ //
+ // FIXME Clients of the ASTImporter should be able to choose an
+ // appropriate error handling strategy for their needs. For instance,
+ // they may not want to mark an entire namespace as erroneous merely
+ // because there is an ODR error with two typedefs. As another example,
+ // the client may allow EnumConstantDecls with same names but with
+ // different values in two distinct translation units.
+ bool AccumulateChildErrors = isa<TagDecl>(FromDC);
+
+ Error ChildErrors = Error::success();
llvm::SmallVector<Decl *, 8> ImportedDecls;
for (auto *From : FromDC->decls()) {
ExpectedDecl ImportedOrErr = import(From);
- if (!ImportedOrErr)
- // Ignore the error, continue with next Decl.
- // FIXME: Handle this case somehow better.
- consumeError(ImportedOrErr.takeError());
+ if (!ImportedOrErr) {
+ if (AccumulateChildErrors)
+ ChildErrors =
+ joinErrors(std::move(ChildErrors), ImportedOrErr.takeError());
+ else
+ consumeError(ImportedOrErr.takeError());
+ }
}
- return Error::success();
+ return ChildErrors;
}
Error ASTNodeImporter::ImportDeclContext(
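The AccumulateChildErrors strategy described in the comment above comes down to choosing between joinErrors() and consumeError() for each failed child. A small standalone sketch of that choice (importChild is hypothetical and fails unconditionally, purely to show how the failures are combined):

#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical per-child import that always fails.
static llvm::Error importChild(unsigned I) {
  return llvm::createStringError(llvm::inconvertibleErrorCode(),
                                 "could not import child %u", I);
}

static llvm::Error importChildren(bool AccumulateChildErrors) {
  llvm::Error ChildErrors = llvm::Error::success();
  for (unsigned I = 0; I < 3; ++I) {
    llvm::Error E = importChild(I);
    if (!E)
      continue;
    if (AccumulateChildErrors)
      // Strict mode (tag decls): remember every child failure.
      ChildErrors = llvm::joinErrors(std::move(ChildErrors), std::move(E));
    else
      // Lenient mode (e.g. namespaces): drop the failure and keep going.
      llvm::consumeError(std::move(E));
  }
  return ChildErrors;
}

int main() {
  if (llvm::Error E = importChildren(/*AccumulateChildErrors=*/true))
    llvm::logAllUnhandledErrors(std::move(E), llvm::errs(), "import: ");
  return 0;
}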
@@ -1698,29 +1697,50 @@ Error ASTNodeImporter::ImportImplicitMethods(
static Error setTypedefNameForAnonDecl(TagDecl *From, TagDecl *To,
ASTImporter &Importer) {
if (TypedefNameDecl *FromTypedef = From->getTypedefNameForAnonDecl()) {
- Decl *ToTypedef = Importer.Import(FromTypedef);
- if (!ToTypedef)
- return make_error<ImportError>();
- To->setTypedefNameForAnonDecl(cast<TypedefNameDecl>(ToTypedef));
- // FIXME: This should be the final code.
- //if (Expected<Decl *> ToTypedefOrErr = Importer.Import(FromTypedef))
- // To->setTypedefNameForAnonDecl(cast<TypedefNameDecl>(*ToTypedefOrErr));
- //else
- // return ToTypedefOrErr.takeError();
+ if (ExpectedDecl ToTypedefOrErr = Importer.Import(FromTypedef))
+ To->setTypedefNameForAnonDecl(cast<TypedefNameDecl>(*ToTypedefOrErr));
+ else
+ return ToTypedefOrErr.takeError();
}
return Error::success();
}
Error ASTNodeImporter::ImportDefinition(
RecordDecl *From, RecordDecl *To, ImportDefinitionKind Kind) {
+ auto DefinitionCompleter = [To]() {
+ // There are cases in LLDB when we first import a class without its
+ // members. The class will have DefinitionData, but no members. Then,
+ // importDefinition is called from LLDB, which tries to get the members, so
+ // when we get here, the class already has the DefinitionData set, so we
+ // must unset the CompleteDefinition here to be able to complete the
+ // definition again.
+ To->setCompleteDefinition(false);
+ To->completeDefinition();
+ };
+
if (To->getDefinition() || To->isBeingDefined()) {
- if (Kind == IDK_Everything)
- return ImportDeclContext(From, /*ForceImport=*/true);
+ if (Kind == IDK_Everything ||
+ // In case of lambdas, the class already has a definition ptr set, but
+ // the contained decls are not imported yet. Also, isBeingDefined was
+ // set in CXXRecordDecl::CreateLambda. We must import the contained
+ // decls here and finish the definition.
+ (To->isLambda() && shouldForceImportDeclContext(Kind))) {
+ Error Result = ImportDeclContext(From, /*ForceImport=*/true);
+ // Finish the definition of the lambda, set isBeingDefined to false.
+ if (To->isLambda())
+ DefinitionCompleter();
+ return Result;
+ }
return Error::success();
}
To->startDefinition();
+ // Complete the definition even if error is returned.
+ // The RecordDecl may be already part of the AST so it is better to
+ // have it in complete state even if something is wrong with it.
+ auto DefinitionCompleterScopeExit =
+ llvm::make_scope_exit(DefinitionCompleter);
if (Error Err = setTypedefNameForAnonDecl(From, To, Importer))
return Err;
@@ -1798,6 +1818,9 @@ Error ASTNodeImporter::ImportDefinition(
ToData.HasDeclaredCopyAssignmentWithConstParam
= FromData.HasDeclaredCopyAssignmentWithConstParam;
+ // Copy over the data stored in RecordDeclBits
+ ToCXX->setArgPassingRestrictions(FromCXX->getArgPassingRestrictions());
+
SmallVector<CXXBaseSpecifier *, 4> Bases;
for (const auto &Base1 : FromCXX->bases()) {
ExpectedType TyOrErr = import(Base1.getType());
@@ -1842,7 +1865,6 @@ Error ASTNodeImporter::ImportDefinition(
if (Error Err = ImportDeclContext(From, /*ForceImport=*/true))
return Err;
- To->completeDefinition();
return Error::success();
}
@@ -1903,40 +1925,6 @@ Error ASTNodeImporter::ImportDefinition(
return Error::success();
}
-// FIXME: Remove this, use `import` instead.
-Expected<TemplateParameterList *> ASTNodeImporter::ImportTemplateParameterList(
- TemplateParameterList *Params) {
- SmallVector<NamedDecl *, 4> ToParams(Params->size());
- if (Error Err = ImportContainerChecked(*Params, ToParams))
- return std::move(Err);
-
- Expr *ToRequiresClause;
- if (Expr *const R = Params->getRequiresClause()) {
- if (Error Err = importInto(ToRequiresClause, R))
- return std::move(Err);
- } else {
- ToRequiresClause = nullptr;
- }
-
- auto ToTemplateLocOrErr = import(Params->getTemplateLoc());
- if (!ToTemplateLocOrErr)
- return ToTemplateLocOrErr.takeError();
- auto ToLAngleLocOrErr = import(Params->getLAngleLoc());
- if (!ToLAngleLocOrErr)
- return ToLAngleLocOrErr.takeError();
- auto ToRAngleLocOrErr = import(Params->getRAngleLoc());
- if (!ToRAngleLocOrErr)
- return ToRAngleLocOrErr.takeError();
-
- return TemplateParameterList::Create(
- Importer.getToContext(),
- *ToTemplateLocOrErr,
- *ToLAngleLocOrErr,
- ToParams,
- *ToRAngleLocOrErr,
- ToRequiresClause);
-}
-
Error ASTNodeImporter::ImportTemplateArguments(
const TemplateArgument *FromArgs, unsigned NumFromArgs,
SmallVectorImpl<TemplateArgument> &ToArgs) {
@@ -2011,6 +1999,12 @@ bool ASTNodeImporter::IsStructuralMatch(VarDecl *FromVar, VarDecl *ToVar,
}
bool ASTNodeImporter::IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToEnum) {
+ // Eliminate a potential failure point where we attempt to re-import
+ // something we're trying to import while completing ToEnum.
+ if (Decl *ToOrigin = Importer.GetOriginalDecl(ToEnum))
+ if (auto *ToOriginEnum = dyn_cast<EnumDecl>(ToOrigin))
+ ToEnum = ToOriginEnum;
+
StructuralEquivalenceContext Ctx(
Importer.getFromContext(), Importer.getToContext(),
Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer));
@@ -2303,7 +2297,8 @@ ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
if (!FromUT->isIncompleteType() && !FoundUT->isIncompleteType())
return Importer.MapImported(D, FoundTypedef);
}
- // FIXME Handle redecl chain.
+ // FIXME Handle redecl chain. When you do that, make consistent changes
+ // in ASTImporterLookupTable too.
break;
}
@@ -2492,6 +2487,8 @@ ExpectedDecl ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
}
if (auto *FoundEnum = dyn_cast<EnumDecl>(FoundDecl)) {
+ if (!hasSameVisibilityContext(FoundEnum, D))
+ continue;
if (IsStructuralMatch(D, FoundEnum))
return Importer.MapImported(D, FoundEnum);
}
@@ -2500,7 +2497,7 @@ ExpectedDecl ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
}
if (!ConflictingDecls.empty()) {
- Name = Importer.HandleNameConflict(Name, DC, IDNS,
+ Name = Importer.HandleNameConflict(SearchName, DC, IDNS,
ConflictingDecls.data(),
ConflictingDecls.size());
if (!Name)
@@ -2548,26 +2545,6 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
Decl::FOK_None;
}
- // If this record has a definition in the translation unit we're coming from,
- // but this particular declaration is not that definition, import the
- // definition and map to that.
- TagDecl *Definition = D->getDefinition();
- if (Definition && Definition != D &&
- // Friend template declaration must be imported on its own.
- !IsFriendTemplate &&
- // In contrast to a normal CXXRecordDecl, the implicit
- // CXXRecordDecl of ClassTemplateSpecializationDecl is its redeclaration.
- // The definition of the implicit CXXRecordDecl in this case is the
- // ClassTemplateSpecializationDecl itself. Thus, we start with an extra
- // condition in order to be able to import the implict Decl.
- !D->isImplicit()) {
- ExpectedDecl ImportedDefOrErr = import(Definition);
- if (!ImportedDefOrErr)
- return ImportedDefOrErr.takeError();
-
- return Importer.MapImported(D, *ImportedDefOrErr);
- }
-
// Import the major distinguishing characteristics of this record.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
@@ -2596,7 +2573,8 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
auto FoundDecls =
Importer.findDeclsInToCtx(DC, SearchName);
if (!FoundDecls.empty()) {
- // We're going to have to compare D against potentially conflicting Decls, so complete it.
+ // We're going to have to compare D against potentially conflicting Decls,
+ // so complete it.
if (D->hasExternalLexicalStorage() && !D->isCompleteDefinition())
D->getASTContext().getExternalSource()->CompleteType(D);
}
@@ -2625,6 +2603,9 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
if (!IsStructuralMatch(D, FoundRecord, false))
continue;
+ if (!hasSameVisibilityContext(FoundRecord, D))
+ continue;
+
if (IsStructuralMatch(D, FoundRecord)) {
RecordDecl *FoundDef = FoundRecord->getDefinition();
if (D->isThisDeclarationADefinition() && FoundDef) {
@@ -2651,7 +2632,7 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
} // for
if (!ConflictingDecls.empty() && SearchName) {
- Name = Importer.HandleNameConflict(Name, DC, IDNS,
+ Name = Importer.HandleNameConflict(SearchName, DC, IDNS,
ConflictingDecls.data(),
ConflictingDecls.size());
if (!Name)
@@ -2848,6 +2829,22 @@ ExpectedDecl ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) {
return ToEnumerator;
}
+Error ASTNodeImporter::ImportTemplateParameterLists(const DeclaratorDecl *FromD,
+ DeclaratorDecl *ToD) {
+ unsigned int Num = FromD->getNumTemplateParameterLists();
+ if (Num == 0)
+ return Error::success();
+ SmallVector<TemplateParameterList *, 2> ToTPLists(Num);
+ for (unsigned int I = 0; I < Num; ++I)
+ if (Expected<TemplateParameterList *> ToTPListOrErr =
+ import(FromD->getTemplateParameterList(I)))
+ ToTPLists[I] = *ToTPListOrErr;
+ else
+ return ToTPListOrErr.takeError();
+ ToD->setTemplateParameterListsInfo(Importer.ToContext, ToTPLists);
+ return Error::success();
+}
+
Error ASTNodeImporter::ImportTemplateInformation(
FunctionDecl *FromFD, FunctionDecl *ToFD) {
switch (FromFD->getTemplatedKind()) {
@@ -2894,6 +2891,9 @@ Error ASTNodeImporter::ImportTemplateInformation(
if (!POIOrErr)
return POIOrErr.takeError();
+ if (Error Err = ImportTemplateParameterLists(FromFD, ToFD))
+ return Err;
+
TemplateSpecializationKind TSK = FTSInfo->getTemplateSpecializationKind();
ToFD->setFunctionTemplateSpecialization(
std::get<0>(*FunctionAndArgsOrErr), ToTAList, /* InsertPos= */ nullptr,
@@ -2945,6 +2945,30 @@ ASTNodeImporter::FindFunctionTemplateSpecialization(FunctionDecl *FromFD) {
return FoundSpec;
}
+Error ASTNodeImporter::ImportFunctionDeclBody(FunctionDecl *FromFD,
+ FunctionDecl *ToFD) {
+ if (Stmt *FromBody = FromFD->getBody()) {
+ if (ExpectedStmt ToBodyOrErr = import(FromBody))
+ ToFD->setBody(*ToBodyOrErr);
+ else
+ return ToBodyOrErr.takeError();
+ }
+ return Error::success();
+}
+
+template <typename T>
+bool ASTNodeImporter::hasSameVisibilityContext(T *Found, T *From) {
+ if (From->hasExternalFormalLinkage())
+ return Found->hasExternalFormalLinkage();
+ if (Importer.GetFromTU(Found) != From->getTranslationUnitDecl())
+ return false;
+ if (From->isInAnonymousNamespace())
+ return Found->isInAnonymousNamespace();
+ else
+ return !Found->isInAnonymousNamespace() &&
+ !Found->hasExternalFormalLinkage();
+}
+
ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
SmallVector<Decl *, 2> Redecls = getCanonicalForwardRedeclChain(D);
@@ -2968,7 +2992,7 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
if (ToD)
return ToD;
- const FunctionDecl *FoundByLookup = nullptr;
+ FunctionDecl *FoundByLookup = nullptr;
FunctionTemplateDecl *FromFT = D->getDescribedFunctionTemplate();
// If this is a function template specialization, then try to find the same
@@ -2982,8 +3006,8 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
if (!FoundFunctionOrErr)
return FoundFunctionOrErr.takeError();
if (FunctionDecl *FoundFunction = *FoundFunctionOrErr) {
- if (D->doesThisDeclarationHaveABody() && FoundFunction->hasBody())
- return Importer.MapImported(D, FoundFunction);
+ if (Decl *Def = FindAndMapDefinition(D, FoundFunction))
+ return Def;
FoundByLookup = FoundFunction;
}
}
@@ -2998,33 +3022,27 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
continue;
if (auto *FoundFunction = dyn_cast<FunctionDecl>(FoundDecl)) {
- if (FoundFunction->hasExternalFormalLinkage() &&
- D->hasExternalFormalLinkage()) {
- if (IsStructuralMatch(D, FoundFunction)) {
- const FunctionDecl *Definition = nullptr;
- if (D->doesThisDeclarationHaveABody() &&
- FoundFunction->hasBody(Definition)) {
- return Importer.MapImported(
- D, const_cast<FunctionDecl *>(Definition));
- }
- FoundByLookup = FoundFunction;
- break;
- }
+ if (!hasSameVisibilityContext(FoundFunction, D))
+ continue;
- // FIXME: Check for overloading more carefully, e.g., by boosting
- // Sema::IsOverload out to the AST library.
+ if (IsStructuralMatch(D, FoundFunction)) {
+ if (Decl *Def = FindAndMapDefinition(D, FoundFunction))
+ return Def;
+ FoundByLookup = FoundFunction;
+ break;
+ }
+ // FIXME: Check for overloading more carefully, e.g., by boosting
+ // Sema::IsOverload out to the AST library.
- // Function overloading is okay in C++.
- if (Importer.getToContext().getLangOpts().CPlusPlus)
- continue;
+ // Function overloading is okay in C++.
+ if (Importer.getToContext().getLangOpts().CPlusPlus)
+ continue;
- // Complain about inconsistent function types.
- Importer.ToDiag(Loc, diag::err_odr_function_type_inconsistent)
+ // Complain about inconsistent function types.
+ Importer.ToDiag(Loc, diag::warn_odr_function_type_inconsistent)
<< Name << D->getType() << FoundFunction->getType();
- Importer.ToDiag(FoundFunction->getLocation(),
- diag::note_odr_value_here)
+ Importer.ToDiag(FoundFunction->getLocation(), diag::note_odr_value_here)
<< FoundFunction->getType();
- }
}
ConflictingDecls.push_back(FoundDecl);
@@ -3039,6 +3057,25 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
}
}
+ // We do not allow more than one in-class declaration of a function. This is
+ // because AST clients like VTableBuilder assert on this. VTableBuilder
+ // assumes there is only one in-class declaration. Building a redecl
+ // chain would result in more than one in-class declaration for
+ // overrides (even if they are part of the same redecl chain inside the
+ // derived class.)
+ if (FoundByLookup) {
+ if (isa<CXXMethodDecl>(FoundByLookup)) {
+ if (D->getLexicalDeclContext() == D->getDeclContext()) {
+ if (!D->doesThisDeclarationHaveABody())
+ return Importer.MapImported(D, FoundByLookup);
+ else {
+ // Let's continue and build up the redecl chain in this case.
+ // FIXME Merge the functions into one decl.
+ }
+ }
+ }
+ }
+
DeclarationNameInfo NameInfo(Name, Loc);
// Import additional name location/type info.
if (Error Err = ImportDeclarationNameLoc(D->getNameInfo(), NameInfo))
@@ -3086,36 +3123,71 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
// Create the imported function.
FunctionDecl *ToFunction = nullptr;
if (auto *FromConstructor = dyn_cast<CXXConstructorDecl>(D)) {
+ Expr *ExplicitExpr = nullptr;
+ if (FromConstructor->getExplicitSpecifier().getExpr()) {
+ auto Imp = importSeq(FromConstructor->getExplicitSpecifier().getExpr());
+ if (!Imp)
+ return Imp.takeError();
+ std::tie(ExplicitExpr) = *Imp;
+ }
if (GetImportedOrCreateDecl<CXXConstructorDecl>(
- ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
- ToInnerLocStart, NameInfo, T, TInfo,
- FromConstructor->isExplicit(),
- D->isInlineSpecified(), D->isImplicit(), D->isConstexpr()))
+ ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
+ ToInnerLocStart, NameInfo, T, TInfo,
+ ExplicitSpecifier(
+ ExplicitExpr,
+ FromConstructor->getExplicitSpecifier().getKind()),
+ D->isInlineSpecified(), D->isImplicit(), D->getConstexprKind()))
return ToFunction;
- } else if (isa<CXXDestructorDecl>(D)) {
+ } else if (CXXDestructorDecl *FromDtor = dyn_cast<CXXDestructorDecl>(D)) {
+
+ auto Imp =
+ importSeq(const_cast<FunctionDecl *>(FromDtor->getOperatorDelete()),
+ FromDtor->getOperatorDeleteThisArg());
+
+ if (!Imp)
+ return Imp.takeError();
+
+ FunctionDecl *ToOperatorDelete;
+ Expr *ToThisArg;
+ std::tie(ToOperatorDelete, ToThisArg) = *Imp;
+
if (GetImportedOrCreateDecl<CXXDestructorDecl>(
ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
ToInnerLocStart, NameInfo, T, TInfo, D->isInlineSpecified(),
D->isImplicit()))
return ToFunction;
+
+ CXXDestructorDecl *ToDtor = cast<CXXDestructorDecl>(ToFunction);
+
+ ToDtor->setOperatorDelete(ToOperatorDelete, ToThisArg);
} else if (CXXConversionDecl *FromConversion =
dyn_cast<CXXConversionDecl>(D)) {
+ Expr *ExplicitExpr = nullptr;
+ if (FromConversion->getExplicitSpecifier().getExpr()) {
+ auto Imp = importSeq(FromConversion->getExplicitSpecifier().getExpr());
+ if (!Imp)
+ return Imp.takeError();
+ std::tie(ExplicitExpr) = *Imp;
+ }
if (GetImportedOrCreateDecl<CXXConversionDecl>(
ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
ToInnerLocStart, NameInfo, T, TInfo, D->isInlineSpecified(),
- FromConversion->isExplicit(), D->isConstexpr(), SourceLocation()))
+ ExplicitSpecifier(ExplicitExpr,
+ FromConversion->getExplicitSpecifier().getKind()),
+ D->getConstexprKind(), SourceLocation()))
return ToFunction;
} else if (auto *Method = dyn_cast<CXXMethodDecl>(D)) {
if (GetImportedOrCreateDecl<CXXMethodDecl>(
ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
ToInnerLocStart, NameInfo, T, TInfo, Method->getStorageClass(),
- Method->isInlineSpecified(), D->isConstexpr(), SourceLocation()))
+ Method->isInlineSpecified(), D->getConstexprKind(),
+ SourceLocation()))
return ToFunction;
} else {
- if (GetImportedOrCreateDecl(ToFunction, D, Importer.getToContext(), DC,
- ToInnerLocStart, NameInfo, T, TInfo,
- D->getStorageClass(), D->isInlineSpecified(),
- D->hasWrittenPrototype(), D->isConstexpr()))
+ if (GetImportedOrCreateDecl(
+ ToFunction, D, Importer.getToContext(), DC, ToInnerLocStart,
+ NameInfo, T, TInfo, D->getStorageClass(), D->isInlineSpecified(),
+ D->hasWrittenPrototype(), D->getConstexprKind()))
return ToFunction;
}
@@ -3124,6 +3196,11 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
auto *Recent = const_cast<FunctionDecl *>(
FoundByLookup->getMostRecentDecl());
ToFunction->setPreviousDecl(Recent);
+ // FIXME Probably we should merge exception specifications. E.g. in the
+ // "To" context the existing function may have exception specification with
+ // noexcept-unevaluated, while the newly imported function may have an
+ // evaluated noexcept. A call to adjustExceptionSpec() on the imported
+ // decl and its redeclarations may be required.
}
// Import Ctor initializers.
@@ -3184,12 +3261,10 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
}
if (D->doesThisDeclarationHaveABody()) {
- if (Stmt *FromBody = D->getBody()) {
- if (ExpectedStmt ToBodyOrErr = import(FromBody))
- ToFunction->setBody(*ToBodyOrErr);
- else
- return ToBodyOrErr.takeError();
- }
+ Error Err = ImportFunctionDeclBody(D, ToFunction);
+
+ if (Err)
+ return std::move(Err);
}
// FIXME: Other bits to merge?
@@ -3292,7 +3367,7 @@ ExpectedDecl ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
}
// FIXME: Why is this case not handled with calling HandleNameConflict?
- Importer.ToDiag(Loc, diag::err_odr_field_type_inconsistent)
+ Importer.ToDiag(Loc, diag::warn_odr_field_type_inconsistent)
<< Name << D->getType() << FoundField->getType();
Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here)
<< FoundField->getType();
@@ -3363,7 +3438,7 @@ ExpectedDecl ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
continue;
// FIXME: Why is this case not handled with calling HandleNameConflict?
- Importer.ToDiag(Loc, diag::err_odr_field_type_inconsistent)
+ Importer.ToDiag(Loc, diag::warn_odr_field_type_inconsistent)
<< Name << D->getType() << FoundField->getType();
Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here)
<< FoundField->getType();
@@ -3394,9 +3469,6 @@ ExpectedDecl ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
// FIXME here we leak `NamedChain` which is allocated before
return ToIndirectField;
- for (const auto *Attr : D->attrs())
- ToIndirectField->addAttr(Importer.Import(Attr));
-
ToIndirectField->setAccess(D->getAccess());
ToIndirectField->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToIndirectField);
@@ -3451,7 +3523,7 @@ ExpectedDecl ASTNodeImporter::VisitFriendDecl(FriendDecl *D) {
SmallVector<TemplateParameterList *, 1> ToTPLists(D->NumTPLists);
auto **FromTPLists = D->getTrailingObjects<TemplateParameterList *>();
for (unsigned I = 0; I < D->NumTPLists; I++) {
- if (auto ListOrErr = ImportTemplateParameterList(FromTPLists[I]))
+ if (auto ListOrErr = import(FromTPLists[I]))
ToTPLists[I] = *ListOrErr;
else
return ListOrErr.takeError();
@@ -3497,7 +3569,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
return FoundIvar;
}
- Importer.ToDiag(Loc, diag::err_odr_ivar_type_inconsistent)
+ Importer.ToDiag(Loc, diag::warn_odr_ivar_type_inconsistent)
<< Name << D->getType() << FoundIvar->getType();
Importer.ToDiag(FoundIvar->getLocation(), diag::note_odr_value_here)
<< FoundIvar->getType();
@@ -3564,58 +3636,56 @@ ExpectedDecl ASTNodeImporter::VisitVarDecl(VarDecl *D) {
continue;
if (auto *FoundVar = dyn_cast<VarDecl>(FoundDecl)) {
- // We have found a variable that we may need to merge with. Check it.
- if (FoundVar->hasExternalFormalLinkage() &&
- D->hasExternalFormalLinkage()) {
- if (Importer.IsStructurallyEquivalent(D->getType(),
- FoundVar->getType())) {
-
- // The VarDecl in the "From" context has a definition, but in the
- // "To" context we already have a definition.
- VarDecl *FoundDef = FoundVar->getDefinition();
- if (D->isThisDeclarationADefinition() && FoundDef)
- // FIXME Check for ODR error if the two definitions have
- // different initializers?
- return Importer.MapImported(D, FoundDef);
-
- // The VarDecl in the "From" context has an initializer, but in the
- // "To" context we already have an initializer.
- const VarDecl *FoundDInit = nullptr;
- if (D->getInit() && FoundVar->getAnyInitializer(FoundDInit))
- // FIXME Diagnose ODR error if the two initializers are different?
- return Importer.MapImported(D, const_cast<VarDecl*>(FoundDInit));
+ if (!hasSameVisibilityContext(FoundVar, D))
+ continue;
+ if (Importer.IsStructurallyEquivalent(D->getType(),
+ FoundVar->getType())) {
+
+ // The VarDecl in the "From" context has a definition, but in the
+ // "To" context we already have a definition.
+ VarDecl *FoundDef = FoundVar->getDefinition();
+ if (D->isThisDeclarationADefinition() && FoundDef)
+ // FIXME Check for ODR error if the two definitions have
+ // different initializers?
+ return Importer.MapImported(D, FoundDef);
+
+ // The VarDecl in the "From" context has an initializer, but in the
+ // "To" context we already have an initializer.
+ const VarDecl *FoundDInit = nullptr;
+ if (D->getInit() && FoundVar->getAnyInitializer(FoundDInit))
+ // FIXME Diagnose ODR error if the two initializers are different?
+ return Importer.MapImported(D, const_cast<VarDecl*>(FoundDInit));
+
+ FoundByLookup = FoundVar;
+ break;
+ }
+
+ const ArrayType *FoundArray
+ = Importer.getToContext().getAsArrayType(FoundVar->getType());
+ const ArrayType *TArray
+ = Importer.getToContext().getAsArrayType(D->getType());
+ if (FoundArray && TArray) {
+ if (isa<IncompleteArrayType>(FoundArray) &&
+ isa<ConstantArrayType>(TArray)) {
+ // Import the type.
+ if (auto TyOrErr = import(D->getType()))
+ FoundVar->setType(*TyOrErr);
+ else
+ return TyOrErr.takeError();
FoundByLookup = FoundVar;
break;
+ } else if (isa<IncompleteArrayType>(TArray) &&
+ isa<ConstantArrayType>(FoundArray)) {
+ FoundByLookup = FoundVar;
+ break;
}
-
- const ArrayType *FoundArray
- = Importer.getToContext().getAsArrayType(FoundVar->getType());
- const ArrayType *TArray
- = Importer.getToContext().getAsArrayType(D->getType());
- if (FoundArray && TArray) {
- if (isa<IncompleteArrayType>(FoundArray) &&
- isa<ConstantArrayType>(TArray)) {
- // Import the type.
- if (auto TyOrErr = import(D->getType()))
- FoundVar->setType(*TyOrErr);
- else
- return TyOrErr.takeError();
-
- FoundByLookup = FoundVar;
- break;
- } else if (isa<IncompleteArrayType>(TArray) &&
- isa<ConstantArrayType>(FoundArray)) {
- FoundByLookup = FoundVar;
- break;
- }
- }
-
- Importer.ToDiag(Loc, diag::err_odr_variable_type_inconsistent)
- << Name << D->getType() << FoundVar->getType();
- Importer.ToDiag(FoundVar->getLocation(), diag::note_odr_value_here)
- << FoundVar->getType();
}
+
+ Importer.ToDiag(Loc, diag::warn_odr_variable_type_inconsistent)
+ << Name << D->getType() << FoundVar->getType();
+ Importer.ToDiag(FoundVar->getLocation(), diag::note_odr_value_here)
+ << FoundVar->getType();
}
ConflictingDecls.push_back(FoundDecl);
@@ -3777,7 +3847,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
// Check return types.
if (!Importer.IsStructurallyEquivalent(D->getReturnType(),
FoundMethod->getReturnType())) {
- Importer.ToDiag(Loc, diag::err_odr_objc_method_result_type_inconsistent)
+ Importer.ToDiag(Loc, diag::warn_odr_objc_method_result_type_inconsistent)
<< D->isInstanceMethod() << Name << D->getReturnType()
<< FoundMethod->getReturnType();
Importer.ToDiag(FoundMethod->getLocation(),
@@ -3789,7 +3859,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
// Check the number of parameters.
if (D->param_size() != FoundMethod->param_size()) {
- Importer.ToDiag(Loc, diag::err_odr_objc_method_num_params_inconsistent)
+ Importer.ToDiag(Loc, diag::warn_odr_objc_method_num_params_inconsistent)
<< D->isInstanceMethod() << Name
<< D->param_size() << FoundMethod->param_size();
Importer.ToDiag(FoundMethod->getLocation(),
@@ -3806,7 +3876,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
if (!Importer.IsStructurallyEquivalent((*P)->getType(),
(*FoundP)->getType())) {
Importer.FromDiag((*P)->getLocation(),
- diag::err_odr_objc_method_param_type_inconsistent)
+ diag::warn_odr_objc_method_param_type_inconsistent)
<< D->isInstanceMethod() << Name
<< (*P)->getType() << (*FoundP)->getType();
Importer.ToDiag((*FoundP)->getLocation(), diag::note_odr_value_here)
@@ -3819,7 +3889,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
// Check variadic/non-variadic.
// Check the number of parameters.
if (D->isVariadic() != FoundMethod->isVariadic()) {
- Importer.ToDiag(Loc, diag::err_odr_objc_method_variadic_inconsistent)
+ Importer.ToDiag(Loc, diag::warn_odr_objc_method_variadic_inconsistent)
<< D->isInstanceMethod() << Name;
Importer.ToDiag(FoundMethod->getLocation(),
diag::note_odr_objc_method_here)
@@ -4364,7 +4434,7 @@ Error ASTNodeImporter::ImportDefinition(
if ((bool)FromSuper != (bool)ToSuper ||
(FromSuper && !declaresSameEntity(FromSuper, ToSuper))) {
Importer.ToDiag(To->getLocation(),
- diag::err_odr_objc_superclass_inconsistent)
+ diag::warn_odr_objc_superclass_inconsistent)
<< To->getDeclName();
if (ToSuper)
Importer.ToDiag(To->getSuperClassLoc(), diag::note_odr_objc_superclass)
@@ -4633,7 +4703,7 @@ ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
!declaresSameEntity(Super->getCanonicalDecl(),
Impl->getSuperClass()))) {
Importer.ToDiag(Impl->getLocation(),
- diag::err_odr_objc_superclass_inconsistent)
+ diag::warn_odr_objc_superclass_inconsistent)
<< Iface->getDeclName();
// FIXME: It would be nice to have the location of the superclass
// below.
@@ -4681,7 +4751,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
// Check property types.
if (!Importer.IsStructurallyEquivalent(D->getType(),
FoundProp->getType())) {
- Importer.ToDiag(Loc, diag::err_odr_objc_property_type_inconsistent)
+ Importer.ToDiag(Loc, diag::warn_odr_objc_property_type_inconsistent)
<< Name << D->getType() << FoundProp->getType();
Importer.ToDiag(FoundProp->getLocation(), diag::note_odr_value_here)
<< FoundProp->getType();
@@ -4788,7 +4858,7 @@ ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
// vs. @dynamic).
if (D->getPropertyImplementation() != ToImpl->getPropertyImplementation()) {
Importer.ToDiag(ToImpl->getLocation(),
- diag::err_odr_objc_property_impl_kind_inconsistent)
+ diag::warn_odr_objc_property_impl_kind_inconsistent)
<< Property->getDeclName()
<< (ToImpl->getPropertyImplementation()
== ObjCPropertyImplDecl::Dynamic);
@@ -4804,7 +4874,7 @@ ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
if (D->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize &&
Ivar != ToImpl->getPropertyIvarDecl()) {
Importer.ToDiag(ToImpl->getPropertyIvarDeclLoc(),
- diag::err_odr_objc_synthesize_ivar_inconsistent)
+ diag::warn_odr_objc_synthesize_ivar_inconsistent)
<< Property->getDeclName()
<< ToImpl->getPropertyIvarDecl()->getDeclName()
<< Ivar->getDeclName();
@@ -4888,8 +4958,7 @@ ASTNodeImporter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
return LocationOrErr.takeError();
// Import template parameters.
- auto TemplateParamsOrErr = ImportTemplateParameterList(
- D->getTemplateParameters());
+ auto TemplateParamsOrErr = import(D->getTemplateParameters());
if (!TemplateParamsOrErr)
return TemplateParamsOrErr.takeError();
@@ -4905,31 +4974,20 @@ ASTNodeImporter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
return ToD;
}
-// Returns the definition for a (forward) declaration of a ClassTemplateDecl, if
+// Returns the definition for a (forward) declaration of a TemplateDecl, if
// it has any definition in the redecl chain.
-static ClassTemplateDecl *getDefinition(ClassTemplateDecl *D) {
- CXXRecordDecl *ToTemplatedDef = D->getTemplatedDecl()->getDefinition();
+template <typename T> static auto getTemplateDefinition(T *D) -> T * {
+ assert(D->getTemplatedDecl() && "Should be called on templates only");
+ auto *ToTemplatedDef = D->getTemplatedDecl()->getDefinition();
if (!ToTemplatedDef)
return nullptr;
- ClassTemplateDecl *TemplateWithDef =
- ToTemplatedDef->getDescribedClassTemplate();
- return TemplateWithDef;
+ auto *TemplateWithDef = ToTemplatedDef->getDescribedTemplate();
+ return cast_or_null<T>(TemplateWithDef);
}
ExpectedDecl ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
bool IsFriend = D->getFriendObjectKind() != Decl::FOK_None;
- // If this template has a definition in the translation unit we're coming
- // from, but this particular declaration is not that definition, import the
- // definition and map to that.
- ClassTemplateDecl *Definition = getDefinition(D);
- if (Definition && Definition != D && !IsFriend) {
- if (ExpectedDecl ImportedDefOrErr = import(Definition))
- return Importer.MapImported(D, *ImportedDefOrErr);
- else
- return ImportedDefOrErr.takeError();
- }
-
// Import the major distinguishing characteristics of this class template.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
@@ -4956,7 +5014,8 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
if (FoundTemplate) {
if (IsStructuralMatch(D, FoundTemplate)) {
- ClassTemplateDecl *TemplateWithDef = getDefinition(FoundTemplate);
+ ClassTemplateDecl *TemplateWithDef =
+ getTemplateDefinition(FoundTemplate);
if (D->isThisDeclarationADefinition() && TemplateWithDef) {
return Importer.MapImported(D, TemplateWithDef);
}
@@ -4986,8 +5045,7 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
return std::move(Err);
// Create the class template declaration itself.
- auto TemplateParamsOrErr = ImportTemplateParameterList(
- D->getTemplateParameters());
+ auto TemplateParamsOrErr = import(D->getTemplateParameters());
if (!TemplateParamsOrErr)
return TemplateParamsOrErr.takeError();
@@ -5019,6 +5077,8 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
// and this time the lookup finds the previous fwd friend class template.
// In this case we must set up the previous decl for the templated decl.
if (!ToTemplated->getPreviousDecl()) {
+ assert(FoundByLookup->getTemplatedDecl() &&
+ "Found decl must have its templated decl set");
CXXRecordDecl *PrevTemplated =
FoundByLookup->getTemplatedDecl()->getMostRecentDecl();
if (ToTemplated != PrevTemplated)
@@ -5041,17 +5101,6 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl(
ClassTemplateSpecializationDecl *D) {
- // If this record has a definition in the translation unit we're coming from,
- // but this particular declaration is not that definition, import the
- // definition and map to that.
- TagDecl *Definition = D->getDefinition();
- if (Definition && Definition != D) {
- if (ExpectedDecl ImportedDefOrErr = import(Definition))
- return Importer.MapImported(D, *ImportedDefOrErr);
- else
- return ImportedDefOrErr.takeError();
- }
-
ClassTemplateDecl *ClassTemplate;
if (Error Err = importInto(ClassTemplate, D->getSpecializedTemplate()))
return std::move(Err);
@@ -5069,154 +5118,146 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl(
// Try to find an existing specialization with these template arguments.
void *InsertPos = nullptr;
- ClassTemplateSpecializationDecl *D2 = nullptr;
+ ClassTemplateSpecializationDecl *PrevDecl = nullptr;
ClassTemplatePartialSpecializationDecl *PartialSpec =
dyn_cast<ClassTemplatePartialSpecializationDecl>(D);
if (PartialSpec)
- D2 = ClassTemplate->findPartialSpecialization(TemplateArgs, InsertPos);
+ PrevDecl =
+ ClassTemplate->findPartialSpecialization(TemplateArgs, InsertPos);
else
- D2 = ClassTemplate->findSpecialization(TemplateArgs, InsertPos);
- ClassTemplateSpecializationDecl * const PrevDecl = D2;
- RecordDecl *FoundDef = D2 ? D2->getDefinition() : nullptr;
- if (FoundDef) {
- if (!D->isCompleteDefinition()) {
- // The "From" translation unit only had a forward declaration; call it
- // the same declaration.
- // TODO Handle the redecl chain properly!
- return Importer.MapImported(D, FoundDef);
- }
-
- if (IsStructuralMatch(D, FoundDef)) {
+ PrevDecl = ClassTemplate->findSpecialization(TemplateArgs, InsertPos);
+
+ if (PrevDecl) {
+ if (IsStructuralMatch(D, PrevDecl)) {
+ if (D->isThisDeclarationADefinition() && PrevDecl->getDefinition()) {
+ Importer.MapImported(D, PrevDecl->getDefinition());
+ // Import those default field initializers which have been
+ // instantiated in the "From" context, but not in the "To" context.
+ for (auto *FromField : D->fields()) {
+ auto ToOrErr = import(FromField);
+ if (!ToOrErr)
+ return ToOrErr.takeError();
+ }
- Importer.MapImported(D, FoundDef);
+ // Import those methods which have been instantiated in the
+ // "From" context, but not in the "To" context.
+ for (CXXMethodDecl *FromM : D->methods()) {
+ auto ToOrErr = import(FromM);
+ if (!ToOrErr)
+ return ToOrErr.takeError();
+ }
- // Import those those default field initializers which have been
- // instantiated in the "From" context, but not in the "To" context.
- for (auto *FromField : D->fields()) {
- auto ToOrErr = import(FromField);
- if (!ToOrErr)
- // FIXME: return the error?
- consumeError(ToOrErr.takeError());
+ // TODO Import instantiated default arguments.
+ // TODO Import instantiated exception specifications.
+ //
+ // Generally, ASTCommon.h/DeclUpdateKind enum gives a very good hint
+ // what else could be fused during an AST merge.
+ return PrevDecl;
}
+ } else { // ODR violation.
+ // FIXME HandleNameConflict
+ return make_error<ImportError>(ImportError::NameConflict);
+ }
+ }
- // Import those methods which have been instantiated in the
- // "From" context, but not in the "To" context.
- for (CXXMethodDecl *FromM : D->methods()) {
- auto ToOrErr = import(FromM);
- if (!ToOrErr)
- // FIXME: return the error?
- consumeError(ToOrErr.takeError());
- }
+ // Import the location of this declaration.
+ ExpectedSLoc BeginLocOrErr = import(D->getBeginLoc());
+ if (!BeginLocOrErr)
+ return BeginLocOrErr.takeError();
+ ExpectedSLoc IdLocOrErr = import(D->getLocation());
+ if (!IdLocOrErr)
+ return IdLocOrErr.takeError();
- // TODO Import instantiated default arguments.
- // TODO Import instantiated exception specifications.
- //
- // Generally, ASTCommon.h/DeclUpdateKind enum gives a very good hint what
- // else could be fused during an AST merge.
+ // Create the specialization.
+ ClassTemplateSpecializationDecl *D2 = nullptr;
+ if (PartialSpec) {
+ // Import TemplateArgumentListInfo.
+ TemplateArgumentListInfo ToTAInfo;
+ const auto &ASTTemplateArgs = *PartialSpec->getTemplateArgsAsWritten();
+ if (Error Err = ImportTemplateArgumentListInfo(ASTTemplateArgs, ToTAInfo))
+ return std::move(Err);
- return FoundDef;
- }
- } else { // We either couldn't find any previous specialization in the "To"
- // context, or we found one but without definition. Let's create a
- // new specialization and register that at the class template.
+ QualType CanonInjType;
+ if (Error Err = importInto(
+ CanonInjType, PartialSpec->getInjectedSpecializationType()))
+ return std::move(Err);
+ CanonInjType = CanonInjType.getCanonicalType();
+
+ auto ToTPListOrErr = import(PartialSpec->getTemplateParameters());
+ if (!ToTPListOrErr)
+ return ToTPListOrErr.takeError();
+
+ if (GetImportedOrCreateDecl<ClassTemplatePartialSpecializationDecl>(
+ D2, D, Importer.getToContext(), D->getTagKind(), DC,
+ *BeginLocOrErr, *IdLocOrErr, *ToTPListOrErr, ClassTemplate,
+ llvm::makeArrayRef(TemplateArgs.data(), TemplateArgs.size()),
+ ToTAInfo, CanonInjType,
+ cast_or_null<ClassTemplatePartialSpecializationDecl>(PrevDecl)))
+ return D2;
- // Import the location of this declaration.
- ExpectedSLoc BeginLocOrErr = import(D->getBeginLoc());
- if (!BeginLocOrErr)
- return BeginLocOrErr.takeError();
- ExpectedSLoc IdLocOrErr = import(D->getLocation());
- if (!IdLocOrErr)
- return IdLocOrErr.takeError();
-
- if (PartialSpec) {
- // Import TemplateArgumentListInfo.
- TemplateArgumentListInfo ToTAInfo;
- const auto &ASTTemplateArgs = *PartialSpec->getTemplateArgsAsWritten();
- if (Error Err = ImportTemplateArgumentListInfo(ASTTemplateArgs, ToTAInfo))
- return std::move(Err);
+ // Update InsertPos, because preceding import calls may have invalidated
+ // it by adding new specializations.
+ if (!ClassTemplate->findPartialSpecialization(TemplateArgs, InsertPos))
+ // Add this partial specialization to the class template.
+ ClassTemplate->AddPartialSpecialization(
+ cast<ClassTemplatePartialSpecializationDecl>(D2), InsertPos);
- QualType CanonInjType;
- if (Error Err = importInto(
- CanonInjType, PartialSpec->getInjectedSpecializationType()))
- return std::move(Err);
- CanonInjType = CanonInjType.getCanonicalType();
+ } else { // Not a partial specialization.
+ if (GetImportedOrCreateDecl(
+ D2, D, Importer.getToContext(), D->getTagKind(), DC,
+ *BeginLocOrErr, *IdLocOrErr, ClassTemplate, TemplateArgs,
+ PrevDecl))
+ return D2;
- auto ToTPListOrErr = ImportTemplateParameterList(
- PartialSpec->getTemplateParameters());
- if (!ToTPListOrErr)
- return ToTPListOrErr.takeError();
+ // Update InsertPos, because preceding import calls may have invalidated
+ // it by adding new specializations.
+ if (!ClassTemplate->findSpecialization(TemplateArgs, InsertPos))
+ // Add this specialization to the class template.
+ ClassTemplate->AddSpecialization(D2, InsertPos);
+ }
- if (GetImportedOrCreateDecl<ClassTemplatePartialSpecializationDecl>(
- D2, D, Importer.getToContext(), D->getTagKind(), DC,
- *BeginLocOrErr, *IdLocOrErr, *ToTPListOrErr, ClassTemplate,
- llvm::makeArrayRef(TemplateArgs.data(), TemplateArgs.size()),
- ToTAInfo, CanonInjType,
- cast_or_null<ClassTemplatePartialSpecializationDecl>(PrevDecl)))
- return D2;
+ D2->setSpecializationKind(D->getSpecializationKind());
- // Update InsertPos, because preceding import calls may have invalidated
- // it by adding new specializations.
- if (!ClassTemplate->findPartialSpecialization(TemplateArgs, InsertPos))
- // Add this partial specialization to the class template.
- ClassTemplate->AddPartialSpecialization(
- cast<ClassTemplatePartialSpecializationDecl>(D2), InsertPos);
+ // Set the context of this specialization/instantiation.
+ D2->setLexicalDeclContext(LexicalDC);
- } else { // Not a partial specialization.
- if (GetImportedOrCreateDecl(
- D2, D, Importer.getToContext(), D->getTagKind(), DC,
- *BeginLocOrErr, *IdLocOrErr, ClassTemplate, TemplateArgs,
- PrevDecl))
- return D2;
+ // Add to the DC only if it was an explicit specialization/instantiation.
+ if (D2->isExplicitInstantiationOrSpecialization()) {
+ LexicalDC->addDeclInternal(D2);
+ }
- // Update InsertPos, because preceding import calls may have invalidated
- // it by adding new specializations.
- if (!ClassTemplate->findSpecialization(TemplateArgs, InsertPos))
- // Add this specialization to the class template.
- ClassTemplate->AddSpecialization(D2, InsertPos);
- }
+ // Import the qualifier, if any.
+ if (auto LocOrErr = import(D->getQualifierLoc()))
+ D2->setQualifierInfo(*LocOrErr);
+ else
+ return LocOrErr.takeError();
- D2->setSpecializationKind(D->getSpecializationKind());
+ if (auto *TSI = D->getTypeAsWritten()) {
+ if (auto TInfoOrErr = import(TSI))
+ D2->setTypeAsWritten(*TInfoOrErr);
+ else
+ return TInfoOrErr.takeError();
- // Import the qualifier, if any.
- if (auto LocOrErr = import(D->getQualifierLoc()))
- D2->setQualifierInfo(*LocOrErr);
+ if (auto LocOrErr = import(D->getTemplateKeywordLoc()))
+ D2->setTemplateKeywordLoc(*LocOrErr);
else
return LocOrErr.takeError();
- if (auto *TSI = D->getTypeAsWritten()) {
- if (auto TInfoOrErr = import(TSI))
- D2->setTypeAsWritten(*TInfoOrErr);
- else
- return TInfoOrErr.takeError();
-
- if (auto LocOrErr = import(D->getTemplateKeywordLoc()))
- D2->setTemplateKeywordLoc(*LocOrErr);
- else
- return LocOrErr.takeError();
-
- if (auto LocOrErr = import(D->getExternLoc()))
- D2->setExternLoc(*LocOrErr);
- else
- return LocOrErr.takeError();
- }
-
- if (D->getPointOfInstantiation().isValid()) {
- if (auto POIOrErr = import(D->getPointOfInstantiation()))
- D2->setPointOfInstantiation(*POIOrErr);
- else
- return POIOrErr.takeError();
- }
+ if (auto LocOrErr = import(D->getExternLoc()))
+ D2->setExternLoc(*LocOrErr);
+ else
+ return LocOrErr.takeError();
+ }
- D2->setTemplateSpecializationKind(D->getTemplateSpecializationKind());
+ if (D->getPointOfInstantiation().isValid()) {
+ if (auto POIOrErr = import(D->getPointOfInstantiation()))
+ D2->setPointOfInstantiation(*POIOrErr);
+ else
+ return POIOrErr.takeError();
+ }
- // Set the context of this specialization/instantiation.
- D2->setLexicalDeclContext(LexicalDC);
+ D2->setTemplateSpecializationKind(D->getTemplateSpecializationKind());
- // Add to the DC only if it was an explicit specialization/instantiation.
- if (D2->isExplicitInstantiationOrSpecialization()) {
- LexicalDC->addDeclInternal(D2);
- }
- }
if (D->isCompleteDefinition())
if (Error Err = ImportDefinition(D, D2))
return std::move(Err);
@@ -5296,8 +5337,7 @@ ExpectedDecl ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
return std::move(Err);
// Create the variable template declaration itself.
- auto TemplateParamsOrErr = ImportTemplateParameterList(
- D->getTemplateParameters());
+ auto TemplateParamsOrErr = import(D->getTemplateParameters());
if (!TemplateParamsOrErr)
return TemplateParamsOrErr.takeError();
@@ -5333,7 +5373,7 @@ ExpectedDecl ASTNodeImporter::VisitVarTemplateSpecializationDecl(
return ImportedDefOrErr.takeError();
}
- VarTemplateDecl *VarTemplate;
+ VarTemplateDecl *VarTemplate = nullptr;
if (Error Err = importInto(VarTemplate, D->getSpecializedTemplate()))
return std::move(Err);
@@ -5402,8 +5442,7 @@ ExpectedDecl ASTNodeImporter::VisitVarTemplateSpecializationDecl(
*FromTAArgsAsWritten, ArgInfos))
return std::move(Err);
- auto ToTPListOrErr = ImportTemplateParameterList(
- FromPartial->getTemplateParameters());
+ auto ToTPListOrErr = import(FromPartial->getTemplateParameters());
if (!ToTPListOrErr)
return ToTPListOrErr.takeError();
@@ -5481,32 +5520,37 @@ ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
if (ToD)
return ToD;
+ const FunctionTemplateDecl *FoundByLookup = nullptr;
+
// Try to find a function in our own ("to") context with the same name, same
// type, and in the same context as the function we're importing.
+ // FIXME Split this into a separate function.
if (!LexicalDC->isFunctionOrMethod()) {
- unsigned IDNS = Decl::IDNS_Ordinary;
+ unsigned IDNS = Decl::IDNS_Ordinary | Decl::IDNS_OrdinaryFriend;
auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
for (auto *FoundDecl : FoundDecls) {
if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
- if (auto *FoundFunction =
- dyn_cast<FunctionTemplateDecl>(FoundDecl)) {
- if (FoundFunction->hasExternalFormalLinkage() &&
+ if (auto *FoundTemplate = dyn_cast<FunctionTemplateDecl>(FoundDecl)) {
+ if (FoundTemplate->hasExternalFormalLinkage() &&
D->hasExternalFormalLinkage()) {
- if (IsStructuralMatch(D, FoundFunction)) {
- Importer.MapImported(D, FoundFunction);
- // FIXME: Actually try to merge the body and other attributes.
- return FoundFunction;
+ if (IsStructuralMatch(D, FoundTemplate)) {
+ FunctionTemplateDecl *TemplateWithDef =
+ getTemplateDefinition(FoundTemplate);
+ if (D->isThisDeclarationADefinition() && TemplateWithDef) {
+ return Importer.MapImported(D, TemplateWithDef);
+ }
+ FoundByLookup = FoundTemplate;
+ break;
}
+ // TODO: handle conflicting names
}
}
- // TODO: handle conflicting names
}
}
- auto ParamsOrErr = ImportTemplateParameterList(
- D->getTemplateParameters());
+ auto ParamsOrErr = import(D->getTemplateParameters());
if (!ParamsOrErr)
return ParamsOrErr.takeError();
@@ -5520,10 +5564,25 @@ ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
return ToFunc;
TemplatedFD->setDescribedFunctionTemplate(ToFunc);
+
ToFunc->setAccess(D->getAccess());
ToFunc->setLexicalDeclContext(LexicalDC);
-
LexicalDC->addDeclInternal(ToFunc);
+
+ if (FoundByLookup) {
+ auto *Recent =
+ const_cast<FunctionTemplateDecl *>(FoundByLookup->getMostRecentDecl());
+ if (!TemplatedFD->getPreviousDecl()) {
+ assert(FoundByLookup->getTemplatedDecl() &&
+ "Found decl must have its templated decl set");
+ auto *PrevTemplated =
+ FoundByLookup->getTemplatedDecl()->getMostRecentDecl();
+ if (TemplatedFD != PrevTemplated)
+ TemplatedFD->setPreviousDecl(PrevTemplated);
+ }
+ ToFunc->setPreviousDecl(Recent);
+ }
+
return ToFunc;
}
@@ -5539,6 +5598,8 @@ ExpectedStmt ASTNodeImporter::VisitStmt(Stmt *S) {
ExpectedStmt ASTNodeImporter::VisitGCCAsmStmt(GCCAsmStmt *S) {
+ if (Importer.returnWithErrorInTest())
+ return make_error<ImportError>(ImportError::UnsupportedConstruct);
SmallVector<IdentifierInfo *, 4> Names;
for (unsigned I = 0, E = S->getNumOutputs(); I != E; I++) {
IdentifierInfo *ToII = Importer.Import(S->getOutputIdentifier(I));
@@ -5578,12 +5639,17 @@ ExpectedStmt ASTNodeImporter::VisitGCCAsmStmt(GCCAsmStmt *S) {
return InputOrErr.takeError();
}
- SmallVector<Expr *, 4> Exprs(S->getNumOutputs() + S->getNumInputs());
+ SmallVector<Expr *, 4> Exprs(S->getNumOutputs() + S->getNumInputs() +
+ S->getNumLabels());
if (Error Err = ImportContainerChecked(S->outputs(), Exprs))
return std::move(Err);
+ if (Error Err =
+ ImportArrayChecked(S->inputs(), Exprs.begin() + S->getNumOutputs()))
+ return std::move(Err);
+
if (Error Err = ImportArrayChecked(
- S->inputs(), Exprs.begin() + S->getNumOutputs()))
+ S->labels(), Exprs.begin() + S->getNumOutputs() + S->getNumInputs()))
return std::move(Err);
ExpectedSLoc AsmLocOrErr = import(S->getAsmLoc());
@@ -5609,6 +5675,7 @@ ExpectedStmt ASTNodeImporter::VisitGCCAsmStmt(GCCAsmStmt *S) {
*AsmStrOrErr,
S->getNumClobbers(),
Clobbers.data(),
+ S->getNumLabels(),
*RParenLocOrErr);
}
@@ -6079,6 +6146,33 @@ ExpectedStmt ASTNodeImporter::VisitVAArgExpr(VAArgExpr *E) {
E->isMicrosoftABI());
}
+ExpectedStmt ASTNodeImporter::VisitChooseExpr(ChooseExpr *E) {
+ auto Imp = importSeq(E->getCond(), E->getLHS(), E->getRHS(),
+ E->getBuiltinLoc(), E->getRParenLoc(), E->getType());
+ if (!Imp)
+ return Imp.takeError();
+
+ Expr *ToCond;
+ Expr *ToLHS;
+ Expr *ToRHS;
+ SourceLocation ToBuiltinLoc, ToRParenLoc;
+ QualType ToType;
+ std::tie(ToCond, ToLHS, ToRHS, ToBuiltinLoc, ToRParenLoc, ToType) = *Imp;
+
+ ExprValueKind VK = E->getValueKind();
+ ExprObjectKind OK = E->getObjectKind();
+
+ bool TypeDependent = ToCond->isTypeDependent();
+ bool ValueDependent = ToCond->isValueDependent();
+
+ // The value of CondIsTrue only matters if the value is not
+ // condition-dependent.
+ bool CondIsTrue = !E->isConditionDependent() && E->isConditionTrue();
+
+ return new (Importer.getToContext())
+ ChooseExpr(ToBuiltinLoc, ToCond, ToLHS, ToRHS, ToType, VK, OK,
+ ToRParenLoc, CondIsTrue, TypeDependent, ValueDependent);
+}
ExpectedStmt ASTNodeImporter::VisitGNUNullExpr(GNUNullExpr *E) {
ExpectedType TypeOrErr = import(E->getType());
@@ -6141,7 +6235,7 @@ ExpectedStmt ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
auto *ToE = DeclRefExpr::Create(
Importer.getToContext(), ToQualifierLoc, ToTemplateKeywordLoc, ToDecl,
E->refersToEnclosingVariableOrCapture(), ToLocation, ToType,
- E->getValueKind(), ToFoundD, ToResInfo);
+ E->getValueKind(), ToFoundD, ToResInfo, E->isNonOdrUse());
if (E->hadMultipleCandidates())
ToE->setHadMultipleCandidates(true);
return ToE;
@@ -6327,6 +6421,13 @@ ExpectedStmt ASTNodeImporter::VisitConstantExpr(ConstantExpr *E) {
Expr *ToSubExpr;
std::tie(ToSubExpr) = *Imp;
+ // TODO: Handle the APValue::ValueKinds that require importing.
+ APValue::ValueKind Kind = E->getResultAPValueKind();
+ if (Kind == APValue::Int || Kind == APValue::Float ||
+ Kind == APValue::FixedPoint || Kind == APValue::ComplexFloat ||
+ Kind == APValue::ComplexInt)
+ return ConstantExpr::Create(Importer.getToContext(), ToSubExpr,
+ E->getAPValueResult());
return ConstantExpr::Create(Importer.getToContext(), ToSubExpr);
}
@@ -6763,8 +6864,12 @@ ExpectedStmt ASTNodeImporter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
if (!ToParamOrErr)
return ToParamOrErr.takeError();
+ auto UsedContextOrErr = Importer.ImportContext(E->getUsedContext());
+ if (!UsedContextOrErr)
+ return UsedContextOrErr.takeError();
+
return CXXDefaultArgExpr::Create(
- Importer.getToContext(), *ToUsedLocOrErr, *ToParamOrErr);
+ Importer.getToContext(), *ToUsedLocOrErr, *ToParamOrErr, *UsedContextOrErr);
}
ExpectedStmt
@@ -6898,7 +7003,8 @@ ExpectedStmt ASTNodeImporter::VisitCXXNewExpr(CXXNewExpr *E) {
FunctionDecl *ToOperatorNew, *ToOperatorDelete;
SourceRange ToTypeIdParens, ToSourceRange, ToDirectInitRange;
- Expr *ToArraySize, *ToInitializer;
+ Optional<Expr *> ToArraySize;
+ Expr *ToInitializer;
QualType ToType;
TypeSourceInfo *ToAllocatedTypeSourceInfo;
std::tie(
@@ -7051,15 +7157,20 @@ ExpectedStmt ASTNodeImporter::VisitMemberExpr(MemberExpr *E) {
DeclarationNameInfo ToMemberNameInfo(ToName, ToLoc);
+ TemplateArgumentListInfo ToTAInfo, *ResInfo = nullptr;
if (E->hasExplicitTemplateArgs()) {
- // FIXME: handle template arguments
- return make_error<ImportError>(ImportError::UnsupportedConstruct);
+ if (Error Err =
+ ImportTemplateArgumentListInfo(E->getLAngleLoc(), E->getRAngleLoc(),
+ E->template_arguments(), ToTAInfo))
+ return std::move(Err);
+ ResInfo = &ToTAInfo;
}
- return MemberExpr::Create(
- Importer.getToContext(), ToBase, E->isArrow(), ToOperatorLoc,
- ToQualifierLoc, ToTemplateKeywordLoc, ToMemberDecl, ToFoundDecl,
- ToMemberNameInfo, nullptr, ToType, E->getValueKind(), E->getObjectKind());
+ return MemberExpr::Create(Importer.getToContext(), ToBase, E->isArrow(),
+ ToOperatorLoc, ToQualifierLoc, ToTemplateKeywordLoc,
+ ToMemberDecl, ToFoundDecl, ToMemberNameInfo,
+ ResInfo, ToType, E->getValueKind(),
+ E->getObjectKind(), E->isNonOdrUse());
}
ExpectedStmt
@@ -7334,23 +7445,10 @@ ExpectedStmt ASTNodeImporter::VisitLambdaExpr(LambdaExpr *E) {
return ToClassOrErr.takeError();
CXXRecordDecl *ToClass = *ToClassOrErr;
- // NOTE: lambda classes are created with BeingDefined flag set up.
- // It means that ImportDefinition doesn't work for them and we should fill it
- // manually.
- if (ToClass->isBeingDefined()) {
- for (auto FromField : FromClass->fields()) {
- auto ToFieldOrErr = import(FromField);
- if (!ToFieldOrErr)
- return ToFieldOrErr.takeError();
- }
- }
-
auto ToCallOpOrErr = import(E->getCallOperator());
if (!ToCallOpOrErr)
return ToCallOpOrErr.takeError();
- ToClass->completeDefinition();
-
SmallVector<LambdaCapture, 8> ToCaptures;
ToCaptures.reserve(E->capture_size());
for (const auto &FromCapture : E->captures()) {
@@ -7486,8 +7584,12 @@ ExpectedStmt ASTNodeImporter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) {
if (!ToFieldOrErr)
return ToFieldOrErr.takeError();
+ auto UsedContextOrErr = Importer.ImportContext(E->getUsedContext());
+ if (!UsedContextOrErr)
+ return UsedContextOrErr.takeError();
+
return CXXDefaultInitExpr::Create(
- Importer.getToContext(), *ToBeginLocOrErr, *ToFieldOrErr);
+ Importer.getToContext(), *ToBeginLocOrErr, *ToFieldOrErr, *UsedContextOrErr);
}
ExpectedStmt ASTNodeImporter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
@@ -7613,24 +7715,22 @@ void ASTNodeImporter::ImportOverrides(CXXMethodDecl *ToMethod,
ASTImporter::ASTImporter(ASTContext &ToContext, FileManager &ToFileManager,
ASTContext &FromContext, FileManager &FromFileManager,
bool MinimalImport,
- ASTImporterLookupTable *LookupTable)
- : LookupTable(LookupTable), ToContext(ToContext), FromContext(FromContext),
+ std::shared_ptr<ASTImporterSharedState> SharedState)
+ : SharedState(SharedState), ToContext(ToContext), FromContext(FromContext),
ToFileManager(ToFileManager), FromFileManager(FromFileManager),
Minimal(MinimalImport) {
+ // Create a default state without the lookup table: LLDB case.
+ if (!SharedState) {
+ this->SharedState = std::make_shared<ASTImporterSharedState>();
+ }
+
ImportedDecls[FromContext.getTranslationUnitDecl()] =
ToContext.getTranslationUnitDecl();
}
ASTImporter::~ASTImporter() = default;
-Expected<QualType> ASTImporter::Import_New(QualType FromT) {
- QualType ToT = Import(FromT);
- if (ToT.isNull() && !FromT.isNull())
- return make_error<ImportError>();
- return ToT;
-}
-
Optional<unsigned> ASTImporter::getFieldIndex(Decl *F) {
assert(F && (isa<FieldDecl>(*F) || isa<IndirectFieldDecl>(*F)) &&
"Try to get field index for non-field.");
@@ -7663,28 +7763,46 @@ ASTImporter::findDeclsInToCtx(DeclContext *DC, DeclarationName Name) {
// then the enum constant 'A' and the variable 'A' violate ODR.
// We can diagnose this only if we search in the redecl context.
DeclContext *ReDC = DC->getRedeclContext();
- if (LookupTable) {
+ if (SharedState->getLookupTable()) {
ASTImporterLookupTable::LookupResult LookupResult =
- LookupTable->lookup(ReDC, Name);
+ SharedState->getLookupTable()->lookup(ReDC, Name);
return FoundDeclsTy(LookupResult.begin(), LookupResult.end());
} else {
- // FIXME Can we remove this kind of lookup?
- // Or lldb really needs this C/C++ lookup?
- FoundDeclsTy Result;
- ReDC->localUncachedLookup(Name, Result);
+ DeclContext::lookup_result NoloadLookupResult = ReDC->noload_lookup(Name);
+ FoundDeclsTy Result(NoloadLookupResult.begin(), NoloadLookupResult.end());
+ // We must fall back to the slow path of localUncachedLookup because it
+ // works even when there is no LookupPtr for the DC. We could use
+ // DC::buildLookup() to create the LookupPtr, but that would load external
+ // decls again, which we must avoid.
+ // Also, even if we had the LookupPtr, we would still need the slow path to
+ // find Decls which are not in the LookupPtr.
+ // These cases are handled in ASTImporterLookupTable, but we cannot use
+ // that with LLDB since it traverses the AST, which initiates the load of
+ // external decls again via DC::decls(). And again, we must avoid loading
+ // external decls during the import.
+ if (Result.empty())
+ ReDC->localUncachedLookup(Name, Result);
return Result;
}
}
void ASTImporter::AddToLookupTable(Decl *ToD) {
- if (LookupTable)
- if (auto *ToND = dyn_cast<NamedDecl>(ToD))
- LookupTable->add(ToND);
+ SharedState->addDeclToLookup(ToD);
+}
+
+Expected<Decl *> ASTImporter::ImportImpl(Decl *FromD) {
+ // Import the decl using ASTNodeImporter.
+ ASTNodeImporter Importer(*this);
+ return Importer.Visit(FromD);
+}
+
+void ASTImporter::RegisterImportedDecl(Decl *FromD, Decl *ToD) {
+ MapImported(FromD, ToD);
}
-QualType ASTImporter::Import(QualType FromT) {
+Expected<QualType> ASTImporter::Import(QualType FromT) {
if (FromT.isNull())
- return {};
+ return QualType{};
const Type *FromTy = FromT.getTypePtr();
@@ -7697,10 +7815,8 @@ QualType ASTImporter::Import(QualType FromT) {
// Import the type
ASTNodeImporter Importer(*this);
ExpectedType ToTOrErr = Importer.Visit(FromTy);
- if (!ToTOrErr) {
- llvm::consumeError(ToTOrErr.takeError());
- return {};
- }
+ if (!ToTOrErr)
+ return ToTOrErr.takeError();
// Record the imported type.
ImportedTypes[FromTy] = (*ToTOrErr).getTypePtr();
@@ -7708,33 +7824,29 @@ QualType ASTImporter::Import(QualType FromT) {
return ToContext.getQualifiedType(*ToTOrErr, FromT.getLocalQualifiers());
}
-Expected<TypeSourceInfo *> ASTImporter::Import_New(TypeSourceInfo *FromTSI) {
- TypeSourceInfo *ToTSI = Import(FromTSI);
- if (!ToTSI && FromTSI)
- return llvm::make_error<ImportError>();
- return ToTSI;
-}
-TypeSourceInfo *ASTImporter::Import(TypeSourceInfo *FromTSI) {
+Expected<TypeSourceInfo *> ASTImporter::Import(TypeSourceInfo *FromTSI) {
if (!FromTSI)
return FromTSI;
// FIXME: For now we just create a "trivial" type source info based
// on the type and a single location. Implement a real version of this.
- QualType T = Import(FromTSI->getType());
- if (T.isNull())
- return nullptr;
+ ExpectedType TOrErr = Import(FromTSI->getType());
+ if (!TOrErr)
+ return TOrErr.takeError();
+ ExpectedSLoc BeginLocOrErr = Import(FromTSI->getTypeLoc().getBeginLoc());
+ if (!BeginLocOrErr)
+ return BeginLocOrErr.takeError();
- return ToContext.getTrivialTypeSourceInfo(
- T, Import(FromTSI->getTypeLoc().getBeginLoc()));
+ return ToContext.getTrivialTypeSourceInfo(*TOrErr, *BeginLocOrErr);
}
-Expected<Attr *> ASTImporter::Import_New(const Attr *FromAttr) {
- return Import(FromAttr);
-}
-Attr *ASTImporter::Import(const Attr *FromAttr) {
+Expected<Attr *> ASTImporter::Import(const Attr *FromAttr) {
Attr *ToAttr = FromAttr->clone(ToContext);
- // NOTE: Import of SourceRange may fail.
- ToAttr->setRange(Import(FromAttr->getRange()));
+ if (auto ToRangeOrErr = Import(FromAttr->getRange()))
+ ToAttr->setRange(*ToRangeOrErr);
+ else
+ return ToRangeOrErr.takeError();
+
return ToAttr;
}
@@ -7746,53 +7858,147 @@ Decl *ASTImporter::GetAlreadyImportedOrNull(const Decl *FromD) const {
return nullptr;
}
-Expected<Decl *> ASTImporter::Import_New(Decl *FromD) {
- Decl *ToD = Import(FromD);
- if (!ToD && FromD)
- return llvm::make_error<ImportError>();
- return ToD;
+TranslationUnitDecl *ASTImporter::GetFromTU(Decl *ToD) {
+ auto FromDPos = ImportedFromDecls.find(ToD);
+ if (FromDPos == ImportedFromDecls.end())
+ return nullptr;
+ return FromDPos->second->getTranslationUnitDecl();
}
-Decl *ASTImporter::Import(Decl *FromD) {
+
+Expected<Decl *> ASTImporter::Import(Decl *FromD) {
if (!FromD)
return nullptr;
- ASTNodeImporter Importer(*this);
+ // Push FromD to the stack, and remove that when we return.
+ ImportPath.push(FromD);
+ auto ImportPathBuilder =
+ llvm::make_scope_exit([this]() { ImportPath.pop(); });
+
+ // Check whether there was a previous failed import.
+ // If yes return the existing error.
+ if (auto Error = getImportDeclErrorIfAny(FromD))
+ return make_error<ImportError>(*Error);
// Check whether we've already imported this declaration.
Decl *ToD = GetAlreadyImportedOrNull(FromD);
if (ToD) {
+ // Already imported (possibly from another TU) and with an error.
+ if (auto Error = SharedState->getImportDeclErrorIfAny(ToD)) {
+ setImportDeclError(FromD, *Error);
+ return make_error<ImportError>(*Error);
+ }
+
// If FromD has some updated flags after the last import, apply them.
updateFlags(FromD, ToD);
+ // If we encounter a cycle during an import then we save the relevant part
+ // of the import path associated with the Decl.
+ if (ImportPath.hasCycleAtBack())
+ SavedImportPaths[FromD].push_back(ImportPath.copyCycleAtBack());
return ToD;
}
- // Import the type.
- ExpectedDecl ToDOrErr = Importer.Visit(FromD);
+ // Import the declaration.
+ ExpectedDecl ToDOrErr = ImportImpl(FromD);
if (!ToDOrErr) {
- llvm::consumeError(ToDOrErr.takeError());
- return nullptr;
+ // Failed to import.
+
+ auto Pos = ImportedDecls.find(FromD);
+ if (Pos != ImportedDecls.end()) {
+ // Import failed after the object was created.
+ // Remove all references to it.
+ auto *ToD = Pos->second;
+ ImportedDecls.erase(Pos);
+
+ // ImportedDecls and ImportedFromDecls are not symmetric. It may happen
+ // (e.g. with namespaces) that several decls from the 'from' context are
+ // mapped to the same decl in the 'to' context. If we removed entries
+ // from the LookupTable here then we may end up removing them multiple
+ // times.
+
+ // The LookupTable contains only decls which are in the 'to' context.
+ // Remove a decl from the LookupTable only if it was *imported* into the
+ // 'to' context (and do not remove it if it was added during the initial
+ // traversal of the 'to' context).
+ auto PosF = ImportedFromDecls.find(ToD);
+ if (PosF != ImportedFromDecls.end()) {
+ SharedState->removeDeclFromLookup(ToD);
+ ImportedFromDecls.erase(PosF);
+ }
+
+ // FIXME: AST may contain remaining references to the failed object.
+ // However, the ImportDeclErrors in the shared state contains all the
+ // failed objects together with their error.
+ }
+
+ // Error encountered for the first time.
+ // After takeError the error is not usable any more in ToDOrErr.
+ // Get a copy of the error object (is there a simpler solution for this?).
+ ImportError ErrOut;
+ handleAllErrors(ToDOrErr.takeError(),
+ [&ErrOut](const ImportError &E) { ErrOut = E; });
+ setImportDeclError(FromD, ErrOut);
+ // Set the error for the mapped to Decl, which is in the "to" context.
+ if (Pos != ImportedDecls.end())
+ SharedState->setImportDeclError(Pos->second, ErrOut);
+
+ // Set the error for all nodes which have been created before we
+ // recognized the error.
+ for (const auto &Path : SavedImportPaths[FromD])
+ for (Decl *FromDi : Path) {
+ setImportDeclError(FromDi, ErrOut);
+ // FIXME: Should we remove these Decls from ImportedDecls?
+ // Set the error for the mapped to Decl, which is in the "to" context.
+ auto Ii = ImportedDecls.find(FromDi);
+ if (Ii != ImportedDecls.end())
+ SharedState->setImportDeclError(Ii->second, ErrOut);
+ // FIXME Should we remove these Decls from the LookupTable,
+ // and from ImportedFromDecls?
+ }
+ SavedImportPaths[FromD].clear();
+
+ // Do not return ToDOrErr, error was taken out of it.
+ return make_error<ImportError>(ErrOut);
}
+
ToD = *ToDOrErr;
- // Once the decl is connected to the existing declarations, i.e. when the
- // redecl chain is properly set then we populate the lookup again.
- // This way the primary context will be able to find all decls.
- AddToLookupTable(ToD);
+ // FIXME: Handle the "already imported with error" case. We can get here
+ // nullptr only if GetImportedOrCreateDecl returned nullptr (after a
+ // previously failed create was requested).
+ // Later GetImportedOrCreateDecl can be updated to return the error.
+ if (!ToD) {
+ auto Err = getImportDeclErrorIfAny(FromD);
+ assert(Err);
+ return make_error<ImportError>(*Err);
+ }
+
+ // We could import from the current TU without error. But previously we
+ // had already imported a Decl as `ToD` from another TU (with another
+ // ASTImporter object) and with an error.
+ if (auto Error = SharedState->getImportDeclErrorIfAny(ToD)) {
+ setImportDeclError(FromD, *Error);
+ return make_error<ImportError>(*Error);
+ }
+
+ // Make sure that ImportImpl registered the imported decl.
+ assert(ImportedDecls.count(FromD) != 0 && "Missing call to MapImported?");
// Notify subclasses.
Imported(FromD, ToD);
updateFlags(FromD, ToD);
- return ToD;
+ SavedImportPaths[FromD].clear();
+ return ToDOrErr;
}
Expected<DeclContext *> ASTImporter::ImportContext(DeclContext *FromDC) {
if (!FromDC)
return FromDC;
- auto *ToDC = cast_or_null<DeclContext>(Import(cast<Decl>(FromDC)));
- if (!ToDC)
- return nullptr;
+ ExpectedDecl ToDCOrErr = Import(cast<Decl>(FromDC));
+ if (!ToDCOrErr)
+ return ToDCOrErr.takeError();
+ auto *ToDC = cast<DeclContext>(*ToDCOrErr);
// When we're using a record/enum/Objective-C class/protocol as a context, we
// need it to have a definition.
@@ -7845,30 +8051,18 @@ Expected<DeclContext *> ASTImporter::ImportContext(DeclContext *FromDC) {
return ToDC;
}
-Expected<Expr *> ASTImporter::Import_New(Expr *FromE) {
- Expr *ToE = Import(FromE);
- if (!ToE && FromE)
- return llvm::make_error<ImportError>();
- return ToE;
-}
-Expr *ASTImporter::Import(Expr *FromE) {
- if (!FromE)
- return nullptr;
-
- return cast_or_null<Expr>(Import(cast<Stmt>(FromE)));
+Expected<Expr *> ASTImporter::Import(Expr *FromE) {
+ if (ExpectedStmt ToSOrErr = Import(cast_or_null<Stmt>(FromE)))
+ return cast_or_null<Expr>(*ToSOrErr);
+ else
+ return ToSOrErr.takeError();
}
-Expected<Stmt *> ASTImporter::Import_New(Stmt *FromS) {
- Stmt *ToS = Import(FromS);
- if (!ToS && FromS)
- return llvm::make_error<ImportError>();
- return ToS;
-}
-Stmt *ASTImporter::Import(Stmt *FromS) {
+Expected<Stmt *> ASTImporter::Import(Stmt *FromS) {
if (!FromS)
return nullptr;
- // Check whether we've already imported this declaration.
+ // Check whether we've already imported this statement.
llvm::DenseMap<Stmt *, Stmt *>::iterator Pos = ImportedStmts.find(FromS);
if (Pos != ImportedStmts.end())
return Pos->second;
@@ -7876,10 +8070,8 @@ Stmt *ASTImporter::Import(Stmt *FromS) {
// Import the statement.
ASTNodeImporter Importer(*this);
ExpectedStmt ToSOrErr = Importer.Visit(FromS);
- if (!ToSOrErr) {
- llvm::consumeError(ToSOrErr.takeError());
- return nullptr;
- }
+ if (!ToSOrErr)
+ return ToSOrErr;
if (auto *ToE = dyn_cast<Expr>(*ToSOrErr)) {
auto *FromE = cast<Expr>(FromS);
@@ -7894,77 +8086,68 @@ Stmt *ASTImporter::Import(Stmt *FromS) {
FromE->containsUnexpandedParameterPack());
}
- // Record the imported declaration.
+ // Record the imported statement object.
ImportedStmts[FromS] = *ToSOrErr;
- return *ToSOrErr;
+ return ToSOrErr;
}
Expected<NestedNameSpecifier *>
-ASTImporter::Import_New(NestedNameSpecifier *FromNNS) {
- NestedNameSpecifier *ToNNS = Import(FromNNS);
- if (!ToNNS && FromNNS)
- return llvm::make_error<ImportError>();
- return ToNNS;
-}
-NestedNameSpecifier *ASTImporter::Import(NestedNameSpecifier *FromNNS) {
+ASTImporter::Import(NestedNameSpecifier *FromNNS) {
if (!FromNNS)
return nullptr;
- NestedNameSpecifier *prefix = Import(FromNNS->getPrefix());
+ NestedNameSpecifier *Prefix = nullptr;
+ if (Error Err = importInto(Prefix, FromNNS->getPrefix()))
+ return std::move(Err);
switch (FromNNS->getKind()) {
case NestedNameSpecifier::Identifier:
- if (IdentifierInfo *II = Import(FromNNS->getAsIdentifier())) {
- return NestedNameSpecifier::Create(ToContext, prefix, II);
- }
- return nullptr;
+ assert(FromNNS->getAsIdentifier() && "NNS should contain identifier.");
+ return NestedNameSpecifier::Create(ToContext, Prefix,
+ Import(FromNNS->getAsIdentifier()));
case NestedNameSpecifier::Namespace:
- if (auto *NS =
- cast_or_null<NamespaceDecl>(Import(FromNNS->getAsNamespace()))) {
- return NestedNameSpecifier::Create(ToContext, prefix, NS);
- }
- return nullptr;
+ if (ExpectedDecl NSOrErr = Import(FromNNS->getAsNamespace())) {
+ return NestedNameSpecifier::Create(ToContext, Prefix,
+ cast<NamespaceDecl>(*NSOrErr));
+ } else
+ return NSOrErr.takeError();
case NestedNameSpecifier::NamespaceAlias:
- if (auto *NSAD =
- cast_or_null<NamespaceAliasDecl>(Import(FromNNS->getAsNamespaceAlias()))) {
- return NestedNameSpecifier::Create(ToContext, prefix, NSAD);
- }
- return nullptr;
+ if (ExpectedDecl NSADOrErr = Import(FromNNS->getAsNamespaceAlias()))
+ return NestedNameSpecifier::Create(ToContext, Prefix,
+ cast<NamespaceAliasDecl>(*NSADOrErr));
+ else
+ return NSADOrErr.takeError();
case NestedNameSpecifier::Global:
return NestedNameSpecifier::GlobalSpecifier(ToContext);
case NestedNameSpecifier::Super:
- if (auto *RD =
- cast_or_null<CXXRecordDecl>(Import(FromNNS->getAsRecordDecl()))) {
- return NestedNameSpecifier::SuperSpecifier(ToContext, RD);
- }
- return nullptr;
+ if (ExpectedDecl RDOrErr = Import(FromNNS->getAsRecordDecl()))
+ return NestedNameSpecifier::SuperSpecifier(ToContext,
+ cast<CXXRecordDecl>(*RDOrErr));
+ else
+ return RDOrErr.takeError();
case NestedNameSpecifier::TypeSpec:
- case NestedNameSpecifier::TypeSpecWithTemplate: {
- QualType T = Import(QualType(FromNNS->getAsType(), 0u));
- if (!T.isNull()) {
- bool bTemplate = FromNNS->getKind() ==
- NestedNameSpecifier::TypeSpecWithTemplate;
- return NestedNameSpecifier::Create(ToContext, prefix,
- bTemplate, T.getTypePtr());
- }
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ if (Expected<QualType> TyOrErr =
+ Import(QualType(FromNNS->getAsType(), 0u))) {
+ bool TSTemplate =
+ FromNNS->getKind() == NestedNameSpecifier::TypeSpecWithTemplate;
+ return NestedNameSpecifier::Create(ToContext, Prefix, TSTemplate,
+ TyOrErr->getTypePtr());
+ } else {
+ return TyOrErr.takeError();
}
- return nullptr;
}
llvm_unreachable("Invalid nested name specifier kind");
}
Expected<NestedNameSpecifierLoc>
-ASTImporter::Import_New(NestedNameSpecifierLoc FromNNS) {
- NestedNameSpecifierLoc ToNNS = Import(FromNNS);
- return ToNNS;
-}
-NestedNameSpecifierLoc ASTImporter::Import(NestedNameSpecifierLoc FromNNS) {
+ASTImporter::Import(NestedNameSpecifierLoc FromNNS) {
// Copied from NestedNameSpecifier mostly.
SmallVector<NestedNameSpecifierLoc , 8> NestedNames;
NestedNameSpecifierLoc NNS = FromNNS;
@@ -7980,54 +8163,62 @@ NestedNameSpecifierLoc ASTImporter::Import(NestedNameSpecifierLoc FromNNS) {
while (!NestedNames.empty()) {
NNS = NestedNames.pop_back_val();
- NestedNameSpecifier *Spec = Import(NNS.getNestedNameSpecifier());
- if (!Spec)
- return NestedNameSpecifierLoc();
+ NestedNameSpecifier *Spec = nullptr;
+ if (Error Err = importInto(Spec, NNS.getNestedNameSpecifier()))
+ return std::move(Err);
NestedNameSpecifier::SpecifierKind Kind = Spec->getKind();
+
+ SourceLocation ToLocalBeginLoc, ToLocalEndLoc;
+ if (Kind != NestedNameSpecifier::Super) {
+ if (Error Err = importInto(ToLocalBeginLoc, NNS.getLocalBeginLoc()))
+ return std::move(Err);
+
+ if (Kind != NestedNameSpecifier::Global)
+ if (Error Err = importInto(ToLocalEndLoc, NNS.getLocalEndLoc()))
+ return std::move(Err);
+ }
+
switch (Kind) {
case NestedNameSpecifier::Identifier:
- Builder.Extend(getToContext(),
- Spec->getAsIdentifier(),
- Import(NNS.getLocalBeginLoc()),
- Import(NNS.getLocalEndLoc()));
+ Builder.Extend(getToContext(), Spec->getAsIdentifier(), ToLocalBeginLoc,
+ ToLocalEndLoc);
break;
case NestedNameSpecifier::Namespace:
- Builder.Extend(getToContext(),
- Spec->getAsNamespace(),
- Import(NNS.getLocalBeginLoc()),
- Import(NNS.getLocalEndLoc()));
+ Builder.Extend(getToContext(), Spec->getAsNamespace(), ToLocalBeginLoc,
+ ToLocalEndLoc);
break;
case NestedNameSpecifier::NamespaceAlias:
- Builder.Extend(getToContext(),
- Spec->getAsNamespaceAlias(),
- Import(NNS.getLocalBeginLoc()),
- Import(NNS.getLocalEndLoc()));
+ Builder.Extend(getToContext(), Spec->getAsNamespaceAlias(),
+ ToLocalBeginLoc, ToLocalEndLoc);
break;
case NestedNameSpecifier::TypeSpec:
case NestedNameSpecifier::TypeSpecWithTemplate: {
+ SourceLocation ToTLoc;
+ if (Error Err = importInto(ToTLoc, NNS.getTypeLoc().getBeginLoc()))
+ return std::move(Err);
TypeSourceInfo *TSI = getToContext().getTrivialTypeSourceInfo(
- QualType(Spec->getAsType(), 0));
- Builder.Extend(getToContext(),
- Import(NNS.getLocalBeginLoc()),
- TSI->getTypeLoc(),
- Import(NNS.getLocalEndLoc()));
+ QualType(Spec->getAsType(), 0), ToTLoc);
+ Builder.Extend(getToContext(), ToLocalBeginLoc, TSI->getTypeLoc(),
+ ToLocalEndLoc);
break;
}
case NestedNameSpecifier::Global:
- Builder.MakeGlobal(getToContext(), Import(NNS.getLocalBeginLoc()));
+ Builder.MakeGlobal(getToContext(), ToLocalBeginLoc);
break;
case NestedNameSpecifier::Super: {
- SourceRange ToRange = Import(NNS.getSourceRange());
- Builder.MakeSuper(getToContext(),
- Spec->getAsRecordDecl(),
- ToRange.getBegin(),
- ToRange.getEnd());
+ auto ToSourceRangeOrErr = Import(NNS.getSourceRange());
+ if (!ToSourceRangeOrErr)
+ return ToSourceRangeOrErr.takeError();
+
+ Builder.MakeSuper(getToContext(), Spec->getAsRecordDecl(),
+ ToSourceRangeOrErr->getBegin(),
+ ToSourceRangeOrErr->getEnd());
}
}
}
@@ -8035,137 +8226,126 @@ NestedNameSpecifierLoc ASTImporter::Import(NestedNameSpecifierLoc FromNNS) {
return Builder.getWithLocInContext(getToContext());
}
-Expected<TemplateName> ASTImporter::Import_New(TemplateName From) {
- TemplateName To = Import(From);
- if (To.isNull() && !From.isNull())
- return llvm::make_error<ImportError>();
- return To;
-}
-TemplateName ASTImporter::Import(TemplateName From) {
+Expected<TemplateName> ASTImporter::Import(TemplateName From) {
switch (From.getKind()) {
case TemplateName::Template:
- if (auto *ToTemplate =
- cast_or_null<TemplateDecl>(Import(From.getAsTemplateDecl())))
- return TemplateName(ToTemplate);
-
- return {};
+ if (ExpectedDecl ToTemplateOrErr = Import(From.getAsTemplateDecl()))
+ return TemplateName(cast<TemplateDecl>(*ToTemplateOrErr));
+ else
+ return ToTemplateOrErr.takeError();
case TemplateName::OverloadedTemplate: {
OverloadedTemplateStorage *FromStorage = From.getAsOverloadedTemplate();
UnresolvedSet<2> ToTemplates;
for (auto *I : *FromStorage) {
- if (auto *To = cast_or_null<NamedDecl>(Import(I)))
- ToTemplates.addDecl(To);
+ if (auto ToOrErr = Import(I))
+ ToTemplates.addDecl(cast<NamedDecl>(*ToOrErr));
else
- return {};
+ return ToOrErr.takeError();
}
return ToContext.getOverloadedTemplateName(ToTemplates.begin(),
ToTemplates.end());
}
+ case TemplateName::AssumedTemplate: {
+ AssumedTemplateStorage *FromStorage = From.getAsAssumedTemplateName();
+ auto DeclNameOrErr = Import(FromStorage->getDeclName());
+ if (!DeclNameOrErr)
+ return DeclNameOrErr.takeError();
+ return ToContext.getAssumedTemplateName(*DeclNameOrErr);
+ }
+
case TemplateName::QualifiedTemplate: {
QualifiedTemplateName *QTN = From.getAsQualifiedTemplateName();
- NestedNameSpecifier *Qualifier = Import(QTN->getQualifier());
- if (!Qualifier)
- return {};
-
- if (auto *ToTemplate =
- cast_or_null<TemplateDecl>(Import(From.getAsTemplateDecl())))
- return ToContext.getQualifiedTemplateName(Qualifier,
- QTN->hasTemplateKeyword(),
- ToTemplate);
-
- return {};
+ auto QualifierOrErr = Import(QTN->getQualifier());
+ if (!QualifierOrErr)
+ return QualifierOrErr.takeError();
+
+ if (ExpectedDecl ToTemplateOrErr = Import(From.getAsTemplateDecl()))
+ return ToContext.getQualifiedTemplateName(
+ *QualifierOrErr, QTN->hasTemplateKeyword(),
+ cast<TemplateDecl>(*ToTemplateOrErr));
+ else
+ return ToTemplateOrErr.takeError();
}
case TemplateName::DependentTemplate: {
DependentTemplateName *DTN = From.getAsDependentTemplateName();
- NestedNameSpecifier *Qualifier = Import(DTN->getQualifier());
- if (!Qualifier)
- return {};
+ auto QualifierOrErr = Import(DTN->getQualifier());
+ if (!QualifierOrErr)
+ return QualifierOrErr.takeError();
if (DTN->isIdentifier()) {
- return ToContext.getDependentTemplateName(Qualifier,
+ return ToContext.getDependentTemplateName(*QualifierOrErr,
Import(DTN->getIdentifier()));
}
- return ToContext.getDependentTemplateName(Qualifier, DTN->getOperator());
+ return ToContext.getDependentTemplateName(*QualifierOrErr,
+ DTN->getOperator());
}
case TemplateName::SubstTemplateTemplateParm: {
- SubstTemplateTemplateParmStorage *subst
- = From.getAsSubstTemplateTemplateParm();
- auto *param =
- cast_or_null<TemplateTemplateParmDecl>(Import(subst->getParameter()));
- if (!param)
- return {};
+ SubstTemplateTemplateParmStorage *Subst =
+ From.getAsSubstTemplateTemplateParm();
+ ExpectedDecl ParamOrErr = Import(Subst->getParameter());
+ if (!ParamOrErr)
+ return ParamOrErr.takeError();
- TemplateName replacement = Import(subst->getReplacement());
- if (replacement.isNull())
- return {};
+ auto ReplacementOrErr = Import(Subst->getReplacement());
+ if (!ReplacementOrErr)
+ return ReplacementOrErr.takeError();
- return ToContext.getSubstTemplateTemplateParm(param, replacement);
+ return ToContext.getSubstTemplateTemplateParm(
+ cast<TemplateTemplateParmDecl>(*ParamOrErr), *ReplacementOrErr);
}
case TemplateName::SubstTemplateTemplateParmPack: {
SubstTemplateTemplateParmPackStorage *SubstPack
= From.getAsSubstTemplateTemplateParmPack();
- auto *Param =
- cast_or_null<TemplateTemplateParmDecl>(
- Import(SubstPack->getParameterPack()));
- if (!Param)
- return {};
+ ExpectedDecl ParamOrErr = Import(SubstPack->getParameterPack());
+ if (!ParamOrErr)
+ return ParamOrErr.takeError();
ASTNodeImporter Importer(*this);
- Expected<TemplateArgument> ArgPack
- = Importer.ImportTemplateArgument(SubstPack->getArgumentPack());
- if (!ArgPack) {
- llvm::consumeError(ArgPack.takeError());
- return {};
- }
+ auto ArgPackOrErr =
+ Importer.ImportTemplateArgument(SubstPack->getArgumentPack());
+ if (!ArgPackOrErr)
+ return ArgPackOrErr.takeError();
- return ToContext.getSubstTemplateTemplateParmPack(Param, *ArgPack);
+ return ToContext.getSubstTemplateTemplateParmPack(
+ cast<TemplateTemplateParmDecl>(*ParamOrErr), *ArgPackOrErr);
}
}
llvm_unreachable("Invalid template name kind");
}
-Expected<SourceLocation> ASTImporter::Import_New(SourceLocation FromLoc) {
- SourceLocation ToLoc = Import(FromLoc);
- if (ToLoc.isInvalid() && !FromLoc.isInvalid())
- return llvm::make_error<ImportError>();
- return ToLoc;
-}
-SourceLocation ASTImporter::Import(SourceLocation FromLoc) {
+Expected<SourceLocation> ASTImporter::Import(SourceLocation FromLoc) {
if (FromLoc.isInvalid())
- return {};
+ return SourceLocation{};
SourceManager &FromSM = FromContext.getSourceManager();
+ bool IsBuiltin = FromSM.isWrittenInBuiltinFile(FromLoc);
std::pair<FileID, unsigned> Decomposed = FromSM.getDecomposedLoc(FromLoc);
- FileID ToFileID = Import(Decomposed.first);
- if (ToFileID.isInvalid())
- return {};
+ Expected<FileID> ToFileIDOrErr = Import(Decomposed.first, IsBuiltin);
+ if (!ToFileIDOrErr)
+ return ToFileIDOrErr.takeError();
SourceManager &ToSM = ToContext.getSourceManager();
- return ToSM.getComposedLoc(ToFileID, Decomposed.second);
+ return ToSM.getComposedLoc(*ToFileIDOrErr, Decomposed.second);
}
-Expected<SourceRange> ASTImporter::Import_New(SourceRange FromRange) {
- SourceRange ToRange = Import(FromRange);
- return ToRange;
-}
-SourceRange ASTImporter::Import(SourceRange FromRange) {
- return SourceRange(Import(FromRange.getBegin()), Import(FromRange.getEnd()));
-}
+Expected<SourceRange> ASTImporter::Import(SourceRange FromRange) {
+ SourceLocation ToBegin, ToEnd;
+ if (Error Err = importInto(ToBegin, FromRange.getBegin()))
+ return std::move(Err);
+ if (Error Err = importInto(ToEnd, FromRange.getEnd()))
+ return std::move(Err);
-Expected<FileID> ASTImporter::Import_New(FileID FromID) {
- FileID ToID = Import(FromID);
- if (ToID.isInvalid() && FromID.isValid())
- return llvm::make_error<ImportError>();
- return ToID;
+ return SourceRange(ToBegin, ToEnd);
}
-FileID ASTImporter::Import(FileID FromID) {
+
+Expected<FileID> ASTImporter::Import(FileID FromID, bool IsBuiltin) {
llvm::DenseMap<FileID, FileID>::iterator Pos = ImportedFileIDs.find(FromID);
if (Pos != ImportedFileIDs.end())
return Pos->second;
@@ -8178,38 +8358,58 @@ FileID ASTImporter::Import(FileID FromID) {
FileID ToID;
if (FromSLoc.isExpansion()) {
const SrcMgr::ExpansionInfo &FromEx = FromSLoc.getExpansion();
- SourceLocation ToSpLoc = Import(FromEx.getSpellingLoc());
- SourceLocation ToExLocS = Import(FromEx.getExpansionLocStart());
+ ExpectedSLoc ToSpLoc = Import(FromEx.getSpellingLoc());
+ if (!ToSpLoc)
+ return ToSpLoc.takeError();
+ ExpectedSLoc ToExLocS = Import(FromEx.getExpansionLocStart());
+ if (!ToExLocS)
+ return ToExLocS.takeError();
unsigned TokenLen = FromSM.getFileIDSize(FromID);
SourceLocation MLoc;
if (FromEx.isMacroArgExpansion()) {
- MLoc = ToSM.createMacroArgExpansionLoc(ToSpLoc, ToExLocS, TokenLen);
+ MLoc = ToSM.createMacroArgExpansionLoc(*ToSpLoc, *ToExLocS, TokenLen);
} else {
- SourceLocation ToExLocE = Import(FromEx.getExpansionLocEnd());
- MLoc = ToSM.createExpansionLoc(ToSpLoc, ToExLocS, ToExLocE, TokenLen,
- FromEx.isExpansionTokenRange());
+ if (ExpectedSLoc ToExLocE = Import(FromEx.getExpansionLocEnd()))
+ MLoc = ToSM.createExpansionLoc(*ToSpLoc, *ToExLocS, *ToExLocE, TokenLen,
+ FromEx.isExpansionTokenRange());
+ else
+ return ToExLocE.takeError();
}
ToID = ToSM.getFileID(MLoc);
} else {
- // Include location of this file.
- SourceLocation ToIncludeLoc = Import(FromSLoc.getFile().getIncludeLoc());
-
const SrcMgr::ContentCache *Cache = FromSLoc.getFile().getContentCache();
- if (Cache->OrigEntry && Cache->OrigEntry->getDir()) {
- // FIXME: We probably want to use getVirtualFile(), so we don't hit the
- // disk again
- // FIXME: We definitely want to re-use the existing MemoryBuffer, rather
- // than mmap the files several times.
- const FileEntry *Entry =
- ToFileManager.getFile(Cache->OrigEntry->getName());
- if (!Entry)
- return {};
- ToID = ToSM.createFileID(Entry, ToIncludeLoc,
- FromSLoc.getFile().getFileCharacteristic());
- } else {
+
+ if (!IsBuiltin) {
+ // Include location of this file.
+ ExpectedSLoc ToIncludeLoc = Import(FromSLoc.getFile().getIncludeLoc());
+ if (!ToIncludeLoc)
+ return ToIncludeLoc.takeError();
+
+ if (Cache->OrigEntry && Cache->OrigEntry->getDir()) {
+ // FIXME: We probably want to use getVirtualFile(), so we don't hit the
+ // disk again
+ // FIXME: We definitely want to re-use the existing MemoryBuffer, rather
+ // than mmap the files several times.
+ const FileEntry *Entry =
+ ToFileManager.getFile(Cache->OrigEntry->getName());
+ // FIXME: The filename may be a virtual name that probably does not
+ // point to a valid file, and then we get no Entry here. In this case try
+ // with the memory buffer below.
+ if (Entry)
+ ToID = ToSM.createFileID(Entry, *ToIncludeLoc,
+ FromSLoc.getFile().getFileCharacteristic());
+ }
+ }
+
+ if (ToID.isInvalid() || IsBuiltin) {
// FIXME: We want to re-use the existing MemoryBuffer!
- const llvm::MemoryBuffer *FromBuf =
- Cache->getBuffer(FromContext.getDiagnostics(), FromSM);
+ bool Invalid = true;
+ const llvm::MemoryBuffer *FromBuf = Cache->getBuffer(
+ FromContext.getDiagnostics(), FromSM, SourceLocation{}, &Invalid);
+ if (!FromBuf || Invalid)
+ // FIXME: Use a new error kind?
+ return llvm::make_error<ImportError>(ImportError::Unknown);
+
std::unique_ptr<llvm::MemoryBuffer> ToBuf =
llvm::MemoryBuffer::getMemBufferCopy(FromBuf->getBuffer(),
FromBuf->getBufferIdentifier());
@@ -8218,186 +8418,187 @@ FileID ASTImporter::Import(FileID FromID) {
}
}
+ assert(ToID.isValid() && "Unexpected invalid fileID was created.");
+
ImportedFileIDs[FromID] = ToID;
return ToID;
}
-Expected<CXXCtorInitializer *>
-ASTImporter::Import_New(CXXCtorInitializer *From) {
- CXXCtorInitializer *To = Import(From);
- if (!To && From)
- return llvm::make_error<ImportError>();
- return To;
-}
-CXXCtorInitializer *ASTImporter::Import(CXXCtorInitializer *From) {
- Expr *ToExpr = Import(From->getInit());
- if (!ToExpr && From->getInit())
- return nullptr;
+Expected<CXXCtorInitializer *> ASTImporter::Import(CXXCtorInitializer *From) {
+ ExpectedExpr ToExprOrErr = Import(From->getInit());
+ if (!ToExprOrErr)
+ return ToExprOrErr.takeError();
+
+ auto LParenLocOrErr = Import(From->getLParenLoc());
+ if (!LParenLocOrErr)
+ return LParenLocOrErr.takeError();
+
+ auto RParenLocOrErr = Import(From->getRParenLoc());
+ if (!RParenLocOrErr)
+ return RParenLocOrErr.takeError();
if (From->isBaseInitializer()) {
- TypeSourceInfo *ToTInfo = Import(From->getTypeSourceInfo());
- if (!ToTInfo && From->getTypeSourceInfo())
- return nullptr;
+ auto ToTInfoOrErr = Import(From->getTypeSourceInfo());
+ if (!ToTInfoOrErr)
+ return ToTInfoOrErr.takeError();
+
+ SourceLocation EllipsisLoc;
+ if (From->isPackExpansion())
+ if (Error Err = importInto(EllipsisLoc, From->getEllipsisLoc()))
+ return std::move(Err);
return new (ToContext) CXXCtorInitializer(
- ToContext, ToTInfo, From->isBaseVirtual(), Import(From->getLParenLoc()),
- ToExpr, Import(From->getRParenLoc()),
- From->isPackExpansion() ? Import(From->getEllipsisLoc())
- : SourceLocation());
+ ToContext, *ToTInfoOrErr, From->isBaseVirtual(), *LParenLocOrErr,
+ *ToExprOrErr, *RParenLocOrErr, EllipsisLoc);
} else if (From->isMemberInitializer()) {
- auto *ToField = cast_or_null<FieldDecl>(Import(From->getMember()));
- if (!ToField && From->getMember())
- return nullptr;
+ ExpectedDecl ToFieldOrErr = Import(From->getMember());
+ if (!ToFieldOrErr)
+ return ToFieldOrErr.takeError();
+
+ auto MemberLocOrErr = Import(From->getMemberLocation());
+ if (!MemberLocOrErr)
+ return MemberLocOrErr.takeError();
return new (ToContext) CXXCtorInitializer(
- ToContext, ToField, Import(From->getMemberLocation()),
- Import(From->getLParenLoc()), ToExpr, Import(From->getRParenLoc()));
+ ToContext, cast_or_null<FieldDecl>(*ToFieldOrErr), *MemberLocOrErr,
+ *LParenLocOrErr, *ToExprOrErr, *RParenLocOrErr);
} else if (From->isIndirectMemberInitializer()) {
- auto *ToIField = cast_or_null<IndirectFieldDecl>(
- Import(From->getIndirectMember()));
- if (!ToIField && From->getIndirectMember())
- return nullptr;
+ ExpectedDecl ToIFieldOrErr = Import(From->getIndirectMember());
+ if (!ToIFieldOrErr)
+ return ToIFieldOrErr.takeError();
+
+ auto MemberLocOrErr = Import(From->getMemberLocation());
+ if (!MemberLocOrErr)
+ return MemberLocOrErr.takeError();
return new (ToContext) CXXCtorInitializer(
- ToContext, ToIField, Import(From->getMemberLocation()),
- Import(From->getLParenLoc()), ToExpr, Import(From->getRParenLoc()));
+ ToContext, cast_or_null<IndirectFieldDecl>(*ToIFieldOrErr),
+ *MemberLocOrErr, *LParenLocOrErr, *ToExprOrErr, *RParenLocOrErr);
} else if (From->isDelegatingInitializer()) {
- TypeSourceInfo *ToTInfo = Import(From->getTypeSourceInfo());
- if (!ToTInfo && From->getTypeSourceInfo())
- return nullptr;
+ auto ToTInfoOrErr = Import(From->getTypeSourceInfo());
+ if (!ToTInfoOrErr)
+ return ToTInfoOrErr.takeError();
return new (ToContext)
- CXXCtorInitializer(ToContext, ToTInfo, Import(From->getLParenLoc()),
- ToExpr, Import(From->getRParenLoc()));
+ CXXCtorInitializer(ToContext, *ToTInfoOrErr, *LParenLocOrErr,
+ *ToExprOrErr, *RParenLocOrErr);
} else {
- return nullptr;
+ // FIXME: assert?
+ return make_error<ImportError>();
}
}
Expected<CXXBaseSpecifier *>
-ASTImporter::Import_New(const CXXBaseSpecifier *From) {
- CXXBaseSpecifier *To = Import(From);
- if (!To && From)
- return llvm::make_error<ImportError>();
- return To;
-}
-CXXBaseSpecifier *ASTImporter::Import(const CXXBaseSpecifier *BaseSpec) {
+ASTImporter::Import(const CXXBaseSpecifier *BaseSpec) {
auto Pos = ImportedCXXBaseSpecifiers.find(BaseSpec);
if (Pos != ImportedCXXBaseSpecifiers.end())
return Pos->second;
+ Expected<SourceRange> ToSourceRange = Import(BaseSpec->getSourceRange());
+ if (!ToSourceRange)
+ return ToSourceRange.takeError();
+ Expected<TypeSourceInfo *> ToTSI = Import(BaseSpec->getTypeSourceInfo());
+ if (!ToTSI)
+ return ToTSI.takeError();
+ ExpectedSLoc ToEllipsisLoc = Import(BaseSpec->getEllipsisLoc());
+ if (!ToEllipsisLoc)
+ return ToEllipsisLoc.takeError();
CXXBaseSpecifier *Imported = new (ToContext) CXXBaseSpecifier(
- Import(BaseSpec->getSourceRange()),
- BaseSpec->isVirtual(), BaseSpec->isBaseOfClass(),
- BaseSpec->getAccessSpecifierAsWritten(),
- Import(BaseSpec->getTypeSourceInfo()),
- Import(BaseSpec->getEllipsisLoc()));
+ *ToSourceRange, BaseSpec->isVirtual(), BaseSpec->isBaseOfClass(),
+ BaseSpec->getAccessSpecifierAsWritten(), *ToTSI, *ToEllipsisLoc);
ImportedCXXBaseSpecifiers[BaseSpec] = Imported;
return Imported;
}
-Error ASTImporter::ImportDefinition_New(Decl *From) {
- Decl *To = Import(From);
- if (!To)
- return llvm::make_error<ImportError>();
+Error ASTImporter::ImportDefinition(Decl *From) {
+ ExpectedDecl ToOrErr = Import(From);
+ if (!ToOrErr)
+ return ToOrErr.takeError();
+ Decl *To = *ToOrErr;
- if (auto *FromDC = cast<DeclContext>(From)) {
- ASTNodeImporter Importer(*this);
+ auto *FromDC = cast<DeclContext>(From);
+ ASTNodeImporter Importer(*this);
- if (auto *ToRecord = dyn_cast<RecordDecl>(To)) {
- if (!ToRecord->getDefinition()) {
- return Importer.ImportDefinition(
- cast<RecordDecl>(FromDC), ToRecord,
- ASTNodeImporter::IDK_Everything);
- }
+ if (auto *ToRecord = dyn_cast<RecordDecl>(To)) {
+ if (!ToRecord->getDefinition()) {
+ return Importer.ImportDefinition(
+ cast<RecordDecl>(FromDC), ToRecord,
+ ASTNodeImporter::IDK_Everything);
}
+ }
- if (auto *ToEnum = dyn_cast<EnumDecl>(To)) {
- if (!ToEnum->getDefinition()) {
- return Importer.ImportDefinition(
- cast<EnumDecl>(FromDC), ToEnum, ASTNodeImporter::IDK_Everything);
- }
+ if (auto *ToEnum = dyn_cast<EnumDecl>(To)) {
+ if (!ToEnum->getDefinition()) {
+ return Importer.ImportDefinition(
+ cast<EnumDecl>(FromDC), ToEnum, ASTNodeImporter::IDK_Everything);
}
+ }
- if (auto *ToIFace = dyn_cast<ObjCInterfaceDecl>(To)) {
- if (!ToIFace->getDefinition()) {
- return Importer.ImportDefinition(
- cast<ObjCInterfaceDecl>(FromDC), ToIFace,
- ASTNodeImporter::IDK_Everything);
- }
+ if (auto *ToIFace = dyn_cast<ObjCInterfaceDecl>(To)) {
+ if (!ToIFace->getDefinition()) {
+ return Importer.ImportDefinition(
+ cast<ObjCInterfaceDecl>(FromDC), ToIFace,
+ ASTNodeImporter::IDK_Everything);
}
+ }
- if (auto *ToProto = dyn_cast<ObjCProtocolDecl>(To)) {
- if (!ToProto->getDefinition()) {
- return Importer.ImportDefinition(
- cast<ObjCProtocolDecl>(FromDC), ToProto,
- ASTNodeImporter::IDK_Everything);
- }
+ if (auto *ToProto = dyn_cast<ObjCProtocolDecl>(To)) {
+ if (!ToProto->getDefinition()) {
+ return Importer.ImportDefinition(
+ cast<ObjCProtocolDecl>(FromDC), ToProto,
+ ASTNodeImporter::IDK_Everything);
}
-
- return Importer.ImportDeclContext(FromDC, true);
}
- return Error::success();
+ return Importer.ImportDeclContext(FromDC, true);
}
-void ASTImporter::ImportDefinition(Decl *From) {
- Error Err = ImportDefinition_New(From);
- llvm::consumeError(std::move(Err));
-}
-
-Expected<DeclarationName> ASTImporter::Import_New(DeclarationName FromName) {
- DeclarationName ToName = Import(FromName);
- if (!ToName && FromName)
- return llvm::make_error<ImportError>();
- return ToName;
-}
-DeclarationName ASTImporter::Import(DeclarationName FromName) {
+Expected<DeclarationName> ASTImporter::Import(DeclarationName FromName) {
if (!FromName)
- return {};
+ return DeclarationName{};
switch (FromName.getNameKind()) {
case DeclarationName::Identifier:
- return Import(FromName.getAsIdentifierInfo());
+ return DeclarationName(Import(FromName.getAsIdentifierInfo()));
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
- return Import(FromName.getObjCSelector());
+ if (auto ToSelOrErr = Import(FromName.getObjCSelector()))
+ return DeclarationName(*ToSelOrErr);
+ else
+ return ToSelOrErr.takeError();
case DeclarationName::CXXConstructorName: {
- QualType T = Import(FromName.getCXXNameType());
- if (T.isNull())
- return {};
-
- return ToContext.DeclarationNames.getCXXConstructorName(
- ToContext.getCanonicalType(T));
+ if (auto ToTyOrErr = Import(FromName.getCXXNameType()))
+ return ToContext.DeclarationNames.getCXXConstructorName(
+ ToContext.getCanonicalType(*ToTyOrErr));
+ else
+ return ToTyOrErr.takeError();
}
case DeclarationName::CXXDestructorName: {
- QualType T = Import(FromName.getCXXNameType());
- if (T.isNull())
- return {};
-
- return ToContext.DeclarationNames.getCXXDestructorName(
- ToContext.getCanonicalType(T));
+ if (auto ToTyOrErr = Import(FromName.getCXXNameType()))
+ return ToContext.DeclarationNames.getCXXDestructorName(
+ ToContext.getCanonicalType(*ToTyOrErr));
+ else
+ return ToTyOrErr.takeError();
}
case DeclarationName::CXXDeductionGuideName: {
- auto *Template = cast_or_null<TemplateDecl>(
- Import(FromName.getCXXDeductionGuideTemplate()));
- if (!Template)
- return {};
- return ToContext.DeclarationNames.getCXXDeductionGuideName(Template);
+ if (auto ToTemplateOrErr = Import(FromName.getCXXDeductionGuideTemplate()))
+ return ToContext.DeclarationNames.getCXXDeductionGuideName(
+ cast<TemplateDecl>(*ToTemplateOrErr));
+ else
+ return ToTemplateOrErr.takeError();
}
case DeclarationName::CXXConversionFunctionName: {
- QualType T = Import(FromName.getCXXNameType());
- if (T.isNull())
- return {};
-
- return ToContext.DeclarationNames.getCXXConversionFunctionName(
- ToContext.getCanonicalType(T));
+ if (auto ToTyOrErr = Import(FromName.getCXXNameType()))
+ return ToContext.DeclarationNames.getCXXConversionFunctionName(
+ ToContext.getCanonicalType(*ToTyOrErr));
+ else
+ return ToTyOrErr.takeError();
}
case DeclarationName::CXXOperatorName:
@@ -8406,7 +8607,7 @@ DeclarationName ASTImporter::Import(DeclarationName FromName) {
case DeclarationName::CXXLiteralOperatorName:
return ToContext.DeclarationNames.getCXXLiteralOperatorName(
- Import(FromName.getCXXLiteralIdentifier()));
+ Import(FromName.getCXXLiteralIdentifier()));
case DeclarationName::CXXUsingDirective:
// FIXME: STATICS!
@@ -8428,15 +8629,9 @@ IdentifierInfo *ASTImporter::Import(const IdentifierInfo *FromId) {
return ToId;
}
-Expected<Selector> ASTImporter::Import_New(Selector FromSel) {
- Selector ToSel = Import(FromSel);
- if (ToSel.isNull() && !FromSel.isNull())
- return llvm::make_error<ImportError>();
- return ToSel;
-}
-Selector ASTImporter::Import(Selector FromSel) {
+Expected<Selector> ASTImporter::Import(Selector FromSel) {
if (FromSel.isNull())
- return {};
+ return Selector{};
SmallVector<IdentifierInfo *, 4> Idents;
Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(0)));
@@ -8496,15 +8691,42 @@ Decl *ASTImporter::MapImported(Decl *From, Decl *To) {
if (Pos != ImportedDecls.end())
return Pos->second;
ImportedDecls[From] = To;
+ // This mapping should be maintained only in this function. Therefore do not
+ // check for additional consistency.
+ ImportedFromDecls[To] = From;
+ AddToLookupTable(To);
return To;
}
+llvm::Optional<ImportError>
+ASTImporter::getImportDeclErrorIfAny(Decl *FromD) const {
+ auto Pos = ImportDeclErrors.find(FromD);
+ if (Pos != ImportDeclErrors.end())
+ return Pos->second;
+ else
+ return Optional<ImportError>();
+}
+
+void ASTImporter::setImportDeclError(Decl *From, ImportError Error) {
+ auto InsertRes = ImportDeclErrors.insert({From, Error});
+ (void)InsertRes;
+ // Either we set the error for the first time, or we already had set one and
+ // now we want to set the same error.
+ assert(InsertRes.second || InsertRes.first->second.Error == Error.Error);
+}
+
bool ASTImporter::IsStructurallyEquivalent(QualType From, QualType To,
bool Complain) {
- llvm::DenseMap<const Type *, const Type *>::iterator Pos
- = ImportedTypes.find(From.getTypePtr());
- if (Pos != ImportedTypes.end() && ToContext.hasSameType(Import(From), To))
- return true;
+ llvm::DenseMap<const Type *, const Type *>::iterator Pos =
+ ImportedTypes.find(From.getTypePtr());
+ if (Pos != ImportedTypes.end()) {
+ if (ExpectedType ToFromOrErr = Import(From)) {
+ if (ToContext.hasSameType(*ToFromOrErr, To))
+ return true;
+ } else {
+ llvm::consumeError(ToFromOrErr.takeError());
+ }
+ }
StructuralEquivalenceContext Ctx(FromContext, ToContext, NonEquivalentDecls,
getStructuralEquivalenceKind(*this), false,
diff --git a/lib/AST/ASTImporterLookupTable.cpp b/lib/AST/ASTImporterLookupTable.cpp
index fbcd4f5cb341..7390329d4ed8 100644
--- a/lib/AST/ASTImporterLookupTable.cpp
+++ b/lib/AST/ASTImporterLookupTable.cpp
@@ -1,9 +1,8 @@
//===- ASTImporterLookupTable.cpp - ASTImporter specific lookup -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -27,17 +26,30 @@ struct Builder : RecursiveASTVisitor<Builder> {
LT.add(D);
return true;
}
+ // In most cases the FriendDecl contains the declaration of the befriended
+ // class as a child node, so it is discovered during the recursive
+ // visitation. However, there are cases when the befriended class is not a
+ // child, thus it must be fetched explicitly from the FriendDecl, and only
+ // then can we add it to the lookup table.
bool VisitFriendDecl(FriendDecl *D) {
if (D->getFriendType()) {
QualType Ty = D->getFriendType()->getType();
- // FIXME Can this be other than elaborated?
- QualType NamedTy = cast<ElaboratedType>(Ty)->getNamedType();
- if (!NamedTy->isDependentType()) {
- if (const auto *RTy = dyn_cast<RecordType>(NamedTy))
+ if (isa<ElaboratedType>(Ty))
+ Ty = cast<ElaboratedType>(Ty)->getNamedType();
+ // A FriendDecl with a dependent type (e.g. ClassTemplateSpecialization)
+ // always has that decl as child node.
+ // However, there are non-dependent cases which do not have the
+ // type as a child node. We have to dig up that type now.
+ if (!Ty->isDependentType()) {
+ if (const auto *RTy = dyn_cast<RecordType>(Ty))
LT.add(RTy->getAsCXXRecordDecl());
- else if (const auto *SpecTy =
- dyn_cast<TemplateSpecializationType>(NamedTy)) {
+ else if (const auto *SpecTy = dyn_cast<TemplateSpecializationType>(Ty))
LT.add(SpecTy->getAsCXXRecordDecl());
+ else if (isa<TypedefType>(Ty)) {
+ // We do not put friend typedefs into the lookup table because
+ // ASTImporter does not organize typedefs into redecl chains.
+ } else {
+ llvm_unreachable("Unhandled type of friend class");
}
}
}
diff --git a/lib/AST/ASTStructuralEquivalence.cpp b/lib/AST/ASTStructuralEquivalence.cpp
index d19b89bb95b4..912db3c130c5 100644
--- a/lib/AST/ASTStructuralEquivalence.cpp
+++ b/lib/AST/ASTStructuralEquivalence.cpp
@@ -1,9 +1,8 @@
//===- ASTStructuralEquivalence.cpp ---------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -74,6 +73,7 @@
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
@@ -101,6 +101,59 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
const TemplateArgument &Arg1,
const TemplateArgument &Arg2);
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ NestedNameSpecifier *NNS1,
+ NestedNameSpecifier *NNS2);
+static bool IsStructurallyEquivalent(const IdentifierInfo *Name1,
+ const IdentifierInfo *Name2);
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ const DeclarationName Name1,
+ const DeclarationName Name2) {
+ if (Name1.getNameKind() != Name2.getNameKind())
+ return false;
+
+ switch (Name1.getNameKind()) {
+
+ case DeclarationName::Identifier:
+ return IsStructurallyEquivalent(Name1.getAsIdentifierInfo(),
+ Name2.getAsIdentifierInfo());
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ return IsStructurallyEquivalent(Context, Name1.getCXXNameType(),
+ Name2.getCXXNameType());
+
+ case DeclarationName::CXXDeductionGuideName: {
+ if (!IsStructurallyEquivalent(
+ Context, Name1.getCXXDeductionGuideTemplate()->getDeclName(),
+ Name2.getCXXDeductionGuideTemplate()->getDeclName()))
+ return false;
+ return IsStructurallyEquivalent(Context,
+ Name1.getCXXDeductionGuideTemplate(),
+ Name2.getCXXDeductionGuideTemplate());
+ }
+
+ case DeclarationName::CXXOperatorName:
+ return Name1.getCXXOverloadedOperator() == Name2.getCXXOverloadedOperator();
+
+ case DeclarationName::CXXLiteralOperatorName:
+ return IsStructurallyEquivalent(Name1.getCXXLiteralIdentifier(),
+ Name2.getCXXLiteralIdentifier());
+
+ case DeclarationName::CXXUsingDirective:
+ return true; // FIXME When do we consider two using directives equal?
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ return true; // FIXME
+ }
+
+ llvm_unreachable("Unhandled kind of DeclarationName");
+ return true;
+}
/// Determine structural equivalence of two expressions.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
@@ -108,7 +161,26 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (!E1 || !E2)
return E1 == E2;
- // FIXME: Actually perform a structural comparison!
+ if (auto *DE1 = dyn_cast<DependentScopeDeclRefExpr>(E1)) {
+ auto *DE2 = dyn_cast<DependentScopeDeclRefExpr>(E2);
+ if (!DE2)
+ return false;
+ if (!IsStructurallyEquivalent(Context, DE1->getDeclName(),
+ DE2->getDeclName()))
+ return false;
+ return IsStructurallyEquivalent(Context, DE1->getQualifier(),
+ DE2->getQualifier());
+ } else if (auto CastE1 = dyn_cast<ImplicitCastExpr>(E1)) {
+ auto *CastE2 = dyn_cast<ImplicitCastExpr>(E2);
+ if (!CastE2)
+ return false;
+ if (!IsStructurallyEquivalent(Context, CastE1->getType(),
+ CastE2->getType()))
+ return false;
+ return IsStructurallyEquivalent(Context, CastE1->getSubExpr(),
+ CastE2->getSubExpr());
+ }
+  // FIXME: Handle other kinds of expressions!
return true;
}
@@ -181,6 +253,12 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return I1 == E1 && I2 == E2;
}
+ case TemplateName::AssumedTemplate: {
+ AssumedTemplateStorage *TN1 = N1.getAsAssumedTemplateName(),
+                           *TN2 = N2.getAsAssumedTemplateName();
+ return TN1->getDeclName() == TN2->getDeclName();
+ }
+
case TemplateName::QualifiedTemplate: {
QualifiedTemplateName *QN1 = N1.getAsQualifiedTemplateName(),
*QN2 = N2.getAsQualifiedTemplateName();
@@ -297,6 +375,62 @@ static bool IsArrayStructurallyEquivalent(StructuralEquivalenceContext &Context,
return true;
}
+/// Determine structural equivalence based on the ExtInfo of functions. This
+/// is inspired by ASTContext::mergeFunctionTypes(); we compare the calling
+/// convention bits but must not compare some other bits.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ FunctionType::ExtInfo EI1,
+ FunctionType::ExtInfo EI2) {
+ // Compatible functions must have compatible calling conventions.
+ if (EI1.getCC() != EI2.getCC())
+ return false;
+
+ // Regparm is part of the calling convention.
+ if (EI1.getHasRegParm() != EI2.getHasRegParm())
+ return false;
+ if (EI1.getRegParm() != EI2.getRegParm())
+ return false;
+
+ if (EI1.getProducesResult() != EI2.getProducesResult())
+ return false;
+ if (EI1.getNoCallerSavedRegs() != EI2.getNoCallerSavedRegs())
+ return false;
+ if (EI1.getNoCfCheck() != EI2.getNoCfCheck())
+ return false;
+
+ return true;
+}
+
+/// Check the equivalence of exception specifications.
+static bool IsEquivalentExceptionSpec(StructuralEquivalenceContext &Context,
+ const FunctionProtoType *Proto1,
+ const FunctionProtoType *Proto2) {
+
+ auto Spec1 = Proto1->getExceptionSpecType();
+ auto Spec2 = Proto2->getExceptionSpecType();
+
+ if (isUnresolvedExceptionSpec(Spec1) || isUnresolvedExceptionSpec(Spec2))
+ return true;
+
+ if (Spec1 != Spec2)
+ return false;
+ if (Spec1 == EST_Dynamic) {
+ if (Proto1->getNumExceptions() != Proto2->getNumExceptions())
+ return false;
+ for (unsigned I = 0, N = Proto1->getNumExceptions(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context, Proto1->getExceptionType(I),
+ Proto2->getExceptionType(I)))
+ return false;
+ }
+ } else if (isComputedNoexcept(Spec1)) {
+ if (!IsStructurallyEquivalent(Context, Proto1->getNoexceptExpr(),
+ Proto2->getNoexceptExpr()))
+ return false;
+ }
+
+ return true;
+}
+
/// Determine structural equivalence of two types.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
QualType T1, QualType T2) {
@@ -503,7 +637,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (Proto1->isVariadic() != Proto2->isVariadic())
return false;
- if (Proto1->getTypeQuals() != Proto2->getTypeQuals())
+ if (Proto1->getMethodQuals() != Proto2->getMethodQuals())
return false;
// Check exceptions, this information is lost in canonical type.
@@ -511,24 +645,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
cast<FunctionProtoType>(OrigT1.getDesugaredType(Context.FromCtx));
const auto *OrigProto2 =
cast<FunctionProtoType>(OrigT2.getDesugaredType(Context.ToCtx));
- auto Spec1 = OrigProto1->getExceptionSpecType();
- auto Spec2 = OrigProto2->getExceptionSpecType();
-
- if (Spec1 != Spec2)
+ if (!IsEquivalentExceptionSpec(Context, OrigProto1, OrigProto2))
return false;
- if (Spec1 == EST_Dynamic) {
- if (OrigProto1->getNumExceptions() != OrigProto2->getNumExceptions())
- return false;
- for (unsigned I = 0, N = OrigProto1->getNumExceptions(); I != N; ++I) {
- if (!IsStructurallyEquivalent(Context, OrigProto1->getExceptionType(I),
- OrigProto2->getExceptionType(I)))
- return false;
- }
- } else if (isComputedNoexcept(Spec1)) {
- if (!IsStructurallyEquivalent(Context, OrigProto1->getNoexceptExpr(),
- OrigProto2->getNoexceptExpr()))
- return false;
- }
// Fall through to check the bits common with FunctionNoProtoType.
LLVM_FALLTHROUGH;
@@ -540,7 +658,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (!IsStructurallyEquivalent(Context, Function1->getReturnType(),
Function2->getReturnType()))
return false;
- if (Function1->getExtInfo() != Function2->getExtInfo())
+ if (!IsStructurallyEquivalent(Context, Function1->getExtInfo(),
+ Function2->getExtInfo()))
return false;
break;
}
@@ -569,6 +688,13 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
break;
+ case Type::MacroQualified:
+ if (!IsStructurallyEquivalent(
+ Context, cast<MacroQualifiedType>(T1)->getUnderlyingType(),
+ cast<MacroQualifiedType>(T2)->getUnderlyingType()))
+ return false;
+ break;
+
case Type::Typedef:
if (!IsStructurallyEquivalent(Context, cast<TypedefType>(T1)->getDecl(),
cast<TypedefType>(T2)->getDecl()))
@@ -834,10 +960,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
IdentifierInfo *Name2 = Field2->getIdentifier();
if (!::IsStructurallyEquivalent(Name1, Name2)) {
if (Context.Complain) {
- Context.Diag2(Owner2->getLocation(),
- Context.ErrorOnTagTypeMismatch
- ? diag::err_odr_tag_type_inconsistent
- : diag::warn_odr_tag_type_inconsistent)
+ Context.Diag2(
+ Owner2->getLocation(),
+ Context.getApplicableDiagnostic(diag::err_odr_tag_type_inconsistent))
<< Context.ToCtx.getTypeDeclType(Owner2);
Context.Diag2(Field2->getLocation(), diag::note_odr_field_name)
<< Field2->getDeclName();
@@ -850,10 +975,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (!IsStructurallyEquivalent(Context, Field1->getType(),
Field2->getType())) {
if (Context.Complain) {
- Context.Diag2(Owner2->getLocation(),
- Context.ErrorOnTagTypeMismatch
- ? diag::err_odr_tag_type_inconsistent
- : diag::warn_odr_tag_type_inconsistent)
+ Context.Diag2(
+ Owner2->getLocation(),
+ Context.getApplicableDiagnostic(diag::err_odr_tag_type_inconsistent))
<< Context.ToCtx.getTypeDeclType(Owner2);
Context.Diag2(Field2->getLocation(), diag::note_odr_field)
<< Field2->getDeclName() << Field2->getType();
@@ -865,10 +989,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (Field1->isBitField() != Field2->isBitField()) {
if (Context.Complain) {
- Context.Diag2(Owner2->getLocation(),
- Context.ErrorOnTagTypeMismatch
- ? diag::err_odr_tag_type_inconsistent
- : diag::warn_odr_tag_type_inconsistent)
+ Context.Diag2(
+ Owner2->getLocation(),
+ Context.getApplicableDiagnostic(diag::err_odr_tag_type_inconsistent))
<< Context.ToCtx.getTypeDeclType(Owner2);
if (Field1->isBitField()) {
Context.Diag1(Field1->getLocation(), diag::note_odr_bit_field)
@@ -895,9 +1018,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (Bits1 != Bits2) {
if (Context.Complain) {
Context.Diag2(Owner2->getLocation(),
- Context.ErrorOnTagTypeMismatch
- ? diag::err_odr_tag_type_inconsistent
- : diag::warn_odr_tag_type_inconsistent)
+ Context.getApplicableDiagnostic(
+ diag::err_odr_tag_type_inconsistent))
<< Context.ToCtx.getTypeDeclType(Owner2);
Context.Diag2(Field2->getLocation(), diag::note_odr_bit_field)
<< Field2->getDeclName() << Field2->getType() << Bits2;
@@ -933,13 +1055,15 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (auto *Constructor1 = dyn_cast<CXXConstructorDecl>(Method1)) {
auto *Constructor2 = cast<CXXConstructorDecl>(Method2);
- if (Constructor1->isExplicit() != Constructor2->isExplicit())
+ if (!Constructor1->getExplicitSpecifier().isEquivalent(
+ Constructor2->getExplicitSpecifier()))
return false;
}
if (auto *Conversion1 = dyn_cast<CXXConversionDecl>(Method1)) {
auto *Conversion2 = cast<CXXConversionDecl>(Method2);
- if (Conversion1->isExplicit() != Conversion2->isExplicit())
+ if (!Conversion1->getExplicitSpecifier().isEquivalent(
+ Conversion2->getExplicitSpecifier()))
return false;
if (!IsStructurallyEquivalent(Context, Conversion1->getConversionType(),
Conversion2->getConversionType()))
@@ -961,15 +1085,26 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return true;
}
+/// Determine structural equivalence of two lambda classes.
+static bool
+IsStructurallyEquivalentLambdas(StructuralEquivalenceContext &Context,
+ CXXRecordDecl *D1, CXXRecordDecl *D2) {
+ assert(D1->isLambda() && D2->isLambda() &&
+ "Must be called on lambda classes");
+ if (!IsStructurallyEquivalent(Context, D1->getLambdaCallOperator(),
+ D2->getLambdaCallOperator()))
+ return false;
+
+ return true;
+}
+
/// Determine structural equivalence of two records.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
RecordDecl *D1, RecordDecl *D2) {
if (D1->isUnion() != D2->isUnion()) {
if (Context.Complain) {
- Context.Diag2(D2->getLocation(),
- Context.ErrorOnTagTypeMismatch
- ? diag::err_odr_tag_type_inconsistent
- : diag::warn_odr_tag_type_inconsistent)
+ Context.Diag2(D2->getLocation(), Context.getApplicableDiagnostic(
+ diag::err_odr_tag_type_inconsistent))
<< Context.ToCtx.getTypeDeclType(D2);
Context.Diag1(D1->getLocation(), diag::note_odr_tag_kind_here)
<< D1->getDeclName() << (unsigned)D1->getTagKind();
@@ -1044,9 +1179,18 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
D1CXX->getASTContext().getExternalSource()->CompleteType(D1CXX);
}
+ if (D1CXX->isLambda() != D2CXX->isLambda())
+ return false;
+ if (D1CXX->isLambda()) {
+ if (!IsStructurallyEquivalentLambdas(Context, D1CXX, D2CXX))
+ return false;
+ }
+
if (D1CXX->getNumBases() != D2CXX->getNumBases()) {
if (Context.Complain) {
- Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ Context.Diag2(D2->getLocation(),
+ Context.getApplicableDiagnostic(
+ diag::err_odr_tag_type_inconsistent))
<< Context.ToCtx.getTypeDeclType(D2);
Context.Diag2(D2->getLocation(), diag::note_odr_number_of_bases)
<< D2CXX->getNumBases();
@@ -1065,7 +1209,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Base2->getType())) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(),
- diag::warn_odr_tag_type_inconsistent)
+ Context.getApplicableDiagnostic(
+ diag::err_odr_tag_type_inconsistent))
<< Context.ToCtx.getTypeDeclType(D2);
Context.Diag2(Base2->getBeginLoc(), diag::note_odr_base)
<< Base2->getType() << Base2->getSourceRange();
@@ -1079,7 +1224,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (Base1->isVirtual() != Base2->isVirtual()) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(),
- diag::warn_odr_tag_type_inconsistent)
+ Context.getApplicableDiagnostic(
+ diag::err_odr_tag_type_inconsistent))
<< Context.ToCtx.getTypeDeclType(D2);
Context.Diag2(Base2->getBeginLoc(), diag::note_odr_virtual_base)
<< Base2->isVirtual() << Base2->getSourceRange();
@@ -1092,15 +1238,16 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
// Check the friends for consistency.
CXXRecordDecl::friend_iterator Friend2 = D2CXX->friend_begin(),
- Friend2End = D2CXX->friend_end();
+ Friend2End = D2CXX->friend_end();
for (CXXRecordDecl::friend_iterator Friend1 = D1CXX->friend_begin(),
- Friend1End = D1CXX->friend_end();
+ Friend1End = D1CXX->friend_end();
Friend1 != Friend1End; ++Friend1, ++Friend2) {
if (Friend2 == Friend2End) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(),
- diag::warn_odr_tag_type_inconsistent)
- << Context.ToCtx.getTypeDeclType(D2CXX);
+ Context.getApplicableDiagnostic(
+ diag::err_odr_tag_type_inconsistent))
+ << Context.ToCtx.getTypeDeclType(D2CXX);
Context.Diag1((*Friend1)->getFriendLoc(), diag::note_odr_friend);
Context.Diag2(D2->getLocation(), diag::note_odr_missing_friend);
}
@@ -1109,8 +1256,10 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (!IsStructurallyEquivalent(Context, *Friend1, *Friend2)) {
if (Context.Complain) {
- Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
- << Context.ToCtx.getTypeDeclType(D2CXX);
+ Context.Diag2(D2->getLocation(),
+ Context.getApplicableDiagnostic(
+ diag::err_odr_tag_type_inconsistent))
+ << Context.ToCtx.getTypeDeclType(D2CXX);
Context.Diag1((*Friend1)->getFriendLoc(), diag::note_odr_friend);
Context.Diag2((*Friend2)->getFriendLoc(), diag::note_odr_friend);
}
@@ -1120,8 +1269,10 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (Friend2 != Friend2End) {
if (Context.Complain) {
- Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
- << Context.ToCtx.getTypeDeclType(D2);
+ Context.Diag2(D2->getLocation(),
+ Context.getApplicableDiagnostic(
+ diag::err_odr_tag_type_inconsistent))
+ << Context.ToCtx.getTypeDeclType(D2);
Context.Diag2((*Friend2)->getFriendLoc(), diag::note_odr_friend);
Context.Diag1(D1->getLocation(), diag::note_odr_missing_friend);
}
@@ -1129,7 +1280,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
} else if (D1CXX->getNumBases() > 0) {
if (Context.Complain) {
- Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ Context.Diag2(D2->getLocation(),
+ Context.getApplicableDiagnostic(
+ diag::err_odr_tag_type_inconsistent))
<< Context.ToCtx.getTypeDeclType(D2);
const CXXBaseSpecifier *Base1 = D1CXX->bases_begin();
Context.Diag1(Base1->getBeginLoc(), diag::note_odr_base)
@@ -1149,9 +1302,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (Field2 == Field2End) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(),
- Context.ErrorOnTagTypeMismatch
- ? diag::err_odr_tag_type_inconsistent
- : diag::warn_odr_tag_type_inconsistent)
+ Context.getApplicableDiagnostic(
+ diag::err_odr_tag_type_inconsistent))
<< Context.ToCtx.getTypeDeclType(D2);
Context.Diag1(Field1->getLocation(), diag::note_odr_field)
<< Field1->getDeclName() << Field1->getType();
@@ -1166,10 +1318,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (Field2 != Field2End) {
if (Context.Complain) {
- Context.Diag2(D2->getLocation(),
- Context.ErrorOnTagTypeMismatch
- ? diag::err_odr_tag_type_inconsistent
- : diag::warn_odr_tag_type_inconsistent)
+ Context.Diag2(D2->getLocation(), Context.getApplicableDiagnostic(
+ diag::err_odr_tag_type_inconsistent))
<< Context.ToCtx.getTypeDeclType(D2);
Context.Diag2(Field2->getLocation(), diag::note_odr_field)
<< Field2->getDeclName() << Field2->getType();
@@ -1200,9 +1350,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (EC2 == EC2End) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(),
- Context.ErrorOnTagTypeMismatch
- ? diag::err_odr_tag_type_inconsistent
- : diag::warn_odr_tag_type_inconsistent)
+ Context.getApplicableDiagnostic(
+ diag::err_odr_tag_type_inconsistent))
<< Context.ToCtx.getTypeDeclType(D2);
Context.Diag1(EC1->getLocation(), diag::note_odr_enumerator)
<< EC1->getDeclName() << EC1->getInitVal().toString(10);
@@ -1217,9 +1366,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
!IsStructurallyEquivalent(EC1->getIdentifier(), EC2->getIdentifier())) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(),
- Context.ErrorOnTagTypeMismatch
- ? diag::err_odr_tag_type_inconsistent
- : diag::warn_odr_tag_type_inconsistent)
+ Context.getApplicableDiagnostic(
+ diag::err_odr_tag_type_inconsistent))
<< Context.ToCtx.getTypeDeclType(D2);
Context.Diag2(EC2->getLocation(), diag::note_odr_enumerator)
<< EC2->getDeclName() << EC2->getInitVal().toString(10);
@@ -1232,10 +1380,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (EC2 != EC2End) {
if (Context.Complain) {
- Context.Diag2(D2->getLocation(),
- Context.ErrorOnTagTypeMismatch
- ? diag::err_odr_tag_type_inconsistent
- : diag::warn_odr_tag_type_inconsistent)
+ Context.Diag2(D2->getLocation(), Context.getApplicableDiagnostic(
+ diag::err_odr_tag_type_inconsistent))
<< Context.ToCtx.getTypeDeclType(D2);
Context.Diag2(EC2->getLocation(), diag::note_odr_enumerator)
<< EC2->getDeclName() << EC2->getInitVal().toString(10);
@@ -1253,7 +1399,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (Params1->size() != Params2->size()) {
if (Context.Complain) {
Context.Diag2(Params2->getTemplateLoc(),
- diag::err_odr_different_num_template_parameters)
+ Context.getApplicableDiagnostic(
+ diag::err_odr_different_num_template_parameters))
<< Params1->size() << Params2->size();
Context.Diag1(Params1->getTemplateLoc(),
diag::note_odr_template_parameter_list);
@@ -1265,7 +1412,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (Params1->getParam(I)->getKind() != Params2->getParam(I)->getKind()) {
if (Context.Complain) {
Context.Diag2(Params2->getParam(I)->getLocation(),
- diag::err_odr_different_template_parameter_kind);
+ Context.getApplicableDiagnostic(
+ diag::err_odr_different_template_parameter_kind));
Context.Diag1(Params1->getParam(I)->getLocation(),
diag::note_odr_template_parameter_here);
}
@@ -1285,7 +1433,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
TemplateTypeParmDecl *D2) {
if (D1->isParameterPack() != D2->isParameterPack()) {
if (Context.Complain) {
- Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
+ Context.Diag2(D2->getLocation(),
+ Context.getApplicableDiagnostic(
+ diag::err_odr_parameter_pack_non_pack))
<< D2->isParameterPack();
Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
<< D1->isParameterPack();
@@ -1301,7 +1451,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
NonTypeTemplateParmDecl *D2) {
if (D1->isParameterPack() != D2->isParameterPack()) {
if (Context.Complain) {
- Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
+ Context.Diag2(D2->getLocation(),
+ Context.getApplicableDiagnostic(
+ diag::err_odr_parameter_pack_non_pack))
<< D2->isParameterPack();
Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
<< D1->isParameterPack();
@@ -1313,7 +1465,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (!IsStructurallyEquivalent(Context, D1->getType(), D2->getType())) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(),
- diag::err_odr_non_type_parameter_type_inconsistent)
+ Context.getApplicableDiagnostic(
+ diag::err_odr_non_type_parameter_type_inconsistent))
<< D2->getType() << D1->getType();
Context.Diag1(D1->getLocation(), diag::note_odr_value_here)
<< D1->getType();
@@ -1329,7 +1482,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
TemplateTemplateParmDecl *D2) {
if (D1->isParameterPack() != D2->isParameterPack()) {
if (Context.Complain) {
- Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
+ Context.Diag2(D2->getLocation(),
+ Context.getApplicableDiagnostic(
+ diag::err_odr_parameter_pack_non_pack))
<< D2->isParameterPack();
Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
<< D1->isParameterPack();
@@ -1378,6 +1533,18 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ ConceptDecl *D1,
+ ConceptDecl *D2) {
+ // Check template parameters.
+ if (!IsTemplateDeclCommonStructurallyEquivalent(Context, D1, D2))
+ return false;
+
+ // Check the constraint expression.
+ return IsStructurallyEquivalent(Context, D1->getConstraintExpr(),
+ D2->getConstraintExpr());
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
FriendDecl *D1, FriendDecl *D2) {
if ((D1->getFriendType() && D2->getFriendDecl()) ||
(D1->getFriendDecl() && D2->getFriendType())) {
@@ -1485,6 +1652,52 @@ StructuralEquivalenceContext::findUntaggedStructOrUnionIndex(RecordDecl *Anon) {
return Index;
}
+unsigned StructuralEquivalenceContext::getApplicableDiagnostic(
+ unsigned ErrorDiagnostic) {
+ if (ErrorOnTagTypeMismatch)
+ return ErrorDiagnostic;
+
+ switch (ErrorDiagnostic) {
+ case diag::err_odr_variable_type_inconsistent:
+ return diag::warn_odr_variable_type_inconsistent;
+ case diag::err_odr_variable_multiple_def:
+ return diag::warn_odr_variable_multiple_def;
+ case diag::err_odr_function_type_inconsistent:
+ return diag::warn_odr_function_type_inconsistent;
+ case diag::err_odr_tag_type_inconsistent:
+ return diag::warn_odr_tag_type_inconsistent;
+ case diag::err_odr_field_type_inconsistent:
+ return diag::warn_odr_field_type_inconsistent;
+ case diag::err_odr_ivar_type_inconsistent:
+ return diag::warn_odr_ivar_type_inconsistent;
+ case diag::err_odr_objc_superclass_inconsistent:
+ return diag::warn_odr_objc_superclass_inconsistent;
+ case diag::err_odr_objc_method_result_type_inconsistent:
+ return diag::warn_odr_objc_method_result_type_inconsistent;
+ case diag::err_odr_objc_method_num_params_inconsistent:
+ return diag::warn_odr_objc_method_num_params_inconsistent;
+ case diag::err_odr_objc_method_param_type_inconsistent:
+ return diag::warn_odr_objc_method_param_type_inconsistent;
+ case diag::err_odr_objc_method_variadic_inconsistent:
+ return diag::warn_odr_objc_method_variadic_inconsistent;
+ case diag::err_odr_objc_property_type_inconsistent:
+ return diag::warn_odr_objc_property_type_inconsistent;
+ case diag::err_odr_objc_property_impl_kind_inconsistent:
+ return diag::warn_odr_objc_property_impl_kind_inconsistent;
+ case diag::err_odr_objc_synthesize_ivar_inconsistent:
+ return diag::warn_odr_objc_synthesize_ivar_inconsistent;
+ case diag::err_odr_different_num_template_parameters:
+ return diag::warn_odr_different_num_template_parameters;
+ case diag::err_odr_different_template_parameter_kind:
+ return diag::warn_odr_different_template_parameter_kind;
+ case diag::err_odr_parameter_pack_non_pack:
+ return diag::warn_odr_parameter_pack_non_pack;
+ case diag::err_odr_non_type_parameter_type_inconsistent:
+ return diag::warn_odr_non_type_parameter_type_inconsistent;
+ }
+ llvm_unreachable("Diagnostic kind not handled in preceding switch");
+}
+
bool StructuralEquivalenceContext::IsEquivalent(Decl *D1, Decl *D2) {
// Ensure that the implementation functions (all static functions in this TU)
@@ -1590,6 +1803,14 @@ bool StructuralEquivalenceContext::CheckKindSpecificEquivalence(
// Class template/non-class-template mismatch.
return false;
}
+ } else if (auto *ConceptDecl1 = dyn_cast<ConceptDecl>(D1)) {
+ if (auto *ConceptDecl2 = dyn_cast<ConceptDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, ConceptDecl1, ConceptDecl2))
+ return false;
+ } else {
+ // Concept/non-concept mismatch.
+ return false;
+ }
} else if (auto *TTP1 = dyn_cast<TemplateTypeParmDecl>(D1)) {
if (auto *TTP2 = dyn_cast<TemplateTypeParmDecl>(D2)) {
if (!::IsStructurallyEquivalent(*this, TTP1, TTP2))
@@ -1624,6 +1845,12 @@ bool StructuralEquivalenceContext::CheckKindSpecificEquivalence(
}
} else if (FunctionDecl *FD1 = dyn_cast<FunctionDecl>(D1)) {
if (FunctionDecl *FD2 = dyn_cast<FunctionDecl>(D2)) {
+ if (FD1->isOverloadedOperator()) {
+ if (!FD2->isOverloadedOperator())
+ return false;
+ if (FD1->getOverloadedOperator() != FD2->getOverloadedOperator())
+ return false;
+ }
if (!::IsStructurallyEquivalent(FD1->getIdentifier(),
FD2->getIdentifier()))
return false;
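The recurring change across the ASTStructuralEquivalence.cpp hunks above is that every Diag2 call now names the err_odr_* diagnostic and a single getApplicableDiagnostic helper decides whether to downgrade it to its warn_odr_* twin, instead of repeating the ErrorOnTagTypeMismatch ternary at each call site. A minimal standalone sketch of that mapping idea, with invented diagnostic IDs rather than the real clang::diag values:

#include <cassert>
#include <cstdio>

// Hypothetical stand-ins for paired error/warning diagnostic IDs.
enum DiagID {
  err_tag_type_inconsistent,
  warn_tag_type_inconsistent,
  err_parameter_pack_non_pack,
  warn_parameter_pack_non_pack
};

struct EquivalenceChecker {
  bool ErrorOnMismatch; // plays the role of ErrorOnTagTypeMismatch

  // Call sites always pass the error ID; this helper picks the variant.
  DiagID getApplicableDiagnostic(DiagID ErrorDiagnostic) const {
    if (ErrorOnMismatch)
      return ErrorDiagnostic;
    switch (ErrorDiagnostic) {
    case err_tag_type_inconsistent:
      return warn_tag_type_inconsistent;
    case err_parameter_pack_non_pack:
      return warn_parameter_pack_non_pack;
    default:
      assert(false && "diagnostic has no warning twin");
      return ErrorDiagnostic;
    }
  }
};

int main() {
  EquivalenceChecker Lenient{false}, Strict{true};
  std::printf("%d %d\n",
              Lenient.getApplicableDiagnostic(err_tag_type_inconsistent),
              Strict.getApplicableDiagnostic(err_tag_type_inconsistent));
}

Adding a new paired diagnostic then only touches the switch, not the many emit sites.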
diff --git a/lib/AST/ASTTypeTraits.cpp b/lib/AST/ASTTypeTraits.cpp
index 461084ce707c..ba1581bd3f6b 100644
--- a/lib/AST/ASTTypeTraits.cpp
+++ b/lib/AST/ASTTypeTraits.cpp
@@ -1,9 +1,8 @@
//===--- ASTTypeTraits.cpp --------------------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -38,6 +37,9 @@ const ASTNodeKind::KindInfo ASTNodeKind::AllKindInfo[] = {
{ NKI_None, "Type" },
#define TYPE(DERIVED, BASE) { NKI_##BASE, #DERIVED "Type" },
#include "clang/AST/TypeNodes.def"
+ { NKI_None, "OMPClause" },
+#define OPENMP_CLAUSE(TextualSpelling, Class) {NKI_OMPClause, #Class},
+#include "clang/Basic/OpenMPKinds.def"
};
bool ASTNodeKind::isBaseOf(ASTNodeKind Other, unsigned *Distance) const {
@@ -104,6 +106,19 @@ ASTNodeKind ASTNodeKind::getFromNode(const Type &T) {
#include "clang/AST/TypeNodes.def"
}
llvm_unreachable("invalid type kind");
+ }
+
+ASTNodeKind ASTNodeKind::getFromNode(const OMPClause &C) {
+ switch (C.getClauseKind()) {
+#define OPENMP_CLAUSE(Name, Class) \
+ case OMPC_##Name: return ASTNodeKind(NKI_##Class);
+#include "clang/Basic/OpenMPKinds.def"
+ case OMPC_threadprivate:
+ case OMPC_uniform:
+ case OMPC_unknown:
+ llvm_unreachable("unexpected OpenMP clause kind");
+ }
+  llvm_unreachable("invalid clause kind");
}
void DynTypedNode::print(llvm::raw_ostream &OS,
@@ -152,6 +167,8 @@ SourceRange DynTypedNode::getSourceRange() const {
return D->getSourceRange();
if (const Stmt *S = get<Stmt>())
return S->getSourceRange();
+ if (const auto *C = get<OMPClause>())
+ return SourceRange(C->getBeginLoc(), C->getEndLoc());
return SourceRange();
}
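The ASTTypeTraits.cpp hunks lean on the usual .def X-macro trick: the same OPENMP_CLAUSE list expands once into the AllKindInfo table and once into the getFromNode switch, so the two cannot drift apart. A toy version of the pattern, with a made-up clause list standing in for clang/Basic/OpenMPKinds.def:

#include <cstdio>

// In clang the list lives in a .def file that is #included repeatedly; here
// it is a local macro with invented clause names.
#define MY_CLAUSE_LIST(X)                                                      \
  X(if, IfClause)                                                              \
  X(num_threads, NumThreadsClause)

enum NodeKindId {
#define MY_CLAUSE(Spelling, Class) NKI_##Class,
  MY_CLAUSE_LIST(MY_CLAUSE)
#undef MY_CLAUSE
  NKI_Invalid
};

enum ClauseKind {
#define MY_CLAUSE(Spelling, Class) CK_##Spelling,
  MY_CLAUSE_LIST(MY_CLAUSE)
#undef MY_CLAUSE
  CK_unknown
};

// First reuse: textual names, indexed by NodeKindId (like AllKindInfo).
static const char *KindNames[] = {
#define MY_CLAUSE(Spelling, Class) #Class,
    MY_CLAUSE_LIST(MY_CLAUSE)
#undef MY_CLAUSE
};

// Second reuse: map a clause kind to its node kind (like getFromNode).
static NodeKindId getKindFor(ClauseKind K) {
  switch (K) {
#define MY_CLAUSE(Spelling, Class)                                             \
  case CK_##Spelling:                                                          \
    return NKI_##Class;
    MY_CLAUSE_LIST(MY_CLAUSE)
#undef MY_CLAUSE
  case CK_unknown:
    break;
  }
  return NKI_Invalid;
}

int main() {
  std::printf("%s\n", KindNames[getKindFor(CK_num_threads)]); // NumThreadsClause
}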
diff --git a/lib/AST/AttrImpl.cpp b/lib/AST/AttrImpl.cpp
index b06b50c9b4b8..0ef925ec1c90 100644
--- a/lib/AST/AttrImpl.cpp
+++ b/lib/AST/AttrImpl.cpp
@@ -1,9 +1,8 @@
//===--- AttrImpl.cpp - Classes for representing attributes -----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/AST/CXXABI.h b/lib/AST/CXXABI.h
index 06295b58178b..31cb36918726 100644
--- a/lib/AST/CXXABI.h
+++ b/lib/AST/CXXABI.h
@@ -1,9 +1,8 @@
//===----- CXXABI.h - Interface to C++ ABIs ---------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/AST/CXXInheritance.cpp b/lib/AST/CXXInheritance.cpp
index ddb350e72bbd..ecf451b175af 100644
--- a/lib/AST/CXXInheritance.cpp
+++ b/lib/AST/CXXInheritance.cpp
@@ -1,9 +1,8 @@
//===- CXXInheritance.cpp - C++ Inheritance -------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -487,6 +486,21 @@ bool CXXRecordDecl::FindOMPReductionMember(const CXXBaseSpecifier *Specifier,
return false;
}
+bool CXXRecordDecl::FindOMPMapperMember(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ DeclarationName Name) {
+ RecordDecl *BaseRecord =
+ Specifier->getType()->castAs<RecordType>()->getDecl();
+
+ for (Path.Decls = BaseRecord->lookup(Name); !Path.Decls.empty();
+ Path.Decls = Path.Decls.slice(1)) {
+ if (Path.Decls.front()->isInIdentifierNamespace(IDNS_OMPMapper))
+ return true;
+ }
+
+ return false;
+}
+
bool CXXRecordDecl::
FindNestedNameSpecifierMember(const CXXBaseSpecifier *Specifier,
CXXBasePath &Path,
@@ -540,8 +554,7 @@ void OverridingMethods::add(unsigned OverriddenSubobject,
UniqueVirtualMethod Overriding) {
SmallVectorImpl<UniqueVirtualMethod> &SubobjectOverrides
= Overrides[OverriddenSubobject];
- if (std::find(SubobjectOverrides.begin(), SubobjectOverrides.end(),
- Overriding) == SubobjectOverrides.end())
+ if (llvm::find(SubobjectOverrides, Overriding) == SubobjectOverrides.end())
SubobjectOverrides.push_back(Overriding);
}
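The CXXInheritance.cpp hunk is purely a readability change: std::find(begin, end, v) becomes the range form llvm::find(range, v), while the "append only if not already present" idiom of OverridingMethods::add stays the same. A small self-contained sketch of that idiom, using a local range-find helper in place of the one from llvm/ADT/STLExtras.h:

#include <algorithm>
#include <cstdio>
#include <iterator>
#include <vector>

// Minimal stand-in for llvm::find(Range, Value): forwards to std::find over
// the whole range so call sites do not spell out begin()/end().
template <typename Range, typename T>
auto range_find(Range &&R, const T &Value) {
  return std::find(std::begin(R), std::end(R), Value);
}

// Mirrors OverridingMethods::add: record the override only once.
static void addUnique(std::vector<int> &SubobjectOverrides, int Overriding) {
  if (range_find(SubobjectOverrides, Overriding) == SubobjectOverrides.end())
    SubobjectOverrides.push_back(Overriding);
}

int main() {
  std::vector<int> Overrides;
  addUnique(Overrides, 1);
  addUnique(Overrides, 1); // duplicate, ignored
  addUnique(Overrides, 2);
  std::printf("%zu\n", Overrides.size()); // prints 2
}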
diff --git a/lib/AST/Comment.cpp b/lib/AST/Comment.cpp
index 0d759e226c42..25339c7901e3 100644
--- a/lib/AST/Comment.cpp
+++ b/lib/AST/Comment.cpp
@@ -1,9 +1,8 @@
//===--- Comment.cpp - Comment AST node implementation --------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/AST/CommentBriefParser.cpp b/lib/AST/CommentBriefParser.cpp
index 5ec7586a475d..2b648cbb1d4b 100644
--- a/lib/AST/CommentBriefParser.cpp
+++ b/lib/AST/CommentBriefParser.cpp
@@ -1,9 +1,8 @@
//===--- CommentBriefParser.cpp - Dumb comment parser ---------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/AST/CommentCommandTraits.cpp b/lib/AST/CommentCommandTraits.cpp
index 7378a7c3ac06..b306fcbb154f 100644
--- a/lib/AST/CommentCommandTraits.cpp
+++ b/lib/AST/CommentCommandTraits.cpp
@@ -1,9 +1,8 @@
//===--- CommentCommandTraits.cpp - Comment command properties --*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/AST/CommentLexer.cpp b/lib/AST/CommentLexer.cpp
index c43275318dd7..19485f6018c0 100644
--- a/lib/AST/CommentLexer.cpp
+++ b/lib/AST/CommentLexer.cpp
@@ -1,9 +1,8 @@
//===--- CommentLexer.cpp -------------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/AST/CommentParser.cpp b/lib/AST/CommentParser.cpp
index 7f70b95e9812..c7f8aa7e16a0 100644
--- a/lib/AST/CommentParser.cpp
+++ b/lib/AST/CommentParser.cpp
@@ -1,9 +1,8 @@
//===--- CommentParser.cpp - Doxygen comment parser -----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/AST/CommentSema.cpp b/lib/AST/CommentSema.cpp
index 88588a7a89e6..067b3ae4222e 100644
--- a/lib/AST/CommentSema.cpp
+++ b/lib/AST/CommentSema.cpp
@@ -1,9 +1,8 @@
//===--- CommentSema.cpp - Doxygen comment semantic analysis --------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/AST/ComparisonCategories.cpp b/lib/AST/ComparisonCategories.cpp
index 87f51facc59b..ee4c1b0443a3 100644
--- a/lib/AST/ComparisonCategories.cpp
+++ b/lib/AST/ComparisonCategories.cpp
@@ -1,9 +1,8 @@
//===- ComparisonCategories.cpp - Three Way Comparison Data -----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/AST/DataCollection.cpp b/lib/AST/DataCollection.cpp
index c2ecabe8e6f8..8e67c101dee1 100644
--- a/lib/AST/DataCollection.cpp
+++ b/lib/AST/DataCollection.cpp
@@ -1,9 +1,8 @@
//===-- DataCollection.cpp --------------------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp
index 5536358b1ecf..21cf9da18a8b 100644
--- a/lib/AST/Decl.cpp
+++ b/lib/AST/Decl.cpp
@@ -1,9 +1,8 @@
//===- Decl.cpp - Declaration AST Node Implementation ---------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -568,7 +567,14 @@ static bool isSingleLineLanguageLinkage(const Decl &D) {
return false;
}
-static bool isExportedFromModuleIntefaceUnit(const NamedDecl *D) {
+/// Determine whether D is declared in the purview of a named module.
+static bool isInModulePurview(const NamedDecl *D) {
+ if (auto *M = D->getOwningModule())
+ return M->isModulePurview();
+ return false;
+}
+
+static bool isExportedFromModuleInterfaceUnit(const NamedDecl *D) {
// FIXME: Handle isModulePrivate.
switch (D->getModuleOwnershipKind()) {
case Decl::ModuleOwnershipKind::Unowned:
@@ -576,8 +582,7 @@ static bool isExportedFromModuleIntefaceUnit(const NamedDecl *D) {
return false;
case Decl::ModuleOwnershipKind::Visible:
case Decl::ModuleOwnershipKind::VisibleWhenImported:
- if (auto *M = D->getOwningModule())
- return M->Kind == Module::ModuleInterfaceUnit;
+ return isInModulePurview(D);
}
llvm_unreachable("unexpected module ownership kind");
}
@@ -587,9 +592,8 @@ static LinkageInfo getInternalLinkageFor(const NamedDecl *D) {
// as "module-internal linkage", which means that they have internal linkage
// formally but can be indirectly accessed from outside the module via inline
// functions and templates defined within the module.
- if (auto *M = D->getOwningModule())
- if (M->Kind == Module::ModuleInterfaceUnit)
- return LinkageInfo(ModuleInternalLinkage, DefaultVisibility, false);
+ if (isInModulePurview(D))
+ return LinkageInfo(ModuleInternalLinkage, DefaultVisibility, false);
return LinkageInfo::internal();
}
@@ -599,15 +603,25 @@ static LinkageInfo getExternalLinkageFor(const NamedDecl *D) {
// - A name declared at namespace scope that does not have internal linkage
// by the previous rules and that is introduced by a non-exported
// declaration has module linkage.
- if (auto *M = D->getOwningModule())
- if (M->Kind == Module::ModuleInterfaceUnit)
- if (!isExportedFromModuleIntefaceUnit(
- cast<NamedDecl>(D->getCanonicalDecl())))
- return LinkageInfo(ModuleLinkage, DefaultVisibility, false);
+ if (isInModulePurview(D) && !isExportedFromModuleInterfaceUnit(
+ cast<NamedDecl>(D->getCanonicalDecl())))
+ return LinkageInfo(ModuleLinkage, DefaultVisibility, false);
return LinkageInfo::external();
}
+static StorageClass getStorageClass(const Decl *D) {
+ if (auto *TD = dyn_cast<TemplateDecl>(D))
+ D = TD->getTemplatedDecl();
+ if (D) {
+ if (auto *VD = dyn_cast<VarDecl>(D))
+ return VD->getStorageClass();
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ return FD->getStorageClass();
+ }
+ return SC_None;
+}
+
LinkageInfo
LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
LVComputationKind computation,
@@ -619,24 +633,28 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
// C++ [basic.link]p3:
// A name having namespace scope (3.3.6) has internal linkage if it
// is the name of
- // - an object, reference, function or function template that is
- // explicitly declared static; or,
- // (This bullet corresponds to C99 6.2.2p3.)
+
+ if (getStorageClass(D->getCanonicalDecl()) == SC_Static) {
+ // - a variable, variable template, function, or function template
+ // that is explicitly declared static; or
+ // (This bullet corresponds to C99 6.2.2p3.)
+ return getInternalLinkageFor(D);
+ }
+
if (const auto *Var = dyn_cast<VarDecl>(D)) {
- // Explicitly declared static.
- if (Var->getStorageClass() == SC_Static)
- return getInternalLinkageFor(Var);
-
- // - a non-inline, non-volatile object or reference that is explicitly
- // declared const or constexpr and neither explicitly declared extern
- // nor previously declared to have external linkage; or (there is no
- // equivalent in C99)
- // The C++ modules TS adds "non-exported" to this list.
+ // - a non-template variable of non-volatile const-qualified type, unless
+ // - it is explicitly declared extern, or
+ // - it is inline or exported, or
+ // - it was previously declared and the prior declaration did not have
+ // internal linkage
+ // (There is no equivalent in C99.)
if (Context.getLangOpts().CPlusPlus &&
Var->getType().isConstQualified() &&
!Var->getType().isVolatileQualified() &&
!Var->isInline() &&
- !isExportedFromModuleIntefaceUnit(Var)) {
+ !isExportedFromModuleInterfaceUnit(Var) &&
+ !isa<VarTemplateSpecializationDecl>(Var) &&
+ !Var->getDescribedVarTemplate()) {
const VarDecl *PrevVar = Var->getPreviousDecl();
if (PrevVar)
return getLVForDecl(PrevVar, computation);
@@ -656,14 +674,6 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
if (PrevVar->getStorageClass() == SC_Static)
return getInternalLinkageFor(Var);
}
- } else if (const FunctionDecl *Function = D->getAsFunction()) {
- // C++ [temp]p4:
- // A non-member function template can have internal linkage; any
- // other template name shall have external linkage.
-
- // Explicitly declared static.
- if (Function->getCanonicalDecl()->getStorageClass() == SC_Static)
- return getInternalLinkageFor(Function);
} else if (const auto *IFD = dyn_cast<IndirectFieldDecl>(D)) {
// - a data member of an anonymous union.
const VarDecl *VD = IFD->getVarDecl();
@@ -672,6 +682,8 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
}
assert(!isa<FieldDecl>(D) && "Didn't expect a FieldDecl!");
+ // FIXME: This gives internal linkage to names that should have no linkage
+ // (those not covered by [basic.link]p6).
if (D->isInAnonymousNamespace()) {
const auto *Var = dyn_cast<VarDecl>(D);
const auto *Func = dyn_cast<FunctionDecl>(D);
@@ -731,10 +743,20 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
// C++ [basic.link]p4:
- // A name having namespace scope has external linkage if it is the
- // name of
+ // A name having namespace scope that has not been given internal linkage
+ // above and that is the name of
+ // [...bullets...]
+ // has its linkage determined as follows:
+ // - if the enclosing namespace has internal linkage, the name has
+ // internal linkage; [handled above]
+ // - otherwise, if the declaration of the name is attached to a named
+ // module and is not exported, the name has module linkage;
+ // - otherwise, the name has external linkage.
+ // LV is currently set up to handle the last two bullets.
//
- // - an object or reference, unless it has internal linkage; or
+ // The bullets are:
+
+ // - a variable; or
if (const auto *Var = dyn_cast<VarDecl>(D)) {
// GCC applies the following optimization to variables and static
// data members, but not to functions:
@@ -780,7 +802,7 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
mergeTemplateLV(LV, spec, computation);
}
- // - a function, unless it has internal linkage; or
+ // - a function; or
} else if (const auto *Function = dyn_cast<FunctionDecl>(D)) {
// In theory, we can modify the function's LV by the LV of its
// type unless it has C linkage (see comment above about variables
@@ -834,7 +856,8 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
mergeTemplateLV(LV, spec, computation);
}
- // - an enumerator belonging to an enumeration with external linkage;
+ // FIXME: This is not part of the C++ standard any more.
+ // - an enumerator belonging to an enumeration with external linkage; or
} else if (isa<EnumConstantDecl>(D)) {
LinkageInfo EnumLV = getLVForDecl(cast<NamedDecl>(D->getDeclContext()),
computation);
@@ -842,16 +865,16 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
return LinkageInfo::none();
LV.merge(EnumLV);
- // - a template, unless it is a function template that has
- // internal linkage (Clause 14);
+ // - a template
} else if (const auto *temp = dyn_cast<TemplateDecl>(D)) {
bool considerVisibility = !hasExplicitVisibilityAlready(computation);
LinkageInfo tempLV =
getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
LV.mergeMaybeWithVisibility(tempLV, considerVisibility);
- // - a namespace (7.3), unless it is declared within an unnamed
- // namespace.
+ // An unnamed namespace or a namespace declared directly or indirectly
+ // within an unnamed namespace has internal linkage. All other namespaces
+ // have external linkage.
//
// We handled names in anonymous namespaces above.
} else if (isa<NamespaceDecl>(D)) {
@@ -1508,6 +1531,11 @@ Module *Decl::getOwningModuleForLinkage(bool IgnoreLinkage) const {
}
return InternalLinkage ? M->Parent : nullptr;
}
+
+ case Module::PrivateModuleFragment:
+ // The private module fragment is part of its containing module for linkage
+ // purposes.
+ return M->Parent;
}
llvm_unreachable("unknown module kind");
@@ -1532,10 +1560,16 @@ void NamedDecl::printQualifiedName(raw_ostream &OS,
const PrintingPolicy &P) const {
const DeclContext *Ctx = getDeclContext();
- // For ObjC methods, look through categories and use the interface as context.
+ // For ObjC methods and properties, look through categories and use the
+ // interface as context.
if (auto *MD = dyn_cast<ObjCMethodDecl>(this))
if (auto *ID = MD->getClassInterface())
Ctx = ID;
+ if (auto *PD = dyn_cast<ObjCPropertyDecl>(this)) {
+ if (auto *MD = PD->getGetterMethodDecl())
+ if (auto *ID = MD->getClassInterface())
+ Ctx = ID;
+ }
if (Ctx->isFunctionOrMethod()) {
printName(OS);
@@ -2211,12 +2245,16 @@ void VarDecl::setInit(Expr *I) {
Init = I;
}
-bool VarDecl::isUsableInConstantExpressions(ASTContext &C) const {
+bool VarDecl::mightBeUsableInConstantExpressions(ASTContext &C) const {
const LangOptions &Lang = C.getLangOpts();
if (!Lang.CPlusPlus)
return false;
+ // Function parameters are never usable in constant expressions.
+ if (isa<ParmVarDecl>(this))
+ return false;
+
// In C++11, any variable of reference type can be used in a constant
// expression if it is initialized by a constant expression.
if (Lang.CPlusPlus11 && getType()->isReferenceType())
@@ -2238,6 +2276,22 @@ bool VarDecl::isUsableInConstantExpressions(ASTContext &C) const {
return Lang.CPlusPlus11 && isConstexpr();
}
+bool VarDecl::isUsableInConstantExpressions(ASTContext &Context) const {
+ // C++2a [expr.const]p3:
+ // A variable is usable in constant expressions after its initializing
+ // declaration is encountered...
+ const VarDecl *DefVD = nullptr;
+ const Expr *Init = getAnyInitializer(DefVD);
+ if (!Init || Init->isValueDependent() || getType()->isDependentType())
+ return false;
+ // ... if it is a constexpr variable, or it is of reference type or of
+ // const-qualified integral or enumeration type, ...
+ if (!DefVD->mightBeUsableInConstantExpressions(Context))
+ return false;
+ // ... and its initializer is a constant initializer.
+ return DefVD->checkInitIsICE();
+}
+
/// Convert the initializer for this declaration to the elaborated EvaluatedStmt
/// form, which contains extra information on the evaluated value of the
/// initializer.
@@ -2268,7 +2322,7 @@ APValue *VarDecl::evaluateValue(
// first time it is evaluated. FIXME: The notes won't always be emitted the
// first time we try evaluation, so might not be produced at all.
if (Eval->WasEvaluated)
- return Eval->Evaluated.isUninit() ? nullptr : &Eval->Evaluated;
+ return Eval->Evaluated.isAbsent() ? nullptr : &Eval->Evaluated;
const auto *Init = cast<Expr>(Eval->Value);
assert(!Init->isValueDependent());
@@ -2363,6 +2417,10 @@ bool VarDecl::checkInitIsICE() const {
return Eval->IsICE;
}
+bool VarDecl::isParameterPack() const {
+ return isa<PackExpansionType>(getType());
+}
+
template<typename DeclT>
static DeclT *getDefinitionOrSelf(DeclT *D) {
assert(D);
@@ -2380,48 +2438,61 @@ bool VarDecl::isNonEscapingByref() const {
}
VarDecl *VarDecl::getTemplateInstantiationPattern() const {
- // If it's a variable template specialization, find the template or partial
- // specialization from which it was instantiated.
- if (auto *VDTemplSpec = dyn_cast<VarTemplateSpecializationDecl>(this)) {
- auto From = VDTemplSpec->getInstantiatedFrom();
- if (auto *VTD = From.dyn_cast<VarTemplateDecl *>()) {
- while (auto *NewVTD = VTD->getInstantiatedFromMemberTemplate()) {
- if (NewVTD->isMemberSpecialization())
- break;
- VTD = NewVTD;
- }
- return getDefinitionOrSelf(VTD->getTemplatedDecl());
- }
- if (auto *VTPSD =
- From.dyn_cast<VarTemplatePartialSpecializationDecl *>()) {
- while (auto *NewVTPSD = VTPSD->getInstantiatedFromMember()) {
- if (NewVTPSD->isMemberSpecialization())
- break;
- VTPSD = NewVTPSD;
- }
- return getDefinitionOrSelf<VarDecl>(VTPSD);
- }
- }
+ const VarDecl *VD = this;
- if (MemberSpecializationInfo *MSInfo = getMemberSpecializationInfo()) {
+ // If this is an instantiated member, walk back to the template from which
+ // it was instantiated.
+ if (MemberSpecializationInfo *MSInfo = VD->getMemberSpecializationInfo()) {
if (isTemplateInstantiation(MSInfo->getTemplateSpecializationKind())) {
- VarDecl *VD = getInstantiatedFromStaticDataMember();
+ VD = VD->getInstantiatedFromStaticDataMember();
while (auto *NewVD = VD->getInstantiatedFromStaticDataMember())
VD = NewVD;
- return getDefinitionOrSelf(VD);
}
}
- if (VarTemplateDecl *VarTemplate = getDescribedVarTemplate()) {
- while (VarTemplate->getInstantiatedFromMemberTemplate()) {
- if (VarTemplate->isMemberSpecialization())
+ // If it's an instantiated variable template specialization, find the
+ // template or partial specialization from which it was instantiated.
+ if (auto *VDTemplSpec = dyn_cast<VarTemplateSpecializationDecl>(VD)) {
+ if (isTemplateInstantiation(VDTemplSpec->getTemplateSpecializationKind())) {
+ auto From = VDTemplSpec->getInstantiatedFrom();
+ if (auto *VTD = From.dyn_cast<VarTemplateDecl *>()) {
+ while (!VTD->isMemberSpecialization()) {
+ auto *NewVTD = VTD->getInstantiatedFromMemberTemplate();
+ if (!NewVTD)
+ break;
+ VTD = NewVTD;
+ }
+ return getDefinitionOrSelf(VTD->getTemplatedDecl());
+ }
+ if (auto *VTPSD =
+ From.dyn_cast<VarTemplatePartialSpecializationDecl *>()) {
+ while (!VTPSD->isMemberSpecialization()) {
+ auto *NewVTPSD = VTPSD->getInstantiatedFromMember();
+ if (!NewVTPSD)
+ break;
+ VTPSD = NewVTPSD;
+ }
+ return getDefinitionOrSelf<VarDecl>(VTPSD);
+ }
+ }
+ }
+
+ // If this is the pattern of a variable template, find where it was
+ // instantiated from. FIXME: Is this necessary?
+ if (VarTemplateDecl *VarTemplate = VD->getDescribedVarTemplate()) {
+ while (!VarTemplate->isMemberSpecialization()) {
+ auto *NewVT = VarTemplate->getInstantiatedFromMemberTemplate();
+ if (!NewVT)
break;
- VarTemplate = VarTemplate->getInstantiatedFromMemberTemplate();
+ VarTemplate = NewVT;
}
return getDefinitionOrSelf(VarTemplate->getTemplatedDecl());
}
- return nullptr;
+
+ if (VD == this)
+ return nullptr;
+ return getDefinitionOrSelf(const_cast<VarDecl*>(VD));
}
VarDecl *VarDecl::getInstantiatedFromStaticDataMember() const {
@@ -2441,6 +2512,17 @@ TemplateSpecializationKind VarDecl::getTemplateSpecializationKind() const {
return TSK_Undeclared;
}
+TemplateSpecializationKind
+VarDecl::getTemplateSpecializationKindForInstantiation() const {
+ if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
+ return MSI->getTemplateSpecializationKind();
+
+ if (const auto *Spec = dyn_cast<VarTemplateSpecializationDecl>(this))
+ return Spec->getSpecializationKind();
+
+ return TSK_Undeclared;
+}
+
SourceLocation VarDecl::getPointOfInstantiation() const {
if (const auto *Spec = dyn_cast<VarTemplateSpecializationDecl>(this))
return Spec->getPointOfInstantiation();
@@ -2501,15 +2583,14 @@ void VarDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK,
if (VarTemplateSpecializationDecl *Spec =
dyn_cast<VarTemplateSpecializationDecl>(this)) {
Spec->setSpecializationKind(TSK);
- if (TSK != TSK_ExplicitSpecialization && PointOfInstantiation.isValid() &&
+ if (TSK != TSK_ExplicitSpecialization &&
+ PointOfInstantiation.isValid() &&
Spec->getPointOfInstantiation().isInvalid()) {
Spec->setPointOfInstantiation(PointOfInstantiation);
if (ASTMutationListener *L = getASTContext().getASTMutationListener())
L->InstantiationRequested(this);
}
- }
-
- if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo()) {
+ } else if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo()) {
MSI->setTemplateSpecializationKind(TSK);
if (TSK != TSK_ExplicitSpecialization && PointOfInstantiation.isValid() &&
MSI->getPointOfInstantiation().isInvalid()) {
@@ -2626,10 +2707,6 @@ bool ParmVarDecl::hasDefaultArg() const {
!Init.isNull();
}
-bool ParmVarDecl::isParameterPack() const {
- return isa<PackExpansionType>(getType());
-}
-
void ParmVarDecl::setParameterIndexLarge(unsigned parameterIndex) {
getASTContext().setParameterIndex(this, parameterIndex);
ParmVarDeclBits.ParameterIndex = ParameterIndexSentinel;
@@ -2647,7 +2724,8 @@ FunctionDecl::FunctionDecl(Kind DK, ASTContext &C, DeclContext *DC,
SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo, QualType T,
TypeSourceInfo *TInfo, StorageClass S,
- bool isInlineSpecified, bool isConstexprSpecified)
+ bool isInlineSpecified,
+ ConstexprSpecKind ConstexprKind)
: DeclaratorDecl(DK, DC, NameInfo.getLoc(), NameInfo.getName(), T, TInfo,
StartLoc),
DeclContext(DK), redeclarable_base(C), ODRHash(0),
@@ -2656,7 +2734,6 @@ FunctionDecl::FunctionDecl(Kind DK, ASTContext &C, DeclContext *DC,
FunctionDeclBits.SClass = S;
FunctionDeclBits.IsInline = isInlineSpecified;
FunctionDeclBits.IsInlineSpecified = isInlineSpecified;
- FunctionDeclBits.IsExplicitSpecified = false;
FunctionDeclBits.IsVirtualAsWritten = false;
FunctionDeclBits.IsPure = false;
FunctionDeclBits.HasInheritedPrototype = false;
@@ -2668,7 +2745,7 @@ FunctionDecl::FunctionDecl(Kind DK, ASTContext &C, DeclContext *DC,
FunctionDeclBits.IsExplicitlyDefaulted = false;
FunctionDeclBits.HasImplicitReturnZero = false;
FunctionDeclBits.IsLateTemplateParsed = false;
- FunctionDeclBits.IsConstexpr = isConstexprSpecified;
+ FunctionDeclBits.ConstexprKind = ConstexprKind;
FunctionDeclBits.InstantiationIsPending = false;
FunctionDeclBits.UsesSEHTry = false;
FunctionDeclBits.HasSkippedBody = false;
@@ -2904,6 +2981,8 @@ bool FunctionDecl::isExternC() const {
}
bool FunctionDecl::isInExternCContext() const {
+ if (hasAttr<OpenCLKernelAttr>())
+ return true;
return getLexicalDeclContext()->isExternCContext();
}
@@ -2982,16 +3061,20 @@ FunctionDecl::setPreviousDeclaration(FunctionDecl *PrevDecl) {
FunctionDecl *FunctionDecl::getCanonicalDecl() { return getFirstDecl(); }
-/// Returns a value indicating whether this function
-/// corresponds to a builtin function.
+/// Returns a value indicating whether this function corresponds to a builtin
+/// function.
///
-/// The function corresponds to a built-in function if it is
-/// declared at translation scope or within an extern "C" block and
-/// its name matches with the name of a builtin. The returned value
-/// will be 0 for functions that do not correspond to a builtin, a
-/// value of type \c Builtin::ID if in the target-independent range
-/// \c [1,Builtin::First), or a target-specific builtin value.
-unsigned FunctionDecl::getBuiltinID() const {
+/// The function corresponds to a built-in function if it is declared at
+/// translation scope or within an extern "C" block and its name matches with
+/// the name of a builtin. The returned value will be 0 for functions that do
+/// not correspond to a builtin, a value of type \c Builtin::ID if in the
+/// target-independent range \c [1,Builtin::First), or a target-specific builtin
+/// value.
+///
+/// \param ConsiderWrapperFunctions If true, we should consider wrapper
+/// functions as their wrapped builtins. This shouldn't be done in general, but
+/// it's useful in Sema to diagnose calls to wrappers based on their semantics.
+unsigned FunctionDecl::getBuiltinID(bool ConsiderWrapperFunctions) const {
if (!getIdentifier())
return 0;
@@ -3019,7 +3102,7 @@ unsigned FunctionDecl::getBuiltinID() const {
// If the function is marked "overloadable", it has a different mangled name
// and is not the C library function.
- if (hasAttr<OverloadableAttr>())
+ if (!ConsiderWrapperFunctions && hasAttr<OverloadableAttr>())
return 0;
if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
@@ -3030,7 +3113,7 @@ unsigned FunctionDecl::getBuiltinID() const {
// function or whether it just has the same name.
// If this is a static function, it's not a builtin.
- if (getStorageClass() == SC_Static)
+ if (!ConsiderWrapperFunctions && getStorageClass() == SC_Static)
return 0;
// OpenCL v1.2 s6.9.f - The library functions defined in
@@ -3337,7 +3420,13 @@ FunctionDecl *FunctionDecl::getInstantiatedFromMemberFunction() const {
}
MemberSpecializationInfo *FunctionDecl::getMemberSpecializationInfo() const {
- return TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo *>();
+ if (auto *MSI =
+ TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo *>())
+ return MSI;
+ if (auto *FTSI = TemplateOrSpecialization
+ .dyn_cast<FunctionTemplateSpecializationInfo *>())
+ return FTSI->getMemberSpecializationInfo();
+ return nullptr;
}
void
@@ -3356,6 +3445,8 @@ FunctionTemplateDecl *FunctionDecl::getDescribedFunctionTemplate() const {
}
void FunctionDecl::setDescribedFunctionTemplate(FunctionTemplateDecl *Template) {
+ assert(TemplateOrSpecialization.isNull() &&
+ "Member function is already a specialization");
TemplateOrSpecialization = Template;
}
@@ -3364,19 +3455,15 @@ bool FunctionDecl::isImplicitlyInstantiable() const {
if (isInvalidDecl())
return false;
- switch (getTemplateSpecializationKind()) {
+ switch (getTemplateSpecializationKindForInstantiation()) {
case TSK_Undeclared:
case TSK_ExplicitInstantiationDefinition:
+ case TSK_ExplicitSpecialization:
return false;
case TSK_ImplicitInstantiation:
return true;
- // It is possible to instantiate TSK_ExplicitSpecialization kind
- // if the FunctionDecl has a class scope specialization pattern.
- case TSK_ExplicitSpecialization:
- return getClassScopeSpecializationPattern() != nullptr;
-
case TSK_ExplicitInstantiationDeclaration:
// Handled below.
break;
@@ -3399,26 +3486,12 @@ bool FunctionDecl::isImplicitlyInstantiable() const {
}
bool FunctionDecl::isTemplateInstantiation() const {
- switch (getTemplateSpecializationKind()) {
- case TSK_Undeclared:
- case TSK_ExplicitSpecialization:
- return false;
- case TSK_ImplicitInstantiation:
- case TSK_ExplicitInstantiationDeclaration:
- case TSK_ExplicitInstantiationDefinition:
- return true;
- }
- llvm_unreachable("All TSK values handled.");
+ // FIXME: Remove this, it's not clear what it means. (Which template
+ // specialization kind?)
+ return clang::isTemplateInstantiation(getTemplateSpecializationKind());
}
FunctionDecl *FunctionDecl::getTemplateInstantiationPattern() const {
- // Handle class scope explicit specialization special case.
- if (getTemplateSpecializationKind() == TSK_ExplicitSpecialization) {
- if (auto *Spec = getClassScopeSpecializationPattern())
- return getDefinitionOrSelf(Spec);
- return nullptr;
- }
-
// If this is a generic lambda call operator specialization, its
// instantiation pattern is always its primary template's pattern
// even if its primary template was instantiated from another
@@ -3434,21 +3507,28 @@ FunctionDecl *FunctionDecl::getTemplateInstantiationPattern() const {
return getDefinitionOrSelf(getPrimaryTemplate()->getTemplatedDecl());
}
+ if (MemberSpecializationInfo *Info = getMemberSpecializationInfo()) {
+ if (!clang::isTemplateInstantiation(Info->getTemplateSpecializationKind()))
+ return nullptr;
+ return getDefinitionOrSelf(cast<FunctionDecl>(Info->getInstantiatedFrom()));
+ }
+
+ if (!clang::isTemplateInstantiation(getTemplateSpecializationKind()))
+ return nullptr;
+
if (FunctionTemplateDecl *Primary = getPrimaryTemplate()) {
- while (Primary->getInstantiatedFromMemberTemplate()) {
- // If we have hit a point where the user provided a specialization of
- // this template, we're done looking.
- if (Primary->isMemberSpecialization())
+ // If we hit a point where the user provided a specialization of this
+ // template, we're done looking.
+ while (!Primary->isMemberSpecialization()) {
+ auto *NewPrimary = Primary->getInstantiatedFromMemberTemplate();
+ if (!NewPrimary)
break;
- Primary = Primary->getInstantiatedFromMemberTemplate();
+ Primary = NewPrimary;
}
return getDefinitionOrSelf(Primary->getTemplatedDecl());
}
- if (auto *MFD = getInstantiatedFromMemberFunction())
- return getDefinitionOrSelf(MFD);
-
return nullptr;
}
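A small C++ illustration (not taken from the patch) of the loop above: the walk through getInstantiatedFromMemberTemplate() stops as soon as a user-provided member specialization is reached.

template <typename T> struct Outer {
  template <typename U> static void f(U) {}   // member template
};
// User-provided specialization of the member template for Outer<int>:
// the pattern walk stops here instead of continuing to Outer<T>::f.
template <> template <typename U> void Outer<int>::f(U) {}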
@@ -3456,15 +3536,11 @@ FunctionTemplateDecl *FunctionDecl::getPrimaryTemplate() const {
if (FunctionTemplateSpecializationInfo *Info
= TemplateOrSpecialization
.dyn_cast<FunctionTemplateSpecializationInfo*>()) {
- return Info->Template.getPointer();
+ return Info->getTemplate();
}
return nullptr;
}
-FunctionDecl *FunctionDecl::getClassScopeSpecializationPattern() const {
- return getASTContext().getClassScopeSpecializationPattern(this);
-}
-
FunctionTemplateSpecializationInfo *
FunctionDecl::getTemplateSpecializationInfo() const {
return TemplateOrSpecialization
@@ -3499,15 +3575,19 @@ FunctionDecl::setFunctionTemplateSpecialization(ASTContext &C,
TemplateSpecializationKind TSK,
const TemplateArgumentListInfo *TemplateArgsAsWritten,
SourceLocation PointOfInstantiation) {
+ assert((TemplateOrSpecialization.isNull() ||
+ TemplateOrSpecialization.is<MemberSpecializationInfo *>()) &&
+ "Member function is already a specialization");
assert(TSK != TSK_Undeclared &&
"Must specify the type of function template specialization");
- FunctionTemplateSpecializationInfo *Info
- = TemplateOrSpecialization.dyn_cast<FunctionTemplateSpecializationInfo*>();
- if (!Info)
- Info = FunctionTemplateSpecializationInfo::Create(C, this, Template, TSK,
- TemplateArgs,
- TemplateArgsAsWritten,
- PointOfInstantiation);
+ assert((TemplateOrSpecialization.isNull() ||
+ TSK == TSK_ExplicitSpecialization) &&
+ "Member specialization must be an explicit specialization");
+ FunctionTemplateSpecializationInfo *Info =
+ FunctionTemplateSpecializationInfo::Create(
+ C, this, Template, TSK, TemplateArgs, TemplateArgsAsWritten,
+ PointOfInstantiation,
+ TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo *>());
TemplateOrSpecialization = Info;
Template->addSpecialization(Info, InsertPos);
}
@@ -3558,14 +3638,47 @@ DependentFunctionTemplateSpecializationInfo(const UnresolvedSetImpl &Ts,
TemplateSpecializationKind FunctionDecl::getTemplateSpecializationKind() const {
// For a function template specialization, query the specialization
// information object.
- FunctionTemplateSpecializationInfo *FTSInfo
- = TemplateOrSpecialization.dyn_cast<FunctionTemplateSpecializationInfo*>();
- if (FTSInfo)
+ if (FunctionTemplateSpecializationInfo *FTSInfo =
+ TemplateOrSpecialization
+ .dyn_cast<FunctionTemplateSpecializationInfo *>())
+ return FTSInfo->getTemplateSpecializationKind();
+
+ if (MemberSpecializationInfo *MSInfo =
+ TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo *>())
+ return MSInfo->getTemplateSpecializationKind();
+
+ return TSK_Undeclared;
+}
+
+TemplateSpecializationKind
+FunctionDecl::getTemplateSpecializationKindForInstantiation() const {
+ // This is the same as getTemplateSpecializationKind(), except that for a
+ // function that is both a function template specialization and a member
+ // specialization, we prefer the member specialization information. Eg:
+ //
+ // template<typename T> struct A {
+ // template<typename U> void f() {}
+ // template<> void f<int>() {}
+ // };
+ //
+ // For A<int>::f<int>():
+ // * getTemplateSpecializationKind() will return TSK_ExplicitSpecialization
+ // * getTemplateSpecializationKindForInstantiation() will return
+ // TSK_ImplicitInstantiation
+ //
+ // This reflects the facts that A<int>::f<int> is an explicit specialization
+ // of A<int>::f, and that A<int>::f<int> should be implicitly instantiated
+ // from A::f<int> if a definition is needed.
+ if (FunctionTemplateSpecializationInfo *FTSInfo =
+ TemplateOrSpecialization
+ .dyn_cast<FunctionTemplateSpecializationInfo *>()) {
+ if (auto *MSInfo = FTSInfo->getMemberSpecializationInfo())
+ return MSInfo->getTemplateSpecializationKind();
return FTSInfo->getTemplateSpecializationKind();
+ }
- MemberSpecializationInfo *MSInfo
- = TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo*>();
- if (MSInfo)
+ if (MemberSpecializationInfo *MSInfo =
+ TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo *>())
return MSInfo->getTemplateSpecializationKind();
return TSK_Undeclared;
@@ -3673,6 +3786,10 @@ unsigned FunctionDecl::getMemoryFunctionKind() const {
case Builtin::BImemcmp:
return Builtin::BImemcmp;
+ case Builtin::BI__builtin_bcmp:
+ case Builtin::BIbcmp:
+ return Builtin::BIbcmp;
+
case Builtin::BI__builtin_strncpy:
case Builtin::BI__builtin___strncpy_chk:
case Builtin::BIstrncpy:
@@ -3713,6 +3830,8 @@ unsigned FunctionDecl::getMemoryFunctionKind() const {
return Builtin::BImemmove;
else if (FnInfo->isStr("memcmp"))
return Builtin::BImemcmp;
+ else if (FnInfo->isStr("bcmp"))
+ return Builtin::BIbcmp;
else if (FnInfo->isStr("strncpy"))
return Builtin::BIstrncpy;
else if (FnInfo->isStr("strncmp"))
@@ -3794,6 +3913,39 @@ bool FieldDecl::isZeroLengthBitField(const ASTContext &Ctx) const {
getBitWidthValue(Ctx) == 0;
}
+bool FieldDecl::isZeroSize(const ASTContext &Ctx) const {
+ if (isZeroLengthBitField(Ctx))
+ return true;
+
+ // C++2a [intro.object]p7:
+ // An object has nonzero size if it
+ // -- is not a potentially-overlapping subobject, or
+ if (!hasAttr<NoUniqueAddressAttr>())
+ return false;
+
+ // -- is not of class type, or
+ const auto *RT = getType()->getAs<RecordType>();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl()->getDefinition();
+ if (!RD) {
+ assert(isInvalidDecl() && "valid field has incomplete type");
+ return false;
+ }
+
+ // -- [has] virtual member functions or virtual base classes, or
+ // -- has subobjects of nonzero size or bit-fields of nonzero length
+ const auto *CXXRD = cast<CXXRecordDecl>(RD);
+ if (!CXXRD->isEmpty())
+ return false;
+
+ // Otherwise, [...] the circumstances under which the object has zero size
+ // are implementation-defined.
+ // FIXME: This might be Itanium ABI specific; we don't yet know what the MS
+ // ABI will do.
+ return true;
+}
+
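For orientation, a minimal C++2a example (illustrative only) of the kind of field the new isZeroSize() predicate identifies:

struct Empty {};
struct S {
  [[no_unique_address]] Empty e; // empty class type + attribute: zero size
  [[no_unique_address]] int n;   // not of class type: nonzero size
  Empty f;                       // no attribute: not potentially overlapping, nonzero size
};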
unsigned FieldDecl::getFieldIndex() const {
const FieldDecl *Canonical = getCanonicalDecl();
if (Canonical != this)
@@ -4100,6 +4252,9 @@ RecordDecl::RecordDecl(Kind DK, TagKind TK, const ASTContext &C,
setNonTrivialToPrimitiveDefaultInitialize(false);
setNonTrivialToPrimitiveCopy(false);
setNonTrivialToPrimitiveDestroy(false);
+ setHasNonTrivialToPrimitiveDefaultInitializeCUnion(false);
+ setHasNonTrivialToPrimitiveDestructCUnion(false);
+ setHasNonTrivialToPrimitiveCopyCUnion(false);
setParamDestroyedInCallee(false);
setArgPassingRestrictions(APK_CanPassInRegs);
}
@@ -4260,6 +4415,7 @@ BlockDecl::BlockDecl(DeclContext *DC, SourceLocation CaretLoc)
setBlockMissingReturnType(true);
setIsConversionFromLambda(false);
setDoesNotEscape(false);
+ setCanAvoidCopyToHeap(false);
}
void BlockDecl::setParams(ArrayRef<ParmVarDecl *> NewParamInfo) {
@@ -4422,13 +4578,12 @@ FunctionDecl *FunctionDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo,
QualType T, TypeSourceInfo *TInfo,
- StorageClass SC,
- bool isInlineSpecified,
+ StorageClass SC, bool isInlineSpecified,
bool hasWrittenPrototype,
- bool isConstexprSpecified) {
+ ConstexprSpecKind ConstexprKind) {
FunctionDecl *New =
new (C, DC) FunctionDecl(Function, C, DC, StartLoc, NameInfo, T, TInfo,
- SC, isInlineSpecified, isConstexprSpecified);
+ SC, isInlineSpecified, ConstexprKind);
New->setHasWrittenPrototype(hasWrittenPrototype);
return New;
}
@@ -4436,7 +4591,7 @@ FunctionDecl *FunctionDecl::Create(ASTContext &C, DeclContext *DC,
FunctionDecl *FunctionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) FunctionDecl(Function, C, nullptr, SourceLocation(),
DeclarationNameInfo(), QualType(), nullptr,
- SC_None, false, false);
+ SC_None, false, CSK_unspecified);
}
BlockDecl *BlockDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) {
diff --git a/lib/AST/DeclBase.cpp b/lib/AST/DeclBase.cpp
index b83082e9eb08..fd80e1532eb5 100644
--- a/lib/AST/DeclBase.cpp
+++ b/lib/AST/DeclBase.cpp
@@ -1,9 +1,8 @@
//===- DeclBase.cpp - Declaration AST Node Implementation -----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -209,8 +208,8 @@ bool Decl::isTemplateParameterPack() const {
}
bool Decl::isParameterPack() const {
- if (const auto *Parm = dyn_cast<ParmVarDecl>(this))
- return Parm->isParameterPack();
+ if (const auto *Var = dyn_cast<VarDecl>(this))
+ return Var->isParameterPack();
return isTemplateParameterPack();
}
@@ -355,7 +354,8 @@ bool Decl::isInAnonymousNamespace() const {
}
bool Decl::isInStdNamespace() const {
- return getDeclContext()->isStdNamespace();
+ const DeclContext *DC = getDeclContext();
+ return DC && DC->isStdNamespace();
}
TranslationUnitDecl *Decl::getTranslationUnitDecl() {
@@ -431,22 +431,6 @@ bool Decl::isReferenced() const {
return false;
}
-bool Decl::isExported() const {
- if (isModulePrivate())
- return false;
- // Namespaces are always exported.
- if (isa<TranslationUnitDecl>(this) || isa<NamespaceDecl>(this))
- return true;
- // Otherwise, this is a strictly lexical check.
- for (auto *DC = getLexicalDeclContext(); DC; DC = DC->getLexicalParent()) {
- if (cast<Decl>(DC)->isModulePrivate())
- return false;
- if (isa<ExportDecl>(DC))
- return true;
- }
- return false;
-}
-
ExternalSourceSymbolAttr *Decl::getExternalSourceSymbolAttr() const {
const Decl *Definition = nullptr;
if (auto *ID = dyn_cast<ObjCInterfaceDecl>(this)) {
@@ -726,6 +710,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case Binding:
case NonTypeTemplateParm:
case VarTemplate:
+ case Concept:
// These (C++-only) declarations are found by redeclaration lookup for
// tag types, so we include them in the tag namespace.
return IDNS_Ordinary | IDNS_Tag;
@@ -781,6 +766,9 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case OMPDeclareReduction:
return IDNS_OMPReduction;
+ case OMPDeclareMapper:
+ return IDNS_OMPMapper;
+
// Never have names.
case Friend:
case FriendTemplate:
@@ -810,6 +798,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case ObjCCategoryImpl:
case Import:
case OMPThreadPrivate:
+ case OMPAllocate:
case OMPRequires:
case OMPCapturedExpr:
case Empty:
@@ -932,6 +921,7 @@ bool Decl::AccessDeclContextSanity() const {
if (isa<TranslationUnitDecl>(this) ||
isa<TemplateTypeParmDecl>(this) ||
isa<NonTypeTemplateParmDecl>(this) ||
+ !getDeclContext() ||
!isa<CXXRecordDecl>(getDeclContext()) ||
isInvalidDecl() ||
isa<StaticAssertDecl>(this) ||
@@ -969,6 +959,8 @@ const FunctionType *Decl::getFunctionType(bool BlocksToo) const {
if (Ty->isFunctionPointerType())
Ty = Ty->getAs<PointerType>()->getPointeeType();
+ else if (Ty->isFunctionReferenceType())
+ Ty = Ty->getAs<ReferenceType>()->getPointeeType();
else if (BlocksToo && Ty->isBlockPointerType())
Ty = Ty->getAs<BlockPointerType>()->getPointeeType();
@@ -1054,6 +1046,18 @@ DeclContext *DeclContext::getLookupParent() {
return getParent();
}
+const BlockDecl *DeclContext::getInnermostBlockDecl() const {
+ const DeclContext *Ctx = this;
+
+ do {
+ if (Ctx->isClosure())
+ return cast<BlockDecl>(Ctx);
+ Ctx = Ctx->getParent();
+ } while (Ctx);
+
+ return nullptr;
+}
+
bool DeclContext::isInlineNamespace() const {
return isNamespace() &&
cast<NamespaceDecl>(this)->isInline();
@@ -1164,6 +1168,7 @@ DeclContext *DeclContext::getPrimaryContext() {
case Decl::Block:
case Decl::Captured:
case Decl::OMPDeclareReduction:
+ case Decl::OMPDeclareMapper:
// There is only one DeclContext for these entities.
return this;
@@ -1175,13 +1180,15 @@ DeclContext *DeclContext::getPrimaryContext() {
return this;
case Decl::ObjCInterface:
- if (auto *Def = cast<ObjCInterfaceDecl>(this)->getDefinition())
- return Def;
+ if (auto *OID = dyn_cast<ObjCInterfaceDecl>(this))
+ if (auto *Def = OID->getDefinition())
+ return Def;
return this;
case Decl::ObjCProtocol:
- if (auto *Def = cast<ObjCProtocolDecl>(this)->getDefinition())
- return Def;
+ if (auto *OPD = dyn_cast<ObjCProtocolDecl>(this))
+ if (auto *Def = OPD->getDefinition())
+ return Def;
return this;
case Decl::ObjCCategory:
diff --git a/lib/AST/DeclCXX.cpp b/lib/AST/DeclCXX.cpp
index 31ffeb0dcd1e..59710a55498f 100644
--- a/lib/AST/DeclCXX.cpp
+++ b/lib/AST/DeclCXX.cpp
@@ -1,9 +1,8 @@
//===- DeclCXX.cpp - C++ Declaration AST Node Implementation --------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -606,14 +605,19 @@ bool CXXRecordDecl::hasSubobjectAtOffsetZeroOfEmptyBaseType(
// that sure looks like a wording bug.
// -- If X is a non-union class type with a non-static data member
- // [recurse to] the first non-static data member of X
+ // [recurse to each field] that is either of zero size or is the
+ // first non-static data member of X
// -- If X is a union type, [recurse to union members]
+ bool IsFirstField = true;
for (auto *FD : X->fields()) {
// FIXME: Should we really care about the type of the first non-static
// data member of a non-union if there are preceding unnamed bit-fields?
if (FD->isUnnamedBitfield())
continue;
+ if (!IsFirstField && !FD->isZeroSize(Ctx))
+ continue;
+
// -- If X is an array type, [visit the element type]
QualType T = Ctx.getBaseElementType(FD->getType());
if (auto *RD = T->getAsCXXRecordDecl())
@@ -621,7 +625,7 @@ bool CXXRecordDecl::hasSubobjectAtOffsetZeroOfEmptyBaseType(
return true;
if (!X->isUnion())
- break;
+ IsFirstField = false;
}
}
@@ -641,7 +645,8 @@ bool CXXRecordDecl::lambdaIsDefaultConstructibleAndAssignable() const {
// C++17 [expr.prim.lambda]p21:
// The closure type associated with a lambda-expression has no default
// constructor and a deleted copy assignment operator.
- if (getLambdaCaptureDefault() != LCD_None)
+ if (getLambdaCaptureDefault() != LCD_None ||
+ getLambdaData().NumCaptures != 0)
return false;
return getASTContext().getLangOpts().CPlusPlus2a;
}
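The C++2a rule being checked above, as a short example (illustrative only): only a lambda with no capture-default and no captures gets a default constructor and a copy assignment operator.

auto NoCapture = [] { return 0; };
decltype(NoCapture) A;            // OK in C++2a: default-constructible
int x = 1;
auto Captures = [x] { return x; };
// decltype(Captures) B;          // ill-formed: the closure type has a capture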
@@ -992,6 +997,17 @@ void CXXRecordDecl::addedMember(Decl *D) {
setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs);
Data.HasIrrelevantDestructor = false;
+
+ if (isUnion()) {
+ data().DefaultedCopyConstructorIsDeleted = true;
+ data().DefaultedMoveConstructorIsDeleted = true;
+ data().DefaultedMoveAssignmentIsDeleted = true;
+ data().DefaultedDestructorIsDeleted = true;
+ data().NeedOverloadResolutionForCopyConstructor = true;
+ data().NeedOverloadResolutionForMoveConstructor = true;
+ data().NeedOverloadResolutionForMoveAssignment = true;
+ data().NeedOverloadResolutionForDestructor = true;
+ }
} else if (!Context.getLangOpts().ObjCAutoRefCount) {
setHasObjectMember(true);
}
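The language rule the new isUnion() block records, shown as a small example (not part of the patch): a variant member with a non-trivial special member deletes the union's corresponding defaulted special member.

struct NonTrivial { ~NonTrivial(); };   // non-trivial destructor
union U { NonTrivial m; };              // U's defaulted destructor is deleted
// void g() { U u; }                    // ill-formed unless U declares its own ~U()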
@@ -1058,6 +1074,10 @@ void CXXRecordDecl::addedMember(Decl *D) {
if (T->isReferenceType())
data().DefaultedMoveAssignmentIsDeleted = true;
+ // Bitfields of length 0 are also zero-sized, but we already bailed out for
+ // those because they are always unnamed.
+ bool IsZeroSize = Field->isZeroSize(Context);
+
if (const auto *RecordTy = T->getAs<RecordType>()) {
auto *FieldRec = cast<CXXRecordDecl>(RecordTy->getDecl());
if (FieldRec->getDefinition()) {
@@ -1173,7 +1193,8 @@ void CXXRecordDecl::addedMember(Decl *D) {
// A standard-layout class is a class that:
// [...]
// -- has no element of the set M(S) of types as a base class.
- if (data().IsStandardLayout && (isUnion() || IsFirstField) &&
+ if (data().IsStandardLayout &&
+ (isUnion() || IsFirstField || IsZeroSize) &&
hasSubobjectAtOffsetZeroOfEmptyBaseType(Context, FieldRec))
data().IsStandardLayout = false;
@@ -1255,8 +1276,10 @@ void CXXRecordDecl::addedMember(Decl *D) {
}
// C++14 [meta.unary.prop]p4:
- // T is a class type [...] with [...] no non-static data members
- data().Empty = false;
+ // T is a class type [...] with [...] no non-static data members other
+ // than subobjects of zero size
+ if (data().Empty && !IsZeroSize)
+ data().Empty = false;
}
// Handle using declarations of conversion functions.
@@ -1411,13 +1434,28 @@ void CXXRecordDecl::getCaptureFields(
TemplateParameterList *
CXXRecordDecl::getGenericLambdaTemplateParameterList() const {
- if (!isLambda()) return nullptr;
+ if (!isGenericLambda()) return nullptr;
CXXMethodDecl *CallOp = getLambdaCallOperator();
if (FunctionTemplateDecl *Tmpl = CallOp->getDescribedFunctionTemplate())
return Tmpl->getTemplateParameters();
return nullptr;
}
+ArrayRef<NamedDecl *>
+CXXRecordDecl::getLambdaExplicitTemplateParameters() const {
+ TemplateParameterList *List = getGenericLambdaTemplateParameterList();
+ if (!List)
+ return {};
+
+ assert(std::is_partitioned(List->begin(), List->end(),
+ [](const NamedDecl *D) { return !D->isImplicit(); })
+ && "Explicit template params should be ordered before implicit ones");
+
+ const auto ExplicitEnd = llvm::partition_point(
+ *List, [](const NamedDecl *D) { return !D->isImplicit(); });
+ return llvm::makeArrayRef(List->begin(), ExplicitEnd);
+}
+
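A C++2a illustration (not from the patch) of what the new accessor returns: for the lambda below the explicit parameter list is {T}, while the parameter invented for 'auto u' is implicit and is filtered out at the partition point.

auto L = []<typename T>(T t, auto u) { return t; };
// getLambdaExplicitTemplateParameters() on L's closure type yields only T.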
Decl *CXXRecordDecl::getLambdaContextDecl() const {
assert(isLambda() && "Not a lambda closure type!");
ExternalASTSource *Source = getParentASTContext().getExternalSource();
@@ -1586,8 +1624,8 @@ void CXXRecordDecl::removeConversion(const NamedDecl *ConvDecl) {
for (unsigned I = 0, E = Convs.size(); I != E; ++I) {
if (Convs[I].getDecl() == ConvDecl) {
Convs.erase(I);
- assert(std::find(Convs.begin(), Convs.end(), ConvDecl) == Convs.end()
- && "conversion was found multiple times in unresolved set");
+ assert(llvm::find(Convs, ConvDecl) == Convs.end() &&
+ "conversion was found multiple times in unresolved set");
return;
}
}
@@ -1852,19 +1890,47 @@ bool CXXRecordDecl::mayBeAbstract() const {
void CXXDeductionGuideDecl::anchor() {}
+bool ExplicitSpecifier::isEquivalent(const ExplicitSpecifier Other) const {
+ if ((getKind() != Other.getKind() ||
+ getKind() == ExplicitSpecKind::Unresolved)) {
+ if (getKind() == ExplicitSpecKind::Unresolved &&
+ Other.getKind() == ExplicitSpecKind::Unresolved) {
+ ODRHash SelfHash, OtherHash;
+ SelfHash.AddStmt(getExpr());
+ OtherHash.AddStmt(Other.getExpr());
+ return SelfHash.CalculateHash() == OtherHash.CalculateHash();
+ } else
+ return false;
+ }
+ return true;
+}
+
+ExplicitSpecifier ExplicitSpecifier::getFromDecl(FunctionDecl *Function) {
+ switch (Function->getDeclKind()) {
+ case Decl::Kind::CXXConstructor:
+ return cast<CXXConstructorDecl>(Function)->getExplicitSpecifier();
+ case Decl::Kind::CXXConversion:
+ return cast<CXXConversionDecl>(Function)->getExplicitSpecifier();
+ case Decl::Kind::CXXDeductionGuide:
+ return cast<CXXDeductionGuideDecl>(Function)->getExplicitSpecifier();
+ default:
+ return {};
+ }
+}
+
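For orientation, a small C++2a example (not part of the patch) of the specifier forms getFromDecl() and isEquivalent() deal with: a plain explicit, no specifier, and a dependent explicit(bool) that stays ExplicitSpecKind::Unresolved until instantiation and is therefore compared by ODR hash above.

struct W {
  explicit W(int);                 // resolved: explicit
  W(long);                         // resolved: not explicit
  template <typename T>
  explicit(sizeof(T) > 4) W(T);    // unresolved: depends on T
};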
CXXDeductionGuideDecl *CXXDeductionGuideDecl::Create(
- ASTContext &C, DeclContext *DC, SourceLocation StartLoc, bool IsExplicit,
- const DeclarationNameInfo &NameInfo, QualType T, TypeSourceInfo *TInfo,
- SourceLocation EndLocation) {
- return new (C, DC) CXXDeductionGuideDecl(C, DC, StartLoc, IsExplicit,
- NameInfo, T, TInfo, EndLocation);
+ ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
+ ExplicitSpecifier ES, const DeclarationNameInfo &NameInfo, QualType T,
+ TypeSourceInfo *TInfo, SourceLocation EndLocation) {
+ return new (C, DC) CXXDeductionGuideDecl(C, DC, StartLoc, ES, NameInfo, T,
+ TInfo, EndLocation);
}
CXXDeductionGuideDecl *CXXDeductionGuideDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- return new (C, ID) CXXDeductionGuideDecl(C, nullptr, SourceLocation(), false,
- DeclarationNameInfo(), QualType(),
- nullptr, SourceLocation());
+ return new (C, ID) CXXDeductionGuideDecl(
+ C, nullptr, SourceLocation(), ExplicitSpecifier(), DeclarationNameInfo(),
+ QualType(), nullptr, SourceLocation());
}
void CXXMethodDecl::anchor() {}
@@ -1891,8 +1957,8 @@ static bool recursivelyOverrides(const CXXMethodDecl *DerivedMD,
}
CXXMethodDecl *
-CXXMethodDecl::getCorrespondingMethodInClass(const CXXRecordDecl *RD,
- bool MayBeBase) {
+CXXMethodDecl::getCorrespondingMethodDeclaredInClass(const CXXRecordDecl *RD,
+ bool MayBeBase) {
if (this->getParent()->getCanonicalDecl() == RD->getCanonicalDecl())
return this;
@@ -1918,6 +1984,15 @@ CXXMethodDecl::getCorrespondingMethodInClass(const CXXRecordDecl *RD,
return MD;
}
+ return nullptr;
+}
+
+CXXMethodDecl *
+CXXMethodDecl::getCorrespondingMethodInClass(const CXXRecordDecl *RD,
+ bool MayBeBase) {
+ if (auto *MD = getCorrespondingMethodDeclaredInClass(RD, MayBeBase))
+ return MD;
+
for (const auto &I : RD->bases()) {
const RecordType *RT = I.getType()->getAs<RecordType>();
if (!RT)
@@ -1931,22 +2006,22 @@ CXXMethodDecl::getCorrespondingMethodInClass(const CXXRecordDecl *RD,
return nullptr;
}
-CXXMethodDecl *
-CXXMethodDecl::Create(ASTContext &C, CXXRecordDecl *RD,
- SourceLocation StartLoc,
- const DeclarationNameInfo &NameInfo,
- QualType T, TypeSourceInfo *TInfo,
- StorageClass SC, bool isInline,
- bool isConstexpr, SourceLocation EndLocation) {
- return new (C, RD) CXXMethodDecl(CXXMethod, C, RD, StartLoc, NameInfo,
- T, TInfo, SC, isInline, isConstexpr,
- EndLocation);
+CXXMethodDecl *CXXMethodDecl::Create(ASTContext &C, CXXRecordDecl *RD,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ StorageClass SC, bool isInline,
+ ConstexprSpecKind ConstexprKind,
+ SourceLocation EndLocation) {
+ return new (C, RD)
+ CXXMethodDecl(CXXMethod, C, RD, StartLoc, NameInfo, T, TInfo, SC,
+ isInline, ConstexprKind, EndLocation);
}
CXXMethodDecl *CXXMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- return new (C, ID) CXXMethodDecl(CXXMethod, C, nullptr, SourceLocation(),
- DeclarationNameInfo(), QualType(), nullptr,
- SC_None, false, false, SourceLocation());
+ return new (C, ID) CXXMethodDecl(
+ CXXMethod, C, nullptr, SourceLocation(), DeclarationNameInfo(),
+ QualType(), nullptr, SC_None, false, CSK_unspecified, SourceLocation());
}
CXXMethodDecl *CXXMethodDecl::getDevirtualizedMethod(const Expr *Base,
@@ -2081,8 +2156,13 @@ bool CXXMethodDecl::isUsualDeallocationFunction(
return false;
// In C++17 onwards, all potential usual deallocation functions are actual
- // usual deallocation functions.
- if (Context.getLangOpts().AlignedAllocation)
+ // usual deallocation functions. Honor this behavior when post-C++14
+ // deallocation functions are offered as extensions too.
+ // FIXME(EricWF): Destroying Delete should be a language option. How do we
+ // handle when destroying delete is used prior to C++17?
+ if (Context.getLangOpts().CPlusPlus17 ||
+ Context.getLangOpts().AlignedAllocation ||
+ isDestroyingOperatorDelete())
return true;
// This function is a usual deallocation function if there are no
@@ -2173,12 +2253,23 @@ CXXMethodDecl::overridden_methods() const {
return getASTContext().overridden_methods(this);
}
+static QualType getThisObjectType(ASTContext &C, const FunctionProtoType *FPT,
+ const CXXRecordDecl *Decl) {
+ QualType ClassTy = C.getTypeDeclType(Decl);
+ return C.getQualifiedType(ClassTy, FPT->getMethodQuals());
+}
+
QualType CXXMethodDecl::getThisType(const FunctionProtoType *FPT,
const CXXRecordDecl *Decl) {
ASTContext &C = Decl->getASTContext();
- QualType ClassTy = C.getTypeDeclType(Decl);
- ClassTy = C.getQualifiedType(ClassTy, FPT->getTypeQuals());
- return C.getPointerType(ClassTy);
+ QualType ObjectTy = ::getThisObjectType(C, FPT, Decl);
+ return C.getPointerType(ObjectTy);
+}
+
+QualType CXXMethodDecl::getThisObjectType(const FunctionProtoType *FPT,
+ const CXXRecordDecl *Decl) {
+ ASTContext &C = Decl->getASTContext();
+ return ::getThisObjectType(C, FPT, Decl);
}
QualType CXXMethodDecl::getThisType() const {
@@ -2193,6 +2284,14 @@ QualType CXXMethodDecl::getThisType() const {
getParent());
}
+QualType CXXMethodDecl::getThisObjectType() const {
+ // Ditto getThisType.
+ assert(isInstance() && "No 'this' for static methods!");
+
+ return CXXMethodDecl::getThisObjectType(getType()->getAs<FunctionProtoType>(),
+ getParent());
+}
+
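A minimal illustration (not from the patch) of the distinction introduced here: for the method below, getThisType() describes 'const X *' while the new getThisObjectType() describes the pointee, 'const X', carrying the method qualifiers.

struct X {
  void f() const;   // 'this' has type 'const X *'; the 'this' object type is 'const X'
};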
bool CXXMethodDecl::hasInlineBody() const {
// If this function is a template instantiation, look at the template from
// which it was instantiated.
@@ -2297,47 +2396,55 @@ SourceRange CXXCtorInitializer::getSourceRange() const {
CXXConstructorDecl::CXXConstructorDecl(
ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo, QualType T, TypeSourceInfo *TInfo,
- bool isExplicitSpecified, bool isInline, bool isImplicitlyDeclared,
- bool isConstexpr, InheritedConstructor Inherited)
+ ExplicitSpecifier ES, bool isInline, bool isImplicitlyDeclared,
+ ConstexprSpecKind ConstexprKind, InheritedConstructor Inherited)
: CXXMethodDecl(CXXConstructor, C, RD, StartLoc, NameInfo, T, TInfo,
- SC_None, isInline, isConstexpr, SourceLocation()) {
+ SC_None, isInline, ConstexprKind, SourceLocation()) {
setNumCtorInitializers(0);
setInheritingConstructor(static_cast<bool>(Inherited));
setImplicit(isImplicitlyDeclared);
+ CXXConstructorDeclBits.HasTrailingExplicitSpecifier = ES.getExpr() ? 1 : 0;
if (Inherited)
*getTrailingObjects<InheritedConstructor>() = Inherited;
- setExplicitSpecified(isExplicitSpecified);
+ setExplicitSpecifier(ES);
}
void CXXConstructorDecl::anchor() {}
CXXConstructorDecl *CXXConstructorDecl::CreateDeserialized(ASTContext &C,
unsigned ID,
- bool Inherited) {
- unsigned Extra = additionalSizeToAlloc<InheritedConstructor>(Inherited);
- auto *Result = new (C, ID, Extra) CXXConstructorDecl(
- C, nullptr, SourceLocation(), DeclarationNameInfo(), QualType(), nullptr,
- false, false, false, false, InheritedConstructor());
- Result->setInheritingConstructor(Inherited);
+ uint64_t AllocKind) {
+ bool hasTraillingExplicit = static_cast<bool>(AllocKind & TAKHasTailExplicit);
+ bool isInheritingConstructor =
+ static_cast<bool>(AllocKind & TAKInheritsConstructor);
+ unsigned Extra =
+ additionalSizeToAlloc<InheritedConstructor, ExplicitSpecifier>(
+ isInheritingConstructor, hasTraillingExplicit);
+ auto *Result = new (C, ID, Extra)
+ CXXConstructorDecl(C, nullptr, SourceLocation(), DeclarationNameInfo(),
+ QualType(), nullptr, ExplicitSpecifier(), false, false,
+ CSK_unspecified, InheritedConstructor());
+ Result->setInheritingConstructor(isInheritingConstructor);
+ Result->CXXConstructorDeclBits.HasTrailingExplicitSpecifier =
+ hasTraillingExplicit;
+ Result->setExplicitSpecifier(ExplicitSpecifier());
return Result;
}
-CXXConstructorDecl *
-CXXConstructorDecl::Create(ASTContext &C, CXXRecordDecl *RD,
- SourceLocation StartLoc,
- const DeclarationNameInfo &NameInfo,
- QualType T, TypeSourceInfo *TInfo,
- bool isExplicit, bool isInline,
- bool isImplicitlyDeclared, bool isConstexpr,
- InheritedConstructor Inherited) {
+CXXConstructorDecl *CXXConstructorDecl::Create(
+ ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo, QualType T, TypeSourceInfo *TInfo,
+ ExplicitSpecifier ES, bool isInline, bool isImplicitlyDeclared,
+ ConstexprSpecKind ConstexprKind, InheritedConstructor Inherited) {
assert(NameInfo.getName().getNameKind()
== DeclarationName::CXXConstructorName &&
"Name must refer to a constructor");
unsigned Extra =
- additionalSizeToAlloc<InheritedConstructor>(Inherited ? 1 : 0);
- return new (C, RD, Extra) CXXConstructorDecl(
- C, RD, StartLoc, NameInfo, T, TInfo, isExplicit, isInline,
- isImplicitlyDeclared, isConstexpr, Inherited);
+ additionalSizeToAlloc<InheritedConstructor, ExplicitSpecifier>(
+ Inherited ? 1 : 0, ES.getExpr() ? 1 : 0);
+ return new (C, RD, Extra)
+ CXXConstructorDecl(C, RD, StartLoc, NameInfo, T, TInfo, ES, isInline,
+ isImplicitlyDeclared, ConstexprKind, Inherited);
}
CXXConstructorDecl::init_const_iterator CXXConstructorDecl::init_begin() const {
@@ -2488,25 +2595,22 @@ void CXXConversionDecl::anchor() {}
CXXConversionDecl *
CXXConversionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- return new (C, ID) CXXConversionDecl(C, nullptr, SourceLocation(),
- DeclarationNameInfo(), QualType(),
- nullptr, false, false, false,
- SourceLocation());
+ return new (C, ID) CXXConversionDecl(
+ C, nullptr, SourceLocation(), DeclarationNameInfo(), QualType(), nullptr,
+ false, ExplicitSpecifier(), CSK_unspecified, SourceLocation());
}
-CXXConversionDecl *
-CXXConversionDecl::Create(ASTContext &C, CXXRecordDecl *RD,
- SourceLocation StartLoc,
- const DeclarationNameInfo &NameInfo,
- QualType T, TypeSourceInfo *TInfo,
- bool isInline, bool isExplicit,
- bool isConstexpr, SourceLocation EndLocation) {
+CXXConversionDecl *CXXConversionDecl::Create(
+ ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo, QualType T, TypeSourceInfo *TInfo,
+ bool isInline, ExplicitSpecifier ES, ConstexprSpecKind ConstexprKind,
+ SourceLocation EndLocation) {
assert(NameInfo.getName().getNameKind()
== DeclarationName::CXXConversionFunctionName &&
"Name must refer to a conversion function");
- return new (C, RD) CXXConversionDecl(C, RD, StartLoc, NameInfo, T, TInfo,
- isInline, isExplicit, isConstexpr,
- EndLocation);
+ return new (C, RD)
+ CXXConversionDecl(C, RD, StartLoc, NameInfo, T, TInfo, isInline, ES,
+ ConstexprKind, EndLocation);
}
bool CXXConversionDecl::isLambdaToBlockPointerConversion() const {
@@ -2857,6 +2961,12 @@ BindingDecl *BindingDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) BindingDecl(nullptr, SourceLocation(), nullptr);
}
+ValueDecl *BindingDecl::getDecomposedDecl() const {
+ ExternalASTSource *Source =
+ Decomp.isOffset() ? getASTContext().getExternalSource() : nullptr;
+ return cast_or_null<ValueDecl>(Decomp.get(Source));
+}
+
VarDecl *BindingDecl::getHoldingVar() const {
Expr *B = getBinding();
if (!B)
diff --git a/lib/AST/DeclFriend.cpp b/lib/AST/DeclFriend.cpp
index 08fbed361579..8ec1dea84df5 100644
--- a/lib/AST/DeclFriend.cpp
+++ b/lib/AST/DeclFriend.cpp
@@ -1,9 +1,8 @@
//===- DeclFriend.cpp - C++ Friend Declaration AST Node Implementation ----===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/AST/DeclGroup.cpp b/lib/AST/DeclGroup.cpp
index f74ef9bbb839..27dbdaab6f30 100644
--- a/lib/AST/DeclGroup.cpp
+++ b/lib/AST/DeclGroup.cpp
@@ -1,9 +1,8 @@
//===- DeclGroup.cpp - Classes for representing groups of Decls -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/AST/DeclObjC.cpp b/lib/AST/DeclObjC.cpp
index 1ed7fc71b025..bf748fbab8e9 100644
--- a/lib/AST/DeclObjC.cpp
+++ b/lib/AST/DeclObjC.cpp
@@ -1,9 +1,8 @@
//===- DeclObjC.cpp - ObjC Declaration AST Node Implementation ------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -1642,7 +1641,7 @@ ObjCIvarDecl *ObjCInterfaceDecl::all_declared_ivar_begin() {
if (!layout.empty()) {
// Order synthesized ivars by their size.
- std::stable_sort(layout.begin(), layout.end());
+ llvm::stable_sort(layout);
unsigned Ix = 0, EIx = layout.size();
if (!data().IvarList) {
data().IvarList = layout[0].Ivar; Ix++;
diff --git a/lib/AST/DeclOpenMP.cpp b/lib/AST/DeclOpenMP.cpp
index b77a67cbf38d..af321280d417 100644
--- a/lib/AST/DeclOpenMP.cpp
+++ b/lib/AST/DeclOpenMP.cpp
@@ -1,9 +1,8 @@
//===--- DeclOpenMP.cpp - Declaration OpenMP AST Node Implementation ------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
@@ -54,6 +53,49 @@ void OMPThreadPrivateDecl::setVars(ArrayRef<Expr *> VL) {
}
//===----------------------------------------------------------------------===//
+// OMPAllocateDecl Implementation.
+//===----------------------------------------------------------------------===//
+
+void OMPAllocateDecl::anchor() { }
+
+OMPAllocateDecl *OMPAllocateDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L, ArrayRef<Expr *> VL,
+ ArrayRef<OMPClause *> CL) {
+ OMPAllocateDecl *D = new (
+ C, DC, additionalSizeToAlloc<Expr *, OMPClause *>(VL.size(), CL.size()))
+ OMPAllocateDecl(OMPAllocate, DC, L);
+ D->NumVars = VL.size();
+ D->setVars(VL);
+ D->NumClauses = CL.size();
+ D->setClauses(CL);
+ return D;
+}
+
+OMPAllocateDecl *OMPAllocateDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+ unsigned NVars,
+ unsigned NClauses) {
+ OMPAllocateDecl *D =
+ new (C, ID, additionalSizeToAlloc<Expr *, OMPClause *>(NVars, NClauses))
+ OMPAllocateDecl(OMPAllocate, nullptr, SourceLocation());
+ D->NumVars = NVars;
+ D->NumClauses = NClauses;
+ return D;
+}
+
+void OMPAllocateDecl::setVars(ArrayRef<Expr *> VL) {
+ assert(VL.size() == NumVars &&
+ "Number of variables is not the same as the preallocated buffer");
+ std::uninitialized_copy(VL.begin(), VL.end(), getTrailingObjects<Expr *>());
+}
+
+void OMPAllocateDecl::setClauses(ArrayRef<OMPClause *> CL) {
+ assert(CL.size() == NumClauses &&
+ "Number of variables is not the same as the preallocated buffer");
+ std::uninitialized_copy(CL.begin(), CL.end(),
+ getTrailingObjects<OMPClause *>());
+}
+
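For orientation, the OpenMP 5.0 source construct this node models, as a sketch (the allocator clause shown uses the predefined omp_high_bw_mem_alloc handle and is not taken from the patch); the variable list becomes VL and the clauses become CL in the trailing storage above.

#include <omp.h>
int a, b;
#pragma omp allocate(a, b) allocator(omp_high_bw_mem_alloc)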
+//===----------------------------------------------------------------------===//
// OMPRequiresDecl Implementation.
//===----------------------------------------------------------------------===//
@@ -124,6 +166,66 @@ OMPDeclareReductionDecl::getPrevDeclInScope() const {
}
//===----------------------------------------------------------------------===//
+// OMPDeclareMapperDecl Implementation.
+//===----------------------------------------------------------------------===//
+
+void OMPDeclareMapperDecl::anchor() {}
+
+OMPDeclareMapperDecl *
+OMPDeclareMapperDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
+ DeclarationName Name, QualType T,
+ DeclarationName VarName,
+ OMPDeclareMapperDecl *PrevDeclInScope) {
+ return new (C, DC) OMPDeclareMapperDecl(OMPDeclareMapper, DC, L, Name, T,
+ VarName, PrevDeclInScope);
+}
+
+OMPDeclareMapperDecl *OMPDeclareMapperDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID,
+ unsigned N) {
+ auto *D = new (C, ID)
+ OMPDeclareMapperDecl(OMPDeclareMapper, /*DC=*/nullptr, SourceLocation(),
+ DeclarationName(), QualType(), DeclarationName(),
+ /*PrevDeclInScope=*/nullptr);
+ if (N) {
+ auto **ClauseStorage = C.Allocate<OMPClause *>(N);
+ D->Clauses = llvm::makeMutableArrayRef<OMPClause *>(ClauseStorage, N);
+ }
+ return D;
+}
+
+/// Creates an array of clauses for this mapper declaration and initializes
+/// them. The space used to store clause pointers is dynamically allocated,
+/// because we do not know the number of clauses when creating
+/// OMPDeclareMapperDecl.
+void OMPDeclareMapperDecl::CreateClauses(ASTContext &C,
+ ArrayRef<OMPClause *> CL) {
+ assert(Clauses.empty() && "Number of clauses should be 0 on initialization");
+ size_t NumClauses = CL.size();
+ if (NumClauses) {
+ auto **ClauseStorage = C.Allocate<OMPClause *>(NumClauses);
+ Clauses = llvm::makeMutableArrayRef<OMPClause *>(ClauseStorage, NumClauses);
+ setClauses(CL);
+ }
+}
+
+void OMPDeclareMapperDecl::setClauses(ArrayRef<OMPClause *> CL) {
+ assert(CL.size() == Clauses.size() &&
+ "Number of clauses is not the same as the preallocated buffer");
+ std::uninitialized_copy(CL.begin(), CL.end(), Clauses.data());
+}
+
+OMPDeclareMapperDecl *OMPDeclareMapperDecl::getPrevDeclInScope() {
+ return cast_or_null<OMPDeclareMapperDecl>(
+ PrevDeclInScope.get(getASTContext().getExternalSource()));
+}
+
+const OMPDeclareMapperDecl *OMPDeclareMapperDecl::getPrevDeclInScope() const {
+ return cast_or_null<OMPDeclareMapperDecl>(
+ PrevDeclInScope.get(getASTContext().getExternalSource()));
+}
+
+//===----------------------------------------------------------------------===//
// OMPCapturedExprDecl Implementation.
//===----------------------------------------------------------------------===//
diff --git a/lib/AST/DeclPrinter.cpp b/lib/AST/DeclPrinter.cpp
index 517851f9eeb1..f5c69944034a 100644
--- a/lib/AST/DeclPrinter.cpp
+++ b/lib/AST/DeclPrinter.cpp
@@ -1,9 +1,8 @@
//===--- DeclPrinter.cpp - Printing implementation for Decl ASTs ----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -16,6 +15,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
@@ -100,11 +100,14 @@ namespace {
void VisitUsingDecl(UsingDecl *D);
void VisitUsingShadowDecl(UsingShadowDecl *D);
void VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D);
+ void VisitOMPAllocateDecl(OMPAllocateDecl *D);
void VisitOMPRequiresDecl(OMPRequiresDecl *D);
void VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D);
+ void VisitOMPDeclareMapperDecl(OMPDeclareMapperDecl *D);
void VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D);
- void printTemplateParameters(const TemplateParameterList *Params);
+ void printTemplateParameters(const TemplateParameterList *Params,
+ bool OmitTemplateKW = false);
void printTemplateArguments(const TemplateArgumentList &Args,
const TemplateParameterList *Params = nullptr);
void prettyPrintAttributes(Decl *D);
@@ -125,6 +128,18 @@ void Decl::print(raw_ostream &Out, const PrintingPolicy &Policy,
Printer.Visit(const_cast<Decl*>(this));
}
+void TemplateParameterList::print(raw_ostream &Out, const ASTContext &Context,
+ bool OmitTemplateKW) const {
+ print(Out, Context, Context.getPrintingPolicy(), OmitTemplateKW);
+}
+
+void TemplateParameterList::print(raw_ostream &Out, const ASTContext &Context,
+ const PrintingPolicy &Policy,
+ bool OmitTemplateKW) const {
+ DeclPrinter Printer(Out, Policy, Context);
+ Printer.printTemplateParameters(this, OmitTemplateKW);
+}
+
static QualType GetBaseType(QualType T) {
// FIXME: This should be on the Type class!
QualType BaseType = T;
@@ -424,7 +439,8 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
// FIXME: Need to be able to tell the DeclPrinter when
const char *Terminator = nullptr;
if (isa<OMPThreadPrivateDecl>(*D) || isa<OMPDeclareReductionDecl>(*D) ||
- isa<OMPRequiresDecl>(*D))
+ isa<OMPDeclareMapperDecl>(*D) || isa<OMPRequiresDecl>(*D) ||
+ isa<OMPAllocateDecl>(*D))
Terminator = nullptr;
else if (isa<ObjCMethodDecl>(*D) && cast<ObjCMethodDecl>(*D)->hasBody())
Terminator = nullptr;
@@ -550,6 +566,21 @@ void DeclPrinter::VisitEnumConstantDecl(EnumConstantDecl *D) {
}
}
+static void printExplicitSpecifier(ExplicitSpecifier ES, llvm::raw_ostream &Out,
+ PrintingPolicy &Policy,
+ unsigned Indentation) {
+ std::string Proto = "explicit";
+ llvm::raw_string_ostream EOut(Proto);
+ if (ES.getExpr()) {
+ EOut << "(";
+ ES.getExpr()->printPretty(EOut, nullptr, Policy, Indentation);
+ EOut << ")";
+ }
+ EOut << " ";
+ EOut.flush();
+ Out << EOut.str();
+}
+
void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
if (!D->getDescribedFunctionTemplate() &&
!D->isFunctionTemplateSpecialization())
@@ -579,11 +610,12 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
if (D->isInlineSpecified()) Out << "inline ";
if (D->isVirtualAsWritten()) Out << "virtual ";
if (D->isModulePrivate()) Out << "__module_private__ ";
- if (D->isConstexpr() && !D->isExplicitlyDefaulted()) Out << "constexpr ";
- if ((CDecl && CDecl->isExplicitSpecified()) ||
- (ConversionDecl && ConversionDecl->isExplicitSpecified()) ||
- (GuideDecl && GuideDecl->isExplicitSpecified()))
- Out << "explicit ";
+ if (D->isConstexprSpecified() && !D->isExplicitlyDefaulted())
+ Out << "constexpr ";
+ if (D->isConsteval()) Out << "consteval ";
+ ExplicitSpecifier ExplicitSpec = ExplicitSpecifier::getFromDecl(D);
+ if (ExplicitSpec.isSpecified())
+ printExplicitSpecifier(ExplicitSpec, Out, Policy, Indentation);
}
PrintingPolicy SubPolicy(Policy);
@@ -986,25 +1018,35 @@ void DeclPrinter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
Visit(*D->decls_begin());
}
-void DeclPrinter::printTemplateParameters(const TemplateParameterList *Params) {
+void DeclPrinter::printTemplateParameters(const TemplateParameterList *Params,
+ bool OmitTemplateKW) {
assert(Params);
- Out << "template <";
+ if (!OmitTemplateKW)
+ Out << "template ";
+ Out << '<';
+
+ bool NeedComma = false;
+ for (const Decl *Param : *Params) {
+ if (Param->isImplicit())
+ continue;
- for (unsigned i = 0, e = Params->size(); i != e; ++i) {
- if (i != 0)
+ if (NeedComma)
Out << ", ";
+ else
+ NeedComma = true;
- const Decl *Param = Params->getParam(i);
if (auto TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
if (TTP->wasDeclaredWithTypename())
- Out << "typename ";
+ Out << "typename";
else
- Out << "class ";
+ Out << "class";
if (TTP->isParameterPack())
- Out << "...";
+ Out << " ...";
+ else if (!TTP->getName().empty())
+ Out << ' ';
Out << *TTP;
@@ -1029,7 +1071,9 @@ void DeclPrinter::printTemplateParameters(const TemplateParameterList *Params) {
}
}
- Out << "> ";
+ Out << '>';
+ if (!OmitTemplateKW)
+ Out << ' ';
}
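A sketch (not part of the patch) of the two output forms the reworked printer produces, using the TemplateParameterList::print overloads added earlier in this file; TPL and Ctx are hypothetical and assume the clang AST headers.

TPL->print(llvm::outs(), Ctx);                           // "template <typename T, int N> "
TPL->print(llvm::outs(), Ctx, /*OmitTemplateKW=*/true);  // "<typename T, int N>"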
void DeclPrinter::printTemplateArguments(const TemplateArgumentList &Args,
@@ -1079,8 +1123,13 @@ void DeclPrinter::VisitTemplateDecl(const TemplateDecl *D) {
if (TTP->isParameterPack())
Out << "...";
Out << D->getName();
- } else {
- Visit(D->getTemplatedDecl());
+ } else if (auto *TD = D->getTemplatedDecl())
+ Visit(TD);
+ else if (const auto *Concept = dyn_cast<ConceptDecl>(D)) {
+ Out << "concept " << Concept->getName() << " = " ;
+ Concept->getConstraintExpr()->printPretty(Out, nullptr, Policy,
+ Indentation);
+ Out << ";";
}
}
@@ -1389,6 +1438,13 @@ void DeclPrinter::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *AID) {
/// PrintObjCPropertyDecl - print a property declaration.
///
+/// Print attributes in the following order:
+/// - class
+/// - nonatomic | atomic
+/// - assign | retain | strong | copy | weak | unsafe_unretained
+/// - readwrite | readonly
+/// - getter & setter
+/// - nullability
void DeclPrinter::VisitObjCPropertyDecl(ObjCPropertyDecl *PDecl) {
if (PDecl->getPropertyImplementation() == ObjCPropertyDecl::Required)
Out << "@required\n";
@@ -1400,58 +1456,69 @@ void DeclPrinter::VisitObjCPropertyDecl(ObjCPropertyDecl *PDecl) {
Out << "@property";
if (PDecl->getPropertyAttributes() != ObjCPropertyDecl::OBJC_PR_noattr) {
bool first = true;
- Out << " (";
- if (PDecl->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_readonly) {
- Out << (first ? ' ' : ',') << "readonly";
+ Out << "(";
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_class) {
+ Out << (first ? "" : ", ") << "class";
first = false;
}
- if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) {
- Out << (first ? ' ' : ',') << "getter = ";
- PDecl->getGetterName().print(Out);
+ if (PDecl->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_nonatomic) {
+ Out << (first ? "" : ", ") << "nonatomic";
first = false;
}
- if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) {
- Out << (first ? ' ' : ',') << "setter = ";
- PDecl->getSetterName().print(Out);
+ if (PDecl->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_atomic) {
+ Out << (first ? "" : ", ") << "atomic";
first = false;
}
if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_assign) {
- Out << (first ? ' ' : ',') << "assign";
- first = false;
- }
-
- if (PDecl->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_readwrite) {
- Out << (first ? ' ' : ',') << "readwrite";
+ Out << (first ? "" : ", ") << "assign";
first = false;
}
-
if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_retain) {
- Out << (first ? ' ' : ',') << "retain";
+ Out << (first ? "" : ", ") << "retain";
first = false;
}
if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_strong) {
- Out << (first ? ' ' : ',') << "strong";
+ Out << (first ? "" : ", ") << "strong";
first = false;
}
-
if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_copy) {
- Out << (first ? ' ' : ',') << "copy";
+ Out << (first ? "" : ", ") << "copy";
+ first = false;
+ }
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak) {
+ Out << (first ? "" : ", ") << "weak";
+ first = false;
+ }
+ if (PDecl->getPropertyAttributes()
+ & ObjCPropertyDecl::OBJC_PR_unsafe_unretained) {
+ Out << (first ? "" : ", ") << "unsafe_unretained";
first = false;
}
if (PDecl->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_nonatomic) {
- Out << (first ? ' ' : ',') << "nonatomic";
+ ObjCPropertyDecl::OBJC_PR_readwrite) {
+ Out << (first ? "" : ", ") << "readwrite";
first = false;
}
if (PDecl->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_atomic) {
- Out << (first ? ' ' : ',') << "atomic";
+ ObjCPropertyDecl::OBJC_PR_readonly) {
+ Out << (first ? "" : ", ") << "readonly";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) {
+ Out << (first ? "" : ", ") << "getter = ";
+ PDecl->getGetterName().print(Out);
+ first = false;
+ }
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) {
+ Out << (first ? "" : ", ") << "setter = ";
+ PDecl->getSetterName().print(Out);
first = false;
}
@@ -1461,25 +1528,24 @@ void DeclPrinter::VisitObjCPropertyDecl(ObjCPropertyDecl *PDecl) {
if (*nullability == NullabilityKind::Unspecified &&
(PDecl->getPropertyAttributes() &
ObjCPropertyDecl::OBJC_PR_null_resettable)) {
- Out << (first ? ' ' : ',') << "null_resettable";
+ Out << (first ? "" : ", ") << "null_resettable";
} else {
- Out << (first ? ' ' : ',')
+ Out << (first ? "" : ", ")
<< getNullabilitySpelling(*nullability, true);
}
first = false;
}
}
- if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_class) {
- Out << (first ? ' ' : ',') << "class";
- first = false;
- }
-
(void) first; // Silence dead store warning due to idiomatic code.
- Out << " )";
+ Out << ")";
}
- Out << ' ' << PDecl->getASTContext().getUnqualifiedObjCPointerType(T).
- getAsString(Policy) << ' ' << *PDecl;
+ std::string TypeStr = PDecl->getASTContext().getUnqualifiedObjCPointerType(T).
+ getAsString(Policy);
+ Out << ' ' << TypeStr;
+ if (!StringRef(TypeStr).endswith("*"))
+ Out << ' ';
+ Out << *PDecl;
if (Policy.PolishForDeclaration)
Out << ';';
}
@@ -1546,6 +1612,26 @@ void DeclPrinter::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) {
}
}
+void DeclPrinter::VisitOMPAllocateDecl(OMPAllocateDecl *D) {
+ Out << "#pragma omp allocate";
+ if (!D->varlist_empty()) {
+ for (OMPAllocateDecl::varlist_iterator I = D->varlist_begin(),
+ E = D->varlist_end();
+ I != E; ++I) {
+ Out << (I == D->varlist_begin() ? '(' : ',');
+ NamedDecl *ND = cast<DeclRefExpr>(*I)->getDecl();
+ ND->printQualifiedName(Out);
+ }
+ Out << ")";
+ }
+ if (!D->clauselist_empty()) {
+ Out << " ";
+ OMPClausePrinter Printer(Out, Policy);
+ for (OMPClause *C : D->clauselists())
+ Printer.Visit(C);
+ }
+}
+
void DeclPrinter::VisitOMPRequiresDecl(OMPRequiresDecl *D) {
Out << "#pragma omp requires ";
if (!D->clauselist_empty()) {
@@ -1559,14 +1645,8 @@ void DeclPrinter::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) {
if (!D->isInvalidDecl()) {
Out << "#pragma omp declare reduction (";
if (D->getDeclName().getNameKind() == DeclarationName::CXXOperatorName) {
- static const char *const OperatorNames[NUM_OVERLOADED_OPERATORS] = {
- nullptr,
-#define OVERLOADED_OPERATOR(Name, Spelling, Token, Unary, Binary, MemberOnly) \
- Spelling,
-#include "clang/Basic/OperatorKinds.def"
- };
const char *OpName =
- OperatorNames[D->getDeclName().getCXXOverloadedOperator()];
+ getOperatorSpelling(D->getDeclName().getCXXOverloadedOperator());
assert(OpName && "not an overloaded operator");
Out << OpName;
} else {
@@ -1598,6 +1678,25 @@ void DeclPrinter::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) {
}
}
+void DeclPrinter::VisitOMPDeclareMapperDecl(OMPDeclareMapperDecl *D) {
+ if (!D->isInvalidDecl()) {
+ Out << "#pragma omp declare mapper (";
+ D->printName(Out);
+ Out << " : ";
+ D->getType().print(Out, Policy);
+ Out << " ";
+ Out << D->getVarName();
+ Out << ")";
+ if (!D->clauselist_empty()) {
+ OMPClausePrinter Printer(Out, Policy);
+ for (auto *C : D->clauselists()) {
+ Out << " ";
+ Printer.Visit(C);
+ }
+ }
+ }
+}
+
void DeclPrinter::VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D) {
D->getInit()->printPretty(Out, nullptr, Policy, Indentation);
}
diff --git a/lib/AST/DeclTemplate.cpp b/lib/AST/DeclTemplate.cpp
index 76f29dac1647..40c39c845db6 100644
--- a/lib/AST/DeclTemplate.cpp
+++ b/lib/AST/DeclTemplate.cpp
@@ -1,9 +1,8 @@
//===- DeclTemplate.cpp - Template Declaration AST Node Implementation ----===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -688,22 +687,20 @@ TemplateArgumentList::CreateCopy(ASTContext &Context,
return new (Mem) TemplateArgumentList(Args);
}
-FunctionTemplateSpecializationInfo *
-FunctionTemplateSpecializationInfo::Create(ASTContext &C, FunctionDecl *FD,
- FunctionTemplateDecl *Template,
- TemplateSpecializationKind TSK,
- const TemplateArgumentList *TemplateArgs,
- const TemplateArgumentListInfo *TemplateArgsAsWritten,
- SourceLocation POI) {
+FunctionTemplateSpecializationInfo *FunctionTemplateSpecializationInfo::Create(
+ ASTContext &C, FunctionDecl *FD, FunctionTemplateDecl *Template,
+ TemplateSpecializationKind TSK, const TemplateArgumentList *TemplateArgs,
+ const TemplateArgumentListInfo *TemplateArgsAsWritten, SourceLocation POI,
+ MemberSpecializationInfo *MSInfo) {
const ASTTemplateArgumentListInfo *ArgsAsWritten = nullptr;
if (TemplateArgsAsWritten)
ArgsAsWritten = ASTTemplateArgumentListInfo::Create(C,
*TemplateArgsAsWritten);
- return new (C) FunctionTemplateSpecializationInfo(FD, Template, TSK,
- TemplateArgs,
- ArgsAsWritten,
- POI);
+ void *Mem =
+ C.Allocate(totalSizeToAlloc<MemberSpecializationInfo *>(MSInfo ? 1 : 0));
+ return new (Mem) FunctionTemplateSpecializationInfo(
+ FD, Template, TSK, TemplateArgs, ArgsAsWritten, POI, MSInfo);
}
//===----------------------------------------------------------------------===//
@@ -825,6 +822,26 @@ ClassTemplateSpecializationDecl::getSourceRange() const {
}
//===----------------------------------------------------------------------===//
+// ConceptDecl Implementation
+//===----------------------------------------------------------------------===//
+ConceptDecl *ConceptDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L, DeclarationName Name,
+ TemplateParameterList *Params,
+ Expr *ConstraintExpr) {
+ AdoptTemplateParameterList(Params, DC);
+ return new (C, DC) ConceptDecl(DC, L, Name, Params, ConstraintExpr);
+}
+
+ConceptDecl *ConceptDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ ConceptDecl *Result = new (C, ID) ConceptDecl(nullptr, SourceLocation(),
+ DeclarationName(),
+ nullptr, nullptr);
+
+ return Result;
+}
+
+//===----------------------------------------------------------------------===//
// ClassTemplatePartialSpecializationDecl Implementation
//===----------------------------------------------------------------------===//
void ClassTemplatePartialSpecializationDecl::anchor() {}
@@ -936,7 +953,7 @@ ClassScopeFunctionSpecializationDecl *
ClassScopeFunctionSpecializationDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
return new (C, ID) ClassScopeFunctionSpecializationDecl(
- nullptr, SourceLocation(), nullptr, false, TemplateArgumentListInfo());
+ nullptr, SourceLocation(), nullptr, nullptr);
}
//===----------------------------------------------------------------------===//
@@ -957,6 +974,7 @@ VarTemplateDecl *VarTemplateDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation L, DeclarationName Name,
TemplateParameterList *Params,
VarDecl *Decl) {
+ AdoptTemplateParameterList(Params, DC);
return new (C, DC) VarTemplateDecl(C, DC, L, Name, Params, Decl);
}
diff --git a/lib/AST/DeclarationName.cpp b/lib/AST/DeclarationName.cpp
index f2c152f918eb..fe69c71aa3dd 100644
--- a/lib/AST/DeclarationName.cpp
+++ b/lib/AST/DeclarationName.cpp
@@ -1,9 +1,8 @@
//===- DeclarationName.cpp - Declaration names implementation -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -162,13 +161,7 @@ void DeclarationName::print(raw_ostream &OS, const PrintingPolicy &Policy) {
return;
case DeclarationName::CXXOperatorName: {
- static const char *const OperatorNames[NUM_OVERLOADED_OPERATORS] = {
- nullptr,
-#define OVERLOADED_OPERATOR(Name, Spelling, Token, Unary, Binary, MemberOnly) \
- Spelling,
-#include "clang/Basic/OperatorKinds.def"
- };
- const char *OpName = OperatorNames[getCXXOverloadedOperator()];
+ const char *OpName = getOperatorSpelling(getCXXOverloadedOperator());
assert(OpName && "not an overloaded operator");
OS << "operator";
diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp
index 7cdd3b2c2a30..6ef77b8aee68 100644
--- a/lib/AST/Expr.cpp
+++ b/lib/AST/Expr.cpp
@@ -1,9 +1,8 @@
//===--- Expr.cpp - Expression AST Node Implementation --------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -11,13 +10,14 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/Expr.h"
+#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/EvaluatedExprVisitor.h"
-#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
@@ -229,6 +229,133 @@ SourceLocation Expr::getExprLoc() const {
// Primary Expressions.
//===----------------------------------------------------------------------===//
+static void AssertResultStorageKind(ConstantExpr::ResultStorageKind Kind) {
+ assert((Kind == ConstantExpr::RSK_APValue ||
+ Kind == ConstantExpr::RSK_Int64 || Kind == ConstantExpr::RSK_None) &&
+ "Invalid StorageKind Value");
+}
+
+ConstantExpr::ResultStorageKind
+ConstantExpr::getStorageKind(const APValue &Value) {
+ switch (Value.getKind()) {
+ case APValue::None:
+ case APValue::Indeterminate:
+ return ConstantExpr::RSK_None;
+ case APValue::Int:
+ if (!Value.getInt().needsCleanup())
+ return ConstantExpr::RSK_Int64;
+ LLVM_FALLTHROUGH;
+ default:
+ return ConstantExpr::RSK_APValue;
+ }
+}
+
+ConstantExpr::ResultStorageKind
+ConstantExpr::getStorageKind(const Type *T, const ASTContext &Context) {
+ if (T->isIntegralOrEnumerationType() && Context.getTypeInfo(T).Width <= 64)
+ return ConstantExpr::RSK_Int64;
+ return ConstantExpr::RSK_APValue;
+}
+
+void ConstantExpr::DefaultInit(ResultStorageKind StorageKind) {
+ ConstantExprBits.ResultKind = StorageKind;
+ ConstantExprBits.APValueKind = APValue::None;
+ ConstantExprBits.HasCleanup = false;
+ if (StorageKind == ConstantExpr::RSK_APValue)
+ ::new (getTrailingObjects<APValue>()) APValue();
+}
+
+ConstantExpr::ConstantExpr(Expr *subexpr, ResultStorageKind StorageKind)
+ : FullExpr(ConstantExprClass, subexpr) {
+ DefaultInit(StorageKind);
+}
+
+ConstantExpr *ConstantExpr::Create(const ASTContext &Context, Expr *E,
+ ResultStorageKind StorageKind) {
+ assert(!isa<ConstantExpr>(E));
+ AssertResultStorageKind(StorageKind);
+ unsigned Size = totalSizeToAlloc<APValue, uint64_t>(
+ StorageKind == ConstantExpr::RSK_APValue,
+ StorageKind == ConstantExpr::RSK_Int64);
+ void *Mem = Context.Allocate(Size, alignof(ConstantExpr));
+ ConstantExpr *Self = new (Mem) ConstantExpr(E, StorageKind);
+ return Self;
+}
+
+ConstantExpr *ConstantExpr::Create(const ASTContext &Context, Expr *E,
+ const APValue &Result) {
+ ResultStorageKind StorageKind = getStorageKind(Result);
+ ConstantExpr *Self = Create(Context, E, StorageKind);
+ Self->SetResult(Result, Context);
+ return Self;
+}
+
+ConstantExpr::ConstantExpr(ResultStorageKind StorageKind, EmptyShell Empty)
+ : FullExpr(ConstantExprClass, Empty) {
+ DefaultInit(StorageKind);
+}
+
+ConstantExpr *ConstantExpr::CreateEmpty(const ASTContext &Context,
+ ResultStorageKind StorageKind,
+ EmptyShell Empty) {
+ AssertResultStorageKind(StorageKind);
+ unsigned Size = totalSizeToAlloc<APValue, uint64_t>(
+ StorageKind == ConstantExpr::RSK_APValue,
+ StorageKind == ConstantExpr::RSK_Int64);
+ void *Mem = Context.Allocate(Size, alignof(ConstantExpr));
+ ConstantExpr *Self = new (Mem) ConstantExpr(StorageKind, Empty);
+ return Self;
+}
+
+void ConstantExpr::MoveIntoResult(APValue &Value, const ASTContext &Context) {
+ assert(getStorageKind(Value) == ConstantExprBits.ResultKind &&
+ "Invalid storage for this value kind");
+ ConstantExprBits.APValueKind = Value.getKind();
+ switch (ConstantExprBits.ResultKind) {
+ case RSK_None:
+ return;
+ case RSK_Int64:
+ Int64Result() = *Value.getInt().getRawData();
+ ConstantExprBits.BitWidth = Value.getInt().getBitWidth();
+ ConstantExprBits.IsUnsigned = Value.getInt().isUnsigned();
+ return;
+ case RSK_APValue:
+ if (!ConstantExprBits.HasCleanup && Value.needsCleanup()) {
+ ConstantExprBits.HasCleanup = true;
+ Context.addDestruction(&APValueResult());
+ }
+ APValueResult() = std::move(Value);
+ return;
+ }
+ llvm_unreachable("Invalid ResultKind Bits");
+}
+
+llvm::APSInt ConstantExpr::getResultAsAPSInt() const {
+ switch (ConstantExprBits.ResultKind) {
+ case ConstantExpr::RSK_APValue:
+ return APValueResult().getInt();
+ case ConstantExpr::RSK_Int64:
+ return llvm::APSInt(llvm::APInt(ConstantExprBits.BitWidth, Int64Result()),
+ ConstantExprBits.IsUnsigned);
+ default:
+ llvm_unreachable("invalid Accessor");
+ }
+}
+
+APValue ConstantExpr::getAPValueResult() const {
+ switch (ConstantExprBits.ResultKind) {
+ case ConstantExpr::RSK_APValue:
+ return APValueResult();
+ case ConstantExpr::RSK_Int64:
+ return APValue(
+ llvm::APSInt(llvm::APInt(ConstantExprBits.BitWidth, Int64Result()),
+ ConstantExprBits.IsUnsigned));
+ case ConstantExpr::RSK_None:
+ return APValue();
+ }
+ llvm_unreachable("invalid ResultKind");
+}
+
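
A short usage sketch of the result-carrying ConstantExpr API added above, assuming the clang AST headers and an already-built ASTContext plus a type-checked expression; the helper name wrapWithResult is invented for illustration:

#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
#include "llvm/ADT/APSInt.h"

using namespace clang;

// Wrap an already-evaluated expression together with its result. For an
// integer that fits in 64 bits, getStorageKind() selects RSK_Int64 and the
// value is stored inline; anything else gets a trailing APValue slot.
ConstantExpr *wrapWithResult(const ASTContext &Ctx, Expr *E,
                             const llvm::APSInt &Value) {
  APValue Result(Value);                        // integer result
  return ConstantExpr::Create(Ctx, E, Result);  // E must not already be a ConstantExpr
}
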
/// Compute the type-, value-, and instantiation-dependence of a
/// declaration reference
/// based on the declaration being referenced.
@@ -344,7 +471,8 @@ void DeclRefExpr::computeDependence(const ASTContext &Ctx) {
DeclRefExpr::DeclRefExpr(const ASTContext &Ctx, ValueDecl *D,
bool RefersToEnclosingVariableOrCapture, QualType T,
ExprValueKind VK, SourceLocation L,
- const DeclarationNameLoc &LocInfo)
+ const DeclarationNameLoc &LocInfo,
+ NonOdrUseReason NOUR)
: Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false, false),
D(D), DNLoc(LocInfo) {
DeclRefExprBits.HasQualifier = false;
@@ -353,6 +481,7 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx, ValueDecl *D,
DeclRefExprBits.HadMultipleCandidates = false;
DeclRefExprBits.RefersToEnclosingVariableOrCapture =
RefersToEnclosingVariableOrCapture;
+ DeclRefExprBits.NonOdrUseReason = NOUR;
DeclRefExprBits.Loc = L;
computeDependence(Ctx);
}
@@ -363,7 +492,7 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
bool RefersToEnclosingVariableOrCapture,
const DeclarationNameInfo &NameInfo, NamedDecl *FoundD,
const TemplateArgumentListInfo *TemplateArgs,
- QualType T, ExprValueKind VK)
+ QualType T, ExprValueKind VK, NonOdrUseReason NOUR)
: Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false, false),
D(D), DNLoc(NameInfo.getInfo()) {
DeclRefExprBits.Loc = NameInfo.getLoc();
@@ -384,6 +513,7 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
= (TemplateArgs || TemplateKWLoc.isValid()) ? 1 : 0;
DeclRefExprBits.RefersToEnclosingVariableOrCapture =
RefersToEnclosingVariableOrCapture;
+ DeclRefExprBits.NonOdrUseReason = NOUR;
if (TemplateArgs) {
bool Dependent = false;
bool InstantiationDependent = false;
@@ -405,30 +535,27 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
DeclRefExpr *DeclRefExpr::Create(const ASTContext &Context,
NestedNameSpecifierLoc QualifierLoc,
- SourceLocation TemplateKWLoc,
- ValueDecl *D,
+ SourceLocation TemplateKWLoc, ValueDecl *D,
bool RefersToEnclosingVariableOrCapture,
- SourceLocation NameLoc,
- QualType T,
- ExprValueKind VK,
- NamedDecl *FoundD,
- const TemplateArgumentListInfo *TemplateArgs) {
+ SourceLocation NameLoc, QualType T,
+ ExprValueKind VK, NamedDecl *FoundD,
+ const TemplateArgumentListInfo *TemplateArgs,
+ NonOdrUseReason NOUR) {
return Create(Context, QualifierLoc, TemplateKWLoc, D,
RefersToEnclosingVariableOrCapture,
DeclarationNameInfo(D->getDeclName(), NameLoc),
- T, VK, FoundD, TemplateArgs);
+ T, VK, FoundD, TemplateArgs, NOUR);
}
DeclRefExpr *DeclRefExpr::Create(const ASTContext &Context,
NestedNameSpecifierLoc QualifierLoc,
- SourceLocation TemplateKWLoc,
- ValueDecl *D,
+ SourceLocation TemplateKWLoc, ValueDecl *D,
bool RefersToEnclosingVariableOrCapture,
const DeclarationNameInfo &NameInfo,
- QualType T,
- ExprValueKind VK,
+ QualType T, ExprValueKind VK,
NamedDecl *FoundD,
- const TemplateArgumentListInfo *TemplateArgs) {
+ const TemplateArgumentListInfo *TemplateArgs,
+ NonOdrUseReason NOUR) {
// Filter out cases where the found Decl is the same as the value referenced.

if (D == FoundD)
FoundD = nullptr;
@@ -443,8 +570,8 @@ DeclRefExpr *DeclRefExpr::Create(const ASTContext &Context,
void *Mem = Context.Allocate(Size, alignof(DeclRefExpr));
return new (Mem) DeclRefExpr(Context, QualifierLoc, TemplateKWLoc, D,
- RefersToEnclosingVariableOrCapture,
- NameInfo, FoundD, TemplateArgs, T, VK);
+ RefersToEnclosingVariableOrCapture, NameInfo,
+ FoundD, TemplateArgs, T, VK, NOUR);
}
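
For context on the NonOdrUseReason value now threaded through DeclRefExpr::Create, a user-level sketch of the case it records (the language rule, not clang internals):

// Reading a constexpr static data member as a value is not an odr-use:
// the constant is folded and no out-of-line definition of N is needed.
// The NonOdrUseReason bit lets the AST remember that this DeclRefExpr
// was formally not odr-used.
struct Config { static constexpr int N = 8; };

int buffer_size() {
  return Config::N; // constant read; Config::N is not odr-used here
}

int main() { return buffer_size() == 8 ? 0 : 1; }
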
DeclRefExpr *DeclRefExpr::CreateEmpty(const ASTContext &Context,
@@ -840,7 +967,7 @@ FloatingLiteral::FloatingLiteral(const ASTContext &C, const llvm::APFloat &V,
FloatingLiteral::FloatingLiteral(const ASTContext &C, EmptyShell Empty)
: Expr(FloatingLiteralClass, Empty) {
- setRawSemantics(IEEEhalf);
+ setRawSemantics(llvm::APFloatBase::S_IEEEhalf);
FloatingLiteralBits.IsExact = false;
}
@@ -855,41 +982,6 @@ FloatingLiteral::Create(const ASTContext &C, EmptyShell Empty) {
return new (C) FloatingLiteral(C, Empty);
}
-const llvm::fltSemantics &FloatingLiteral::getSemantics() const {
- switch(FloatingLiteralBits.Semantics) {
- case IEEEhalf:
- return llvm::APFloat::IEEEhalf();
- case IEEEsingle:
- return llvm::APFloat::IEEEsingle();
- case IEEEdouble:
- return llvm::APFloat::IEEEdouble();
- case x87DoubleExtended:
- return llvm::APFloat::x87DoubleExtended();
- case IEEEquad:
- return llvm::APFloat::IEEEquad();
- case PPCDoubleDouble:
- return llvm::APFloat::PPCDoubleDouble();
- }
- llvm_unreachable("Unrecognised floating semantics");
-}
-
-void FloatingLiteral::setSemantics(const llvm::fltSemantics &Sem) {
- if (&Sem == &llvm::APFloat::IEEEhalf())
- FloatingLiteralBits.Semantics = IEEEhalf;
- else if (&Sem == &llvm::APFloat::IEEEsingle())
- FloatingLiteralBits.Semantics = IEEEsingle;
- else if (&Sem == &llvm::APFloat::IEEEdouble())
- FloatingLiteralBits.Semantics = IEEEdouble;
- else if (&Sem == &llvm::APFloat::x87DoubleExtended())
- FloatingLiteralBits.Semantics = x87DoubleExtended;
- else if (&Sem == &llvm::APFloat::IEEEquad())
- FloatingLiteralBits.Semantics = IEEEquad;
- else if (&Sem == &llvm::APFloat::PPCDoubleDouble())
- FloatingLiteralBits.Semantics = PPCDoubleDouble;
- else
- llvm_unreachable("Unknown floating semantics");
-}
-
/// getValueAsApproximateDouble - This returns the value as an inaccurate
/// double. Note that this may cause loss of precision, but is useful for
/// debugging dumps, etc.
@@ -1359,6 +1451,8 @@ Decl *Expr::getReferencedDeclOfCallee() {
return DRE->getDecl();
if (MemberExpr *ME = dyn_cast<MemberExpr>(CEE))
return ME->getMemberDecl();
+ if (auto *BE = dyn_cast<BlockExpr>(CEE))
+ return BE->getBlockDecl();
return nullptr;
}
@@ -1536,29 +1630,46 @@ UnaryExprOrTypeTraitExpr::UnaryExprOrTypeTraitExpr(
}
}
+MemberExpr::MemberExpr(Expr *Base, bool IsArrow, SourceLocation OperatorLoc,
+ ValueDecl *MemberDecl,
+ const DeclarationNameInfo &NameInfo, QualType T,
+ ExprValueKind VK, ExprObjectKind OK,
+ NonOdrUseReason NOUR)
+ : Expr(MemberExprClass, T, VK, OK, Base->isTypeDependent(),
+ Base->isValueDependent(), Base->isInstantiationDependent(),
+ Base->containsUnexpandedParameterPack()),
+ Base(Base), MemberDecl(MemberDecl), MemberDNLoc(NameInfo.getInfo()),
+ MemberLoc(NameInfo.getLoc()) {
+ assert(!NameInfo.getName() ||
+ MemberDecl->getDeclName() == NameInfo.getName());
+ MemberExprBits.IsArrow = IsArrow;
+ MemberExprBits.HasQualifierOrFoundDecl = false;
+ MemberExprBits.HasTemplateKWAndArgsInfo = false;
+ MemberExprBits.HadMultipleCandidates = false;
+ MemberExprBits.NonOdrUseReason = NOUR;
+ MemberExprBits.OperatorLoc = OperatorLoc;
+}
+
MemberExpr *MemberExpr::Create(
- const ASTContext &C, Expr *base, bool isarrow, SourceLocation OperatorLoc,
+ const ASTContext &C, Expr *Base, bool IsArrow, SourceLocation OperatorLoc,
NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc,
- ValueDecl *memberdecl, DeclAccessPair founddecl,
- DeclarationNameInfo nameinfo, const TemplateArgumentListInfo *targs,
- QualType ty, ExprValueKind vk, ExprObjectKind ok) {
-
- bool hasQualOrFound = (QualifierLoc ||
- founddecl.getDecl() != memberdecl ||
- founddecl.getAccess() != memberdecl->getAccess());
-
- bool HasTemplateKWAndArgsInfo = targs || TemplateKWLoc.isValid();
+ ValueDecl *MemberDecl, DeclAccessPair FoundDecl,
+ DeclarationNameInfo NameInfo, const TemplateArgumentListInfo *TemplateArgs,
+ QualType T, ExprValueKind VK, ExprObjectKind OK, NonOdrUseReason NOUR) {
+ bool HasQualOrFound = QualifierLoc || FoundDecl.getDecl() != MemberDecl ||
+ FoundDecl.getAccess() != MemberDecl->getAccess();
+ bool HasTemplateKWAndArgsInfo = TemplateArgs || TemplateKWLoc.isValid();
std::size_t Size =
totalSizeToAlloc<MemberExprNameQualifier, ASTTemplateKWAndArgsInfo,
- TemplateArgumentLoc>(hasQualOrFound ? 1 : 0,
- HasTemplateKWAndArgsInfo ? 1 : 0,
- targs ? targs->size() : 0);
+ TemplateArgumentLoc>(
+ HasQualOrFound ? 1 : 0, HasTemplateKWAndArgsInfo ? 1 : 0,
+ TemplateArgs ? TemplateArgs->size() : 0);
void *Mem = C.Allocate(Size, alignof(MemberExpr));
- MemberExpr *E = new (Mem)
- MemberExpr(base, isarrow, OperatorLoc, memberdecl, nameinfo, ty, vk, ok);
+ MemberExpr *E = new (Mem) MemberExpr(Base, IsArrow, OperatorLoc, MemberDecl,
+ NameInfo, T, VK, OK, NOUR);
- if (hasQualOrFound) {
+ if (HasQualOrFound) {
// FIXME: Wrong. We should be looking at the member declaration we found.
if (QualifierLoc && QualifierLoc.getNestedNameSpecifier()->isDependent()) {
E->setValueDependent(true);
@@ -1574,19 +1685,20 @@ MemberExpr *MemberExpr::Create(
MemberExprNameQualifier *NQ =
E->getTrailingObjects<MemberExprNameQualifier>();
NQ->QualifierLoc = QualifierLoc;
- NQ->FoundDecl = founddecl;
+ NQ->FoundDecl = FoundDecl;
}
E->MemberExprBits.HasTemplateKWAndArgsInfo =
- (targs || TemplateKWLoc.isValid());
+ TemplateArgs || TemplateKWLoc.isValid();
- if (targs) {
+ if (TemplateArgs) {
bool Dependent = false;
bool InstantiationDependent = false;
bool ContainsUnexpandedParameterPack = false;
E->getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
- TemplateKWLoc, *targs, E->getTrailingObjects<TemplateArgumentLoc>(),
- Dependent, InstantiationDependent, ContainsUnexpandedParameterPack);
+ TemplateKWLoc, *TemplateArgs,
+ E->getTrailingObjects<TemplateArgumentLoc>(), Dependent,
+ InstantiationDependent, ContainsUnexpandedParameterPack);
if (InstantiationDependent)
E->setInstantiationDependent(true);
} else if (TemplateKWLoc.isValid()) {
@@ -1597,6 +1709,22 @@ MemberExpr *MemberExpr::Create(
return E;
}
+MemberExpr *MemberExpr::CreateEmpty(const ASTContext &Context,
+ bool HasQualifier, bool HasFoundDecl,
+ bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs) {
+ assert((!NumTemplateArgs || HasTemplateKWAndArgsInfo) &&
+ "template args but no template arg info?");
+ bool HasQualOrFound = HasQualifier || HasFoundDecl;
+ std::size_t Size =
+ totalSizeToAlloc<MemberExprNameQualifier, ASTTemplateKWAndArgsInfo,
+ TemplateArgumentLoc>(HasQualOrFound ? 1 : 0,
+ HasTemplateKWAndArgsInfo ? 1 : 0,
+ NumTemplateArgs);
+ void *Mem = Context.Allocate(Size, alignof(MemberExpr));
+ return new (Mem) MemberExpr(EmptyShell());
+}
+
SourceLocation MemberExpr::getBeginLoc() const {
if (isImplicitAccess()) {
if (hasQualifier())
@@ -1677,7 +1805,7 @@ bool CastExpr::CastConsistency() const {
auto Ty = getType();
auto SETy = getSubExpr()->getType();
assert(getValueKindForType(Ty) == Expr::getValueKindForType(SETy));
- if (isRValue()) {
+ if (/*isRValue()*/ !Ty->getPointeeType().isNull()) {
Ty = Ty->getPointeeType();
SETy = SETy->getPointeeType();
}
@@ -1717,6 +1845,8 @@ bool CastExpr::CastConsistency() const {
case CK_ZeroToOCLOpaqueType:
case CK_IntToOCLSampler:
case CK_FixedPointCast:
+ case CK_FixedPointToIntegral:
+ case CK_IntegralToFixedPoint:
assert(!getType()->isBooleanType() && "unheralded conversion to bool");
goto CheckNoBasePath;
@@ -1732,6 +1862,7 @@ bool CastExpr::CastConsistency() const {
case CK_FloatingComplexToBoolean:
case CK_IntegralComplexToBoolean:
case CK_LValueBitCast: // -> bool&
+ case CK_LValueToRValueBitCast:
case CK_UserDefinedConversion: // operator bool()
case CK_BuiltinFnToFnPtr:
case CK_FixedPointToBoolean:
@@ -1847,6 +1978,11 @@ ImplicitCastExpr *ImplicitCastExpr::Create(const ASTContext &C, QualType T,
ExprValueKind VK) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
+ // Per C++ [conv.lval]p3, lvalue-to-rvalue conversions on class and
+ // std::nullptr_t have special semantics not captured by CK_LValueToRValue.
+ assert((Kind != CK_LValueToRValue ||
+ !(T->isNullPtrType() || T->getAsCXXRecordDecl())) &&
+ "invalid type for lvalue-to-rvalue conversion");
ImplicitCastExpr *E =
new (Buffer) ImplicitCastExpr(T, Kind, Operand, PathSize, VK);
if (PathSize)
@@ -1989,6 +2125,91 @@ bool BinaryOperator::isNullPointerArithmeticExtension(ASTContext &Ctx,
return true;
}
+
+static QualType getDecayedSourceLocExprType(const ASTContext &Ctx,
+ SourceLocExpr::IdentKind Kind) {
+ switch (Kind) {
+ case SourceLocExpr::File:
+ case SourceLocExpr::Function: {
+ QualType ArrTy = Ctx.getStringLiteralArrayType(Ctx.CharTy, 0);
+ return Ctx.getPointerType(ArrTy->getAsArrayTypeUnsafe()->getElementType());
+ }
+ case SourceLocExpr::Line:
+ case SourceLocExpr::Column:
+ return Ctx.UnsignedIntTy;
+ }
+ llvm_unreachable("unhandled case");
+}
+
+SourceLocExpr::SourceLocExpr(const ASTContext &Ctx, IdentKind Kind,
+ SourceLocation BLoc, SourceLocation RParenLoc,
+ DeclContext *ParentContext)
+ : Expr(SourceLocExprClass, getDecayedSourceLocExprType(Ctx, Kind),
+ VK_RValue, OK_Ordinary, false, false, false, false),
+ BuiltinLoc(BLoc), RParenLoc(RParenLoc), ParentContext(ParentContext) {
+ SourceLocExprBits.Kind = Kind;
+}
+
+StringRef SourceLocExpr::getBuiltinStr() const {
+ switch (getIdentKind()) {
+ case File:
+ return "__builtin_FILE";
+ case Function:
+ return "__builtin_FUNCTION";
+ case Line:
+ return "__builtin_LINE";
+ case Column:
+ return "__builtin_COLUMN";
+ }
+ llvm_unreachable("unexpected IdentKind!");
+}
+
+APValue SourceLocExpr::EvaluateInContext(const ASTContext &Ctx,
+ const Expr *DefaultExpr) const {
+ SourceLocation Loc;
+ const DeclContext *Context;
+
+ std::tie(Loc,
+ Context) = [&]() -> std::pair<SourceLocation, const DeclContext *> {
+ if (auto *DIE = dyn_cast_or_null<CXXDefaultInitExpr>(DefaultExpr))
+ return {DIE->getUsedLocation(), DIE->getUsedContext()};
+ if (auto *DAE = dyn_cast_or_null<CXXDefaultArgExpr>(DefaultExpr))
+ return {DAE->getUsedLocation(), DAE->getUsedContext()};
+ return {this->getLocation(), this->getParentContext()};
+ }();
+
+ PresumedLoc PLoc = Ctx.getSourceManager().getPresumedLoc(
+ Ctx.getSourceManager().getExpansionRange(Loc).getEnd());
+
+ auto MakeStringLiteral = [&](StringRef Tmp) {
+ using LValuePathEntry = APValue::LValuePathEntry;
+ StringLiteral *Res = Ctx.getPredefinedStringLiteralFromCache(Tmp);
+ // Decay the string to a pointer to the first character.
+ LValuePathEntry Path[1] = {LValuePathEntry::ArrayIndex(0)};
+ return APValue(Res, CharUnits::Zero(), Path, /*OnePastTheEnd=*/false);
+ };
+
+ switch (getIdentKind()) {
+ case SourceLocExpr::File:
+ return MakeStringLiteral(PLoc.getFilename());
+ case SourceLocExpr::Function: {
+ const Decl *CurDecl = dyn_cast_or_null<Decl>(Context);
+ return MakeStringLiteral(
+ CurDecl ? PredefinedExpr::ComputeName(PredefinedExpr::Function, CurDecl)
+ : std::string(""));
+ }
+ case SourceLocExpr::Line:
+ case SourceLocExpr::Column: {
+ llvm::APSInt IntVal(Ctx.getIntWidth(Ctx.UnsignedIntTy),
+ /*isUnsigned=*/true);
+ IntVal = getIdentKind() == SourceLocExpr::Line ? PLoc.getLine()
+ : PLoc.getColumn();
+ return APValue(IntVal);
+ }
+ }
+ llvm_unreachable("unhandled case");
+}
+
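
A user-level sketch of the behaviour EvaluateInContext implements (requires a compiler providing __builtin_LINE, such as clang with this change): a source-location builtin in a default argument is evaluated at the point of use recorded by CXXDefaultArgExpr, not at the declaration:

// The default argument is re-evaluated at each call site, so Line reports
// the caller's line, not the line of this declaration.
unsigned current_line(unsigned Line = __builtin_LINE()) { return Line; }

int main() {
  unsigned AtCallSite = current_line(); // line number of this call
  (void)AtCallSite;
  return 0;
}
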
InitListExpr::InitListExpr(const ASTContext &C, SourceLocation lbraceloc,
ArrayRef<Expr*> initExprs, SourceLocation rbraceloc)
: Expr(InitListExprClass, QualType(), VK_RValue, OK_Ordinary, false, false,
@@ -2082,11 +2303,11 @@ bool InitListExpr::isTransparent() const {
bool InitListExpr::isIdiomaticZeroInitializer(const LangOptions &LangOpts) const {
assert(isSyntacticForm() && "only test syntactic form as zero initializer");
- if (LangOpts.CPlusPlus || getNumInits() != 1) {
+ if (LangOpts.CPlusPlus || getNumInits() != 1 || !getInit(0)) {
return false;
}
- const IntegerLiteral *Lit = dyn_cast<IntegerLiteral>(getInit(0));
+ const IntegerLiteral *Lit = dyn_cast<IntegerLiteral>(getInit(0)->IgnoreImplicit());
return Lit && Lit->getValue() == 0;
}
@@ -2256,12 +2477,13 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
// If only one of the LHS or RHS is a warning, the operator might
// be being used for control flow. Only warn if both the LHS and
// RHS are warnings.
- const ConditionalOperator *Exp = cast<ConditionalOperator>(this);
- if (!Exp->getRHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx))
- return false;
- if (!Exp->getLHS())
- return true;
- return Exp->getLHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
+ const auto *Exp = cast<ConditionalOperator>(this);
+ return Exp->getLHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx) &&
+ Exp->getRHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
+ }
+ case BinaryConditionalOperatorClass: {
+ const auto *Exp = cast<BinaryConditionalOperator>(this);
+ return Exp->getFalseExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
}
case MemberExprClass:
@@ -2557,205 +2779,197 @@ QualType Expr::findBoundMemberType(const Expr *expr) {
return QualType();
}
-Expr* Expr::IgnoreParens() {
- Expr* E = this;
- while (true) {
- if (ParenExpr* P = dyn_cast<ParenExpr>(E)) {
- E = P->getSubExpr();
- continue;
- }
- if (UnaryOperator* P = dyn_cast<UnaryOperator>(E)) {
- if (P->getOpcode() == UO_Extension) {
- E = P->getSubExpr();
- continue;
- }
- }
- if (GenericSelectionExpr* P = dyn_cast<GenericSelectionExpr>(E)) {
- if (!P->isResultDependent()) {
- E = P->getResultExpr();
- continue;
- }
- }
- if (ChooseExpr* P = dyn_cast<ChooseExpr>(E)) {
- if (!P->isConditionDependent()) {
- E = P->getChosenSubExpr();
- continue;
- }
- }
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(E)) {
- E = CE->getSubExpr();
- continue;
- }
- return E;
- }
+static Expr *IgnoreImpCastsSingleStep(Expr *E) {
+ if (auto *ICE = dyn_cast<ImplicitCastExpr>(E))
+ return ICE->getSubExpr();
+
+ if (auto *FE = dyn_cast<FullExpr>(E))
+ return FE->getSubExpr();
+
+ return E;
}
-/// IgnoreParenCasts - Ignore parentheses and casts. Strip off any ParenExpr
-/// or CastExprs or ImplicitCastExprs, returning their operand.
-Expr *Expr::IgnoreParenCasts() {
- Expr *E = this;
- while (true) {
- E = E->IgnoreParens();
- if (CastExpr *P = dyn_cast<CastExpr>(E)) {
- E = P->getSubExpr();
- continue;
- }
- if (MaterializeTemporaryExpr *Materialize
- = dyn_cast<MaterializeTemporaryExpr>(E)) {
- E = Materialize->GetTemporaryExpr();
- continue;
- }
- if (SubstNonTypeTemplateParmExpr *NTTP
- = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
- E = NTTP->getReplacement();
- continue;
- }
- if (FullExpr *FE = dyn_cast<FullExpr>(E)) {
- E = FE->getSubExpr();
- continue;
- }
- return E;
- }
+static Expr *IgnoreImpCastsExtraSingleStep(Expr *E) {
+ // FIXME: Skip MaterializeTemporaryExpr and SubstNonTypeTemplateParmExpr in
+ // addition to what IgnoreImpCasts() skips to account for the current
+ // behaviour of IgnoreParenImpCasts().
+ Expr *SubE = IgnoreImpCastsSingleStep(E);
+ if (SubE != E)
+ return SubE;
+
+ if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
+ return MTE->GetTemporaryExpr();
+
+ if (auto *NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E))
+ return NTTP->getReplacement();
+
+ return E;
}
-Expr *Expr::IgnoreCasts() {
- Expr *E = this;
- while (true) {
- if (CastExpr *P = dyn_cast<CastExpr>(E)) {
- E = P->getSubExpr();
- continue;
- }
- if (MaterializeTemporaryExpr *Materialize
- = dyn_cast<MaterializeTemporaryExpr>(E)) {
- E = Materialize->GetTemporaryExpr();
- continue;
- }
- if (SubstNonTypeTemplateParmExpr *NTTP
- = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
- E = NTTP->getReplacement();
- continue;
- }
- if (FullExpr *FE = dyn_cast<FullExpr>(E)) {
- E = FE->getSubExpr();
- continue;
- }
- return E;
+static Expr *IgnoreCastsSingleStep(Expr *E) {
+ if (auto *CE = dyn_cast<CastExpr>(E))
+ return CE->getSubExpr();
+
+ if (auto *FE = dyn_cast<FullExpr>(E))
+ return FE->getSubExpr();
+
+ if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
+ return MTE->GetTemporaryExpr();
+
+ if (auto *NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E))
+ return NTTP->getReplacement();
+
+ return E;
+}
+
+static Expr *IgnoreLValueCastsSingleStep(Expr *E) {
+ // Skip what IgnoreCastsSingleStep skips, except that only
+ // lvalue-to-rvalue casts are skipped.
+ if (auto *CE = dyn_cast<CastExpr>(E))
+ if (CE->getCastKind() != CK_LValueToRValue)
+ return E;
+
+ return IgnoreCastsSingleStep(E);
+}
+
+static Expr *IgnoreBaseCastsSingleStep(Expr *E) {
+ if (auto *CE = dyn_cast<CastExpr>(E))
+ if (CE->getCastKind() == CK_DerivedToBase ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase ||
+ CE->getCastKind() == CK_NoOp)
+ return CE->getSubExpr();
+
+ return E;
+}
+
+static Expr *IgnoreImplicitSingleStep(Expr *E) {
+ Expr *SubE = IgnoreImpCastsSingleStep(E);
+ if (SubE != E)
+ return SubE;
+
+ if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
+ return MTE->GetTemporaryExpr();
+
+ if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
+ return BTE->getSubExpr();
+
+ return E;
+}
+
+static Expr *IgnoreParensSingleStep(Expr *E) {
+ if (auto *PE = dyn_cast<ParenExpr>(E))
+ return PE->getSubExpr();
+
+ if (auto *UO = dyn_cast<UnaryOperator>(E)) {
+ if (UO->getOpcode() == UO_Extension)
+ return UO->getSubExpr();
+ }
+
+ else if (auto *GSE = dyn_cast<GenericSelectionExpr>(E)) {
+ if (!GSE->isResultDependent())
+ return GSE->getResultExpr();
}
+
+ else if (auto *CE = dyn_cast<ChooseExpr>(E)) {
+ if (!CE->isConditionDependent())
+ return CE->getChosenSubExpr();
+ }
+
+ else if (auto *CE = dyn_cast<ConstantExpr>(E))
+ return CE->getSubExpr();
+
+ return E;
}
-/// IgnoreParenLValueCasts - Ignore parentheses and lvalue-to-rvalue
-/// casts. This is intended purely as a temporary workaround for code
-/// that hasn't yet been rewritten to do the right thing about those
-/// casts, and may disappear along with the last internal use.
-Expr *Expr::IgnoreParenLValueCasts() {
- Expr *E = this;
- while (true) {
- E = E->IgnoreParens();
- if (CastExpr *P = dyn_cast<CastExpr>(E)) {
- if (P->getCastKind() == CK_LValueToRValue) {
- E = P->getSubExpr();
- continue;
- }
- } else if (MaterializeTemporaryExpr *Materialize
- = dyn_cast<MaterializeTemporaryExpr>(E)) {
- E = Materialize->GetTemporaryExpr();
- continue;
- } else if (SubstNonTypeTemplateParmExpr *NTTP
- = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
- E = NTTP->getReplacement();
- continue;
- } else if (FullExpr *FE = dyn_cast<FullExpr>(E)) {
- E = FE->getSubExpr();
- continue;
- }
- break;
+static Expr *IgnoreNoopCastsSingleStep(const ASTContext &Ctx, Expr *E) {
+ if (auto *CE = dyn_cast<CastExpr>(E)) {
+ // We ignore integer <-> casts that are of the same width, ptr<->ptr and
+ // ptr<->int casts of the same width. We also ignore all identity casts.
+ Expr *SubExpr = CE->getSubExpr();
+ bool IsIdentityCast =
+ Ctx.hasSameUnqualifiedType(E->getType(), SubExpr->getType());
+ bool IsSameWidthCast =
+ (E->getType()->isPointerType() || E->getType()->isIntegralType(Ctx)) &&
+ (SubExpr->getType()->isPointerType() ||
+ SubExpr->getType()->isIntegralType(Ctx)) &&
+ (Ctx.getTypeSize(E->getType()) == Ctx.getTypeSize(SubExpr->getType()));
+
+ if (IsIdentityCast || IsSameWidthCast)
+ return SubExpr;
}
+
+ else if (auto *NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E))
+ return NTTP->getReplacement();
+
return E;
}
-Expr *Expr::ignoreParenBaseCasts() {
- Expr *E = this;
- while (true) {
- E = E->IgnoreParens();
- if (CastExpr *CE = dyn_cast<CastExpr>(E)) {
- if (CE->getCastKind() == CK_DerivedToBase ||
- CE->getCastKind() == CK_UncheckedDerivedToBase ||
- CE->getCastKind() == CK_NoOp) {
- E = CE->getSubExpr();
- continue;
- }
- }
+static Expr *IgnoreExprNodesImpl(Expr *E) { return E; }
+template <typename FnTy, typename... FnTys>
+static Expr *IgnoreExprNodesImpl(Expr *E, FnTy &&Fn, FnTys &&... Fns) {
+ return IgnoreExprNodesImpl(Fn(E), std::forward<FnTys>(Fns)...);
+}
- return E;
+/// Given an expression E and functions Fn_1,...,Fn_n : Expr * -> Expr *,
+/// recursively apply each of the functions to E until reaching a fixed point.
+/// Note that a null E is valid; in this case nothing is done.
+template <typename... FnTys>
+static Expr *IgnoreExprNodes(Expr *E, FnTys &&... Fns) {
+ Expr *LastE = nullptr;
+ while (E != LastE) {
+ LastE = E;
+ E = IgnoreExprNodesImpl(E, std::forward<FnTys>(Fns)...);
}
+ return E;
+}
+
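
A self-contained sketch of the fixed-point composition pattern IgnoreExprNodes uses above (plain int instead of Expr * so it compiles on its own; all names invented): each single-step function either peels one layer or returns its input unchanged, and the driver loops until a full pass makes no progress:

#include <cstdio>
#include <utility>

static int stepHalveEven(int V) { return (V % 2 == 0) ? V / 2 : V; }
static int stepDropTrailingZero(int V) { return (V % 10 == 0) ? V / 10 : V; }

static int applyStepsImpl(int V) { return V; }
template <typename FnTy, typename... FnTys>
static int applyStepsImpl(int V, FnTy &&Fn, FnTys &&... Fns) {
  return applyStepsImpl(Fn(V), std::forward<FnTys>(Fns)...);
}

// Keep applying all steps until one full pass changes nothing.
template <typename... FnTys> static int applySteps(int V, FnTys &&... Fns) {
  int Last = V - 1;
  while (V != Last) {
    Last = V;
    V = applyStepsImpl(V, std::forward<FnTys>(Fns)...);
  }
  return V;
}

int main() {
  // 400 -> 20 -> 1, then neither step changes the value: prints 1.
  std::printf("%d\n", applySteps(400, stepHalveEven, stepDropTrailingZero));
}
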
+Expr *Expr::IgnoreImpCasts() {
+ return IgnoreExprNodes(this, IgnoreImpCastsSingleStep);
+}
+
+Expr *Expr::IgnoreCasts() {
+ return IgnoreExprNodes(this, IgnoreCastsSingleStep);
+}
+
+Expr *Expr::IgnoreImplicit() {
+ return IgnoreExprNodes(this, IgnoreImplicitSingleStep);
+}
+
+Expr *Expr::IgnoreParens() {
+ return IgnoreExprNodes(this, IgnoreParensSingleStep);
}
Expr *Expr::IgnoreParenImpCasts() {
- Expr *E = this;
- while (true) {
- E = E->IgnoreParens();
- if (ImplicitCastExpr *P = dyn_cast<ImplicitCastExpr>(E)) {
- E = P->getSubExpr();
- continue;
- }
- if (MaterializeTemporaryExpr *Materialize
- = dyn_cast<MaterializeTemporaryExpr>(E)) {
- E = Materialize->GetTemporaryExpr();
- continue;
- }
- if (SubstNonTypeTemplateParmExpr *NTTP
- = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
- E = NTTP->getReplacement();
- continue;
- }
- return E;
- }
+ return IgnoreExprNodes(this, IgnoreParensSingleStep,
+ IgnoreImpCastsExtraSingleStep);
+}
+
+Expr *Expr::IgnoreParenCasts() {
+ return IgnoreExprNodes(this, IgnoreParensSingleStep, IgnoreCastsSingleStep);
}
Expr *Expr::IgnoreConversionOperator() {
- if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(this)) {
+ if (auto *MCE = dyn_cast<CXXMemberCallExpr>(this)) {
if (MCE->getMethodDecl() && isa<CXXConversionDecl>(MCE->getMethodDecl()))
return MCE->getImplicitObjectArgument();
}
return this;
}
-/// IgnoreParenNoopCasts - Ignore parentheses and casts that do not change the
-/// value (including ptr->int casts of the same size). Strip off any
-/// ParenExpr or CastExprs, returning their operand.
-Expr *Expr::IgnoreParenNoopCasts(ASTContext &Ctx) {
- Expr *E = this;
- while (true) {
- E = E->IgnoreParens();
-
- if (CastExpr *P = dyn_cast<CastExpr>(E)) {
- // We ignore integer <-> casts that are of the same width, ptr<->ptr and
- // ptr<->int casts of the same width. We also ignore all identity casts.
- Expr *SE = P->getSubExpr();
-
- if (Ctx.hasSameUnqualifiedType(E->getType(), SE->getType())) {
- E = SE;
- continue;
- }
-
- if ((E->getType()->isPointerType() ||
- E->getType()->isIntegralType(Ctx)) &&
- (SE->getType()->isPointerType() ||
- SE->getType()->isIntegralType(Ctx)) &&
- Ctx.getTypeSize(E->getType()) == Ctx.getTypeSize(SE->getType())) {
- E = SE;
- continue;
- }
- }
+Expr *Expr::IgnoreParenLValueCasts() {
+ return IgnoreExprNodes(this, IgnoreParensSingleStep,
+ IgnoreLValueCastsSingleStep);
+}
- if (SubstNonTypeTemplateParmExpr *NTTP
- = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
- E = NTTP->getReplacement();
- continue;
- }
+Expr *Expr::ignoreParenBaseCasts() {
+ return IgnoreExprNodes(this, IgnoreParensSingleStep,
+ IgnoreBaseCastsSingleStep);
+}
- return E;
- }
+Expr *Expr::IgnoreParenNoopCasts(const ASTContext &Ctx) {
+ return IgnoreExprNodes(this, IgnoreParensSingleStep, [&Ctx](Expr *E) {
+ return IgnoreNoopCastsSingleStep(Ctx, E);
+ });
}
bool Expr::isDefaultArgument() const {
@@ -2893,6 +3107,9 @@ bool Expr::hasAnyTypeDependentArguments(ArrayRef<Expr *> Exprs) {
bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
const Expr **Culprit) const {
+ assert(!isValueDependent() &&
+ "Expression evaluator can't be called on a dependent expression.");
+
// This function is attempting to determine whether an expression is an
// initializer which can be evaluated at compile-time. It very closely parallels
// ConstExprEmitter in CGExprConstant.cpp; if they don't match, it
@@ -2952,6 +3169,7 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
}
case InitListExprClass: {
const InitListExpr *ILE = cast<InitListExpr>(this);
+ assert(ILE->isSemanticForm() && "InitListExpr must be in semantic form");
if (ILE->getType()->isArrayType()) {
unsigned numInits = ILE->getNumInits();
for (unsigned i = 0; i < numInits; i++) {
@@ -3160,6 +3378,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case ObjCAvailabilityCheckExprClass:
case CXXUuidofExprClass:
case OpaqueValueExprClass:
+ case SourceLocExprClass:
// These never have a side-effect.
return false;
@@ -3288,7 +3507,8 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case CXXStaticCastExprClass:
case CXXReinterpretCastExprClass:
case CXXConstCastExprClass:
- case CXXFunctionalCastExprClass: {
+ case CXXFunctionalCastExprClass:
+ case BuiltinBitCastExprClass: {
// While volatile reads are side-effecting in both C and C++, we treat them
// as having possible (not definite) side-effects. This allows idiomatic
// code to behave without warning, such as sizeof(*v) for a volatile-
@@ -3775,55 +3995,95 @@ void ShuffleVectorExpr::setExprs(const ASTContext &C, ArrayRef<Expr *> Exprs) {
memcpy(SubExprs, Exprs.data(), sizeof(Expr *) * Exprs.size());
}
-GenericSelectionExpr::GenericSelectionExpr(const ASTContext &Context,
- SourceLocation GenericLoc, Expr *ControllingExpr,
- ArrayRef<TypeSourceInfo*> AssocTypes,
- ArrayRef<Expr*> AssocExprs,
- SourceLocation DefaultLoc,
- SourceLocation RParenLoc,
- bool ContainsUnexpandedParameterPack,
- unsigned ResultIndex)
- : Expr(GenericSelectionExprClass,
- AssocExprs[ResultIndex]->getType(),
- AssocExprs[ResultIndex]->getValueKind(),
- AssocExprs[ResultIndex]->getObjectKind(),
- AssocExprs[ResultIndex]->isTypeDependent(),
- AssocExprs[ResultIndex]->isValueDependent(),
- AssocExprs[ResultIndex]->isInstantiationDependent(),
- ContainsUnexpandedParameterPack),
- AssocTypes(new (Context) TypeSourceInfo*[AssocTypes.size()]),
- SubExprs(new (Context) Stmt*[END_EXPR+AssocExprs.size()]),
- NumAssocs(AssocExprs.size()), ResultIndex(ResultIndex),
- GenericLoc(GenericLoc), DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
- SubExprs[CONTROLLING] = ControllingExpr;
- assert(AssocTypes.size() == AssocExprs.size());
- std::copy(AssocTypes.begin(), AssocTypes.end(), this->AssocTypes);
- std::copy(AssocExprs.begin(), AssocExprs.end(), SubExprs+END_EXPR);
-}
-
-GenericSelectionExpr::GenericSelectionExpr(const ASTContext &Context,
- SourceLocation GenericLoc, Expr *ControllingExpr,
- ArrayRef<TypeSourceInfo*> AssocTypes,
- ArrayRef<Expr*> AssocExprs,
- SourceLocation DefaultLoc,
- SourceLocation RParenLoc,
- bool ContainsUnexpandedParameterPack)
- : Expr(GenericSelectionExprClass,
- Context.DependentTy,
- VK_RValue,
- OK_Ordinary,
- /*isTypeDependent=*/true,
- /*isValueDependent=*/true,
- /*isInstantiationDependent=*/true,
- ContainsUnexpandedParameterPack),
- AssocTypes(new (Context) TypeSourceInfo*[AssocTypes.size()]),
- SubExprs(new (Context) Stmt*[END_EXPR+AssocExprs.size()]),
- NumAssocs(AssocExprs.size()), ResultIndex(-1U), GenericLoc(GenericLoc),
- DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
- SubExprs[CONTROLLING] = ControllingExpr;
- assert(AssocTypes.size() == AssocExprs.size());
- std::copy(AssocTypes.begin(), AssocTypes.end(), this->AssocTypes);
- std::copy(AssocExprs.begin(), AssocExprs.end(), SubExprs+END_EXPR);
+GenericSelectionExpr::GenericSelectionExpr(
+ const ASTContext &, SourceLocation GenericLoc, Expr *ControllingExpr,
+ ArrayRef<TypeSourceInfo *> AssocTypes, ArrayRef<Expr *> AssocExprs,
+ SourceLocation DefaultLoc, SourceLocation RParenLoc,
+ bool ContainsUnexpandedParameterPack, unsigned ResultIndex)
+ : Expr(GenericSelectionExprClass, AssocExprs[ResultIndex]->getType(),
+ AssocExprs[ResultIndex]->getValueKind(),
+ AssocExprs[ResultIndex]->getObjectKind(),
+ AssocExprs[ResultIndex]->isTypeDependent(),
+ AssocExprs[ResultIndex]->isValueDependent(),
+ AssocExprs[ResultIndex]->isInstantiationDependent(),
+ ContainsUnexpandedParameterPack),
+ NumAssocs(AssocExprs.size()), ResultIndex(ResultIndex),
+ DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
+ assert(AssocTypes.size() == AssocExprs.size() &&
+ "Must have the same number of association expressions"
+ " and TypeSourceInfo!");
+ assert(ResultIndex < NumAssocs && "ResultIndex is out-of-bounds!");
+
+ GenericSelectionExprBits.GenericLoc = GenericLoc;
+ getTrailingObjects<Stmt *>()[ControllingIndex] = ControllingExpr;
+ std::copy(AssocExprs.begin(), AssocExprs.end(),
+ getTrailingObjects<Stmt *>() + AssocExprStartIndex);
+ std::copy(AssocTypes.begin(), AssocTypes.end(),
+ getTrailingObjects<TypeSourceInfo *>());
+}
+
+GenericSelectionExpr::GenericSelectionExpr(
+ const ASTContext &Context, SourceLocation GenericLoc, Expr *ControllingExpr,
+ ArrayRef<TypeSourceInfo *> AssocTypes, ArrayRef<Expr *> AssocExprs,
+ SourceLocation DefaultLoc, SourceLocation RParenLoc,
+ bool ContainsUnexpandedParameterPack)
+ : Expr(GenericSelectionExprClass, Context.DependentTy, VK_RValue,
+ OK_Ordinary,
+ /*isTypeDependent=*/true,
+ /*isValueDependent=*/true,
+ /*isInstantiationDependent=*/true, ContainsUnexpandedParameterPack),
+ NumAssocs(AssocExprs.size()), ResultIndex(ResultDependentIndex),
+ DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
+ assert(AssocTypes.size() == AssocExprs.size() &&
+ "Must have the same number of association expressions"
+ " and TypeSourceInfo!");
+
+ GenericSelectionExprBits.GenericLoc = GenericLoc;
+ getTrailingObjects<Stmt *>()[ControllingIndex] = ControllingExpr;
+ std::copy(AssocExprs.begin(), AssocExprs.end(),
+ getTrailingObjects<Stmt *>() + AssocExprStartIndex);
+ std::copy(AssocTypes.begin(), AssocTypes.end(),
+ getTrailingObjects<TypeSourceInfo *>());
+}
+
+GenericSelectionExpr::GenericSelectionExpr(EmptyShell Empty, unsigned NumAssocs)
+ : Expr(GenericSelectionExprClass, Empty), NumAssocs(NumAssocs) {}
+
+GenericSelectionExpr *GenericSelectionExpr::Create(
+ const ASTContext &Context, SourceLocation GenericLoc, Expr *ControllingExpr,
+ ArrayRef<TypeSourceInfo *> AssocTypes, ArrayRef<Expr *> AssocExprs,
+ SourceLocation DefaultLoc, SourceLocation RParenLoc,
+ bool ContainsUnexpandedParameterPack, unsigned ResultIndex) {
+ unsigned NumAssocs = AssocExprs.size();
+ void *Mem = Context.Allocate(
+ totalSizeToAlloc<Stmt *, TypeSourceInfo *>(1 + NumAssocs, NumAssocs),
+ alignof(GenericSelectionExpr));
+ return new (Mem) GenericSelectionExpr(
+ Context, GenericLoc, ControllingExpr, AssocTypes, AssocExprs, DefaultLoc,
+ RParenLoc, ContainsUnexpandedParameterPack, ResultIndex);
+}
+
+GenericSelectionExpr *GenericSelectionExpr::Create(
+ const ASTContext &Context, SourceLocation GenericLoc, Expr *ControllingExpr,
+ ArrayRef<TypeSourceInfo *> AssocTypes, ArrayRef<Expr *> AssocExprs,
+ SourceLocation DefaultLoc, SourceLocation RParenLoc,
+ bool ContainsUnexpandedParameterPack) {
+ unsigned NumAssocs = AssocExprs.size();
+ void *Mem = Context.Allocate(
+ totalSizeToAlloc<Stmt *, TypeSourceInfo *>(1 + NumAssocs, NumAssocs),
+ alignof(GenericSelectionExpr));
+ return new (Mem) GenericSelectionExpr(
+ Context, GenericLoc, ControllingExpr, AssocTypes, AssocExprs, DefaultLoc,
+ RParenLoc, ContainsUnexpandedParameterPack);
+}
+
+GenericSelectionExpr *
+GenericSelectionExpr::CreateEmpty(const ASTContext &Context,
+ unsigned NumAssocs) {
+ void *Mem = Context.Allocate(
+ totalSizeToAlloc<Stmt *, TypeSourceInfo *>(1 + NumAssocs, NumAssocs),
+ alignof(GenericSelectionExpr));
+ return new (Mem) GenericSelectionExpr(EmptyShell(), NumAssocs);
}
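
A rough, illustrative mock (std::vector instead of trailing objects; the index names mirror ControllingIndex and AssocExprStartIndex) of the storage layout the reworked GenericSelectionExpr allocates: one Stmt* slot for the controlling expression at index 0, the association expressions from index 1, and a parallel TypeSourceInfo* array:

#include <algorithm>
#include <cassert>
#include <vector>

struct MockGenericSelection {
  static constexpr unsigned ControllingIndex = 0;
  static constexpr unsigned AssocExprStartIndex = 1;
  std::vector<const void *> Stmts;      // stand-in for Stmt *[1 + NumAssocs]
  std::vector<const void *> AssocTypes; // stand-in for TypeSourceInfo *[NumAssocs]

  MockGenericSelection(const void *Controlling,
                       const std::vector<const void *> &AssocExprs,
                       const std::vector<const void *> &Types)
      : Stmts(1 + AssocExprs.size()), AssocTypes(Types) {
    assert(AssocExprs.size() == Types.size());
    Stmts[ControllingIndex] = Controlling;
    std::copy(AssocExprs.begin(), AssocExprs.end(),
              Stmts.begin() + AssocExprStartIndex);
  }
};

int main() {
  int Ctrl = 0, E1 = 0, E2 = 0, T1 = 0, T2 = 0;
  MockGenericSelection GS(&Ctrl, {&E1, &E2}, {&T1, &T2});
  assert(GS.Stmts[MockGenericSelection::ControllingIndex] == &Ctrl);
  return 0;
}
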
//===----------------------------------------------------------------------===//
diff --git a/lib/AST/ExprCXX.cpp b/lib/AST/ExprCXX.cpp
index 3891f45c7fc2..b30f785ba8f5 100644
--- a/lib/AST/ExprCXX.cpp
+++ b/lib/AST/ExprCXX.cpp
@@ -1,9 +1,8 @@
//===- ExprCXX.cpp - (C++) Expression AST Node Implementation -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -98,7 +97,8 @@ CXXNewExpr::CXXNewExpr(bool IsGlobalNew, FunctionDecl *OperatorNew,
FunctionDecl *OperatorDelete, bool ShouldPassAlignment,
bool UsualArrayDeleteWantsSize,
ArrayRef<Expr *> PlacementArgs, SourceRange TypeIdParens,
- Expr *ArraySize, InitializationStyle InitializationStyle,
+ Optional<Expr *> ArraySize,
+ InitializationStyle InitializationStyle,
Expr *Initializer, QualType Ty,
TypeSourceInfo *AllocatedTypeInfo, SourceRange Range,
SourceRange DirectInitRange)
@@ -113,7 +113,7 @@ CXXNewExpr::CXXNewExpr(bool IsGlobalNew, FunctionDecl *OperatorNew,
"Only NoInit can have no initializer!");
CXXNewExprBits.IsGlobalNew = IsGlobalNew;
- CXXNewExprBits.IsArray = ArraySize != nullptr;
+ CXXNewExprBits.IsArray = ArraySize.hasValue();
CXXNewExprBits.ShouldPassAlignment = ShouldPassAlignment;
CXXNewExprBits.UsualArrayDeleteWantsSize = UsualArrayDeleteWantsSize;
CXXNewExprBits.StoredInitializationStyle =
@@ -123,12 +123,14 @@ CXXNewExpr::CXXNewExpr(bool IsGlobalNew, FunctionDecl *OperatorNew,
CXXNewExprBits.NumPlacementArgs = PlacementArgs.size();
if (ArraySize) {
- if (ArraySize->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (ArraySize->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
+ if (Expr *SizeExpr = *ArraySize) {
+ if (SizeExpr->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (SizeExpr->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+ }
- getTrailingObjects<Stmt *>()[arraySizeOffset()] = ArraySize;
+ getTrailingObjects<Stmt *>()[arraySizeOffset()] = *ArraySize;
}
if (Initializer) {
@@ -180,11 +182,11 @@ CXXNewExpr::Create(const ASTContext &Ctx, bool IsGlobalNew,
FunctionDecl *OperatorNew, FunctionDecl *OperatorDelete,
bool ShouldPassAlignment, bool UsualArrayDeleteWantsSize,
ArrayRef<Expr *> PlacementArgs, SourceRange TypeIdParens,
- Expr *ArraySize, InitializationStyle InitializationStyle,
- Expr *Initializer, QualType Ty,
- TypeSourceInfo *AllocatedTypeInfo, SourceRange Range,
- SourceRange DirectInitRange) {
- bool IsArray = ArraySize != nullptr;
+ Optional<Expr *> ArraySize,
+ InitializationStyle InitializationStyle, Expr *Initializer,
+ QualType Ty, TypeSourceInfo *AllocatedTypeInfo,
+ SourceRange Range, SourceRange DirectInitRange) {
+ bool IsArray = ArraySize.hasValue();
bool HasInit = Initializer != nullptr;
unsigned NumPlacementArgs = PlacementArgs.size();
bool IsParenTypeId = TypeIdParens.isValid();
@@ -905,13 +907,14 @@ const IdentifierInfo *UserDefinedLiteral::getUDSuffix() const {
}
CXXDefaultInitExpr::CXXDefaultInitExpr(const ASTContext &Ctx, SourceLocation Loc,
- FieldDecl *Field, QualType Ty)
+ FieldDecl *Field, QualType Ty,
+ DeclContext *UsedContext)
: Expr(CXXDefaultInitExprClass, Ty.getNonLValueExprType(Ctx),
Ty->isLValueReferenceType() ? VK_LValue : Ty->isRValueReferenceType()
? VK_XValue
: VK_RValue,
/*FIXME*/ OK_Ordinary, false, false, false, false),
- Field(Field) {
+ Field(Field), UsedContext(UsedContext) {
CXXDefaultInitExprBits.Loc = Loc;
assert(Field->hasInClassInitializer());
}
@@ -1205,7 +1208,11 @@ CXXMethodDecl *LambdaExpr::getCallOperator() const {
TemplateParameterList *LambdaExpr::getTemplateParameterList() const {
CXXRecordDecl *Record = getLambdaClass();
return Record->getGenericLambdaTemplateParameterList();
+}
+ArrayRef<NamedDecl *> LambdaExpr::getExplicitTemplateParameters() const {
+ const CXXRecordDecl *Record = getLambdaClass();
+ return Record->getLambdaExplicitTemplateParameters();
}
CompoundStmt *LambdaExpr::getBody() const {
@@ -1534,30 +1541,30 @@ TemplateArgument SubstNonTypeTemplateParmPackExpr::getArgumentPack() const {
return TemplateArgument(llvm::makeArrayRef(Arguments, NumArguments));
}
-FunctionParmPackExpr::FunctionParmPackExpr(QualType T, ParmVarDecl *ParamPack,
+FunctionParmPackExpr::FunctionParmPackExpr(QualType T, VarDecl *ParamPack,
SourceLocation NameLoc,
unsigned NumParams,
- ParmVarDecl *const *Params)
+ VarDecl *const *Params)
: Expr(FunctionParmPackExprClass, T, VK_LValue, OK_Ordinary, true, true,
true, true),
ParamPack(ParamPack), NameLoc(NameLoc), NumParameters(NumParams) {
if (Params)
std::uninitialized_copy(Params, Params + NumParams,
- getTrailingObjects<ParmVarDecl *>());
+ getTrailingObjects<VarDecl *>());
}
FunctionParmPackExpr *
FunctionParmPackExpr::Create(const ASTContext &Context, QualType T,
- ParmVarDecl *ParamPack, SourceLocation NameLoc,
- ArrayRef<ParmVarDecl *> Params) {
- return new (Context.Allocate(totalSizeToAlloc<ParmVarDecl *>(Params.size())))
+ VarDecl *ParamPack, SourceLocation NameLoc,
+ ArrayRef<VarDecl *> Params) {
+ return new (Context.Allocate(totalSizeToAlloc<VarDecl *>(Params.size())))
FunctionParmPackExpr(T, ParamPack, NameLoc, Params.size(), Params.data());
}
FunctionParmPackExpr *
FunctionParmPackExpr::CreateEmpty(const ASTContext &Context,
unsigned NumParams) {
- return new (Context.Allocate(totalSizeToAlloc<ParmVarDecl *>(NumParams)))
+ return new (Context.Allocate(totalSizeToAlloc<VarDecl *>(NumParams)))
FunctionParmPackExpr(QualType(), nullptr, SourceLocation(), 0, nullptr);
}
diff --git a/lib/AST/ExprClassification.cpp b/lib/AST/ExprClassification.cpp
index e1d6a1c9edcc..c61ee703aca8 100644
--- a/lib/AST/ExprClassification.cpp
+++ b/lib/AST/ExprClassification.cpp
@@ -1,9 +1,8 @@
//===- ExprClassification.cpp - Expression AST Node Implementation --------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -192,6 +191,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::ArrayInitIndexExprClass:
case Expr::NoInitExprClass:
case Expr::DesignatedInitUpdateExprClass:
+ case Expr::SourceLocExprClass:
return Cl::CL_PRValue;
case Expr::ConstantExprClass:
@@ -343,6 +343,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::CXXReinterpretCastExprClass:
case Expr::CXXConstCastExprClass:
case Expr::ObjCBridgedCastExprClass:
+ case Expr::BuiltinBitCastExprClass:
// Only in C++ can casts be interesting at all.
if (!Lang.CPlusPlus) return Cl::CL_PRValue;
return ClassifyUnnamed(Ctx, cast<ExplicitCastExpr>(E)->getTypeAsWritten());
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
index da093ff22c12..f01b42e7ff76 100644
--- a/lib/AST/ExprConstant.cpp
+++ b/lib/AST/ExprConstant.cpp
@@ -1,9 +1,8 @@
//===--- ExprConstant.cpp - Expression Constant Evaluator -----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -38,13 +37,18 @@
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/CharUnits.h"
+#include "clang/AST/CurrentSourceLocExprScope.h"
+#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Expr.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Builtins.h"
+#include "clang/Basic/FixedPoint.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallBitVector.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <cstring>
@@ -53,8 +57,10 @@
#define DEBUG_TYPE "exprconstant"
using namespace clang;
+using llvm::APInt;
using llvm::APSInt;
using llvm::APFloat;
+using llvm::Optional;
static bool IsGlobalLValue(APValue::LValueBase B);
@@ -63,6 +69,9 @@ namespace {
struct CallStackFrame;
struct EvalInfo;
+ using SourceLocExprScopeGuard =
+ CurrentSourceLocExprScope::SourceLocExprScopeGuard;
+
static QualType getType(APValue::LValueBase B) {
if (!B) return QualType();
if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
@@ -82,6 +91,9 @@ namespace {
return D->getType();
}
+ if (B.is<TypeInfoLValue>())
+ return B.getTypeInfoType();
+
const Expr *Base = B.get<const Expr*>();
// For a materialized temporary, the type of the temporary we materialized
@@ -103,28 +115,19 @@ namespace {
}
/// Get an LValue path entry, which is known to not be an array index, as a
- /// field or base class.
- static
- APValue::BaseOrMemberType getAsBaseOrMember(APValue::LValuePathEntry E) {
- APValue::BaseOrMemberType Value;
- Value.setFromOpaqueValue(E.BaseOrMember);
- return Value;
- }
-
- /// Get an LValue path entry, which is known to not be an array index, as a
/// field declaration.
static const FieldDecl *getAsField(APValue::LValuePathEntry E) {
- return dyn_cast<FieldDecl>(getAsBaseOrMember(E).getPointer());
+ return dyn_cast_or_null<FieldDecl>(E.getAsBaseOrMember().getPointer());
}
/// Get an LValue path entry, which is known to not be an array index, as a
/// base class declaration.
static const CXXRecordDecl *getAsBaseClass(APValue::LValuePathEntry E) {
- return dyn_cast<CXXRecordDecl>(getAsBaseOrMember(E).getPointer());
+ return dyn_cast_or_null<CXXRecordDecl>(E.getAsBaseOrMember().getPointer());
}
/// Determine whether this LValue path entry for a base class names a virtual
/// base class.
static bool isVirtualBaseClass(APValue::LValuePathEntry E) {
- return getAsBaseOrMember(E).getInt();
+ return E.getAsBaseOrMember().getInt();
}
/// Given a CallExpr, try to get the alloc_size attribute. May return null.
@@ -222,7 +225,7 @@ namespace {
// The order of this enum is important for diagnostics.
enum CheckSubobjectKind {
CSK_Base, CSK_Derived, CSK_Field, CSK_ArrayToPointer, CSK_ArrayIndex,
- CSK_This, CSK_Real, CSK_Imag
+ CSK_Real, CSK_Imag
};
/// A path from a glvalue to a subobject of that glvalue.
@@ -291,6 +294,27 @@ namespace {
}
}
+ void truncate(ASTContext &Ctx, APValue::LValueBase Base,
+ unsigned NewLength) {
+ if (Invalid)
+ return;
+
+ assert(Base && "cannot truncate path for null pointer");
+ assert(NewLength <= Entries.size() && "not a truncation");
+
+ if (NewLength == Entries.size())
+ return;
+ Entries.resize(NewLength);
+
+ bool IsArray = false;
+ bool FirstIsUnsizedArray = false;
+ MostDerivedPathLength = findMostDerivedSubobject(
+ Ctx, Base, Entries, MostDerivedArraySize, MostDerivedType, IsArray,
+ FirstIsUnsizedArray);
+ MostDerivedIsArrayElement = IsArray;
+ FirstEntryIsAnUnsizedArray = FirstIsUnsizedArray;
+ }
+
void setInvalid() {
Invalid = true;
Entries.clear();
@@ -316,7 +340,8 @@ namespace {
if (IsOnePastTheEnd)
return true;
if (!isMostDerivedAnUnsizedArray() && MostDerivedIsArrayElement &&
- Entries[MostDerivedPathLength - 1].ArrayIndex == MostDerivedArraySize)
+ Entries[MostDerivedPathLength - 1].getAsArrayIndex() ==
+ MostDerivedArraySize)
return true;
return false;
}
@@ -333,8 +358,8 @@ namespace {
// an array of length one with the type of the object as its element type.
bool IsArray = MostDerivedPathLength == Entries.size() &&
MostDerivedIsArrayElement;
- uint64_t ArrayIndex =
- IsArray ? Entries.back().ArrayIndex : (uint64_t)IsOnePastTheEnd;
+ uint64_t ArrayIndex = IsArray ? Entries.back().getAsArrayIndex()
+ : (uint64_t)IsOnePastTheEnd;
uint64_t ArraySize =
IsArray ? getMostDerivedArraySize() : (uint64_t)1;
return {ArrayIndex, ArraySize - ArrayIndex};
@@ -360,9 +385,7 @@ namespace {
/// Update this designator to refer to the first element within this array.
void addArrayUnchecked(const ConstantArrayType *CAT) {
- PathEntry Entry;
- Entry.ArrayIndex = 0;
- Entries.push_back(Entry);
+ Entries.push_back(PathEntry::ArrayIndex(0));
// This is a most-derived object.
MostDerivedType = CAT->getElementType();
@@ -373,9 +396,7 @@ namespace {
/// Update this designator to refer to the first element within the array of
/// elements of type T. This is an array of unknown size.
void addUnsizedArrayUnchecked(QualType ElemTy) {
- PathEntry Entry;
- Entry.ArrayIndex = 0;
- Entries.push_back(Entry);
+ Entries.push_back(PathEntry::ArrayIndex(0));
MostDerivedType = ElemTy;
MostDerivedIsArrayElement = true;
@@ -388,10 +409,7 @@ namespace {
/// Update this designator to refer to the given base or member of this
/// object.
void addDeclUnchecked(const Decl *D, bool Virtual = false) {
- PathEntry Entry;
- APValue::BaseOrMemberType Value(D, Virtual);
- Entry.BaseOrMember = Value.getOpaqueValue();
- Entries.push_back(Entry);
+ Entries.push_back(APValue::BaseOrMemberType(D, Virtual));
// If this isn't a base class, it's a new most-derived object.
if (const FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
@@ -403,9 +421,7 @@ namespace {
}
/// Update this designator to refer to the given complex component.
void addComplexUnchecked(QualType EltTy, bool Imag) {
- PathEntry Entry;
- Entry.ArrayIndex = Imag;
- Entries.push_back(Entry);
+ Entries.push_back(PathEntry::ArrayIndex(Imag));
// This is technically a most-derived object, though in practice this
// is unlikely to matter.
@@ -426,7 +442,8 @@ namespace {
// Can't verify -- trust that the user is doing the right thing (or if
// not, trust that the caller will catch the bad behavior).
// FIXME: Should we reject if this overflows, at least?
- Entries.back().ArrayIndex += TruncatedN;
+ Entries.back() = PathEntry::ArrayIndex(
+ Entries.back().getAsArrayIndex() + TruncatedN);
return;
}
@@ -435,8 +452,8 @@ namespace {
// an array of length one with the type of the object as its element type.
bool IsArray = MostDerivedPathLength == Entries.size() &&
MostDerivedIsArrayElement;
- uint64_t ArrayIndex =
- IsArray ? Entries.back().ArrayIndex : (uint64_t)IsOnePastTheEnd;
+ uint64_t ArrayIndex = IsArray ? Entries.back().getAsArrayIndex()
+ : (uint64_t)IsOnePastTheEnd;
uint64_t ArraySize =
IsArray ? getMostDerivedArraySize() : (uint64_t)1;
@@ -456,7 +473,7 @@ namespace {
"bounds check succeeded for out-of-bounds index");
if (IsArray)
- Entries.back().ArrayIndex = ArrayIndex;
+ Entries.back() = PathEntry::ArrayIndex(ArrayIndex);
else
IsOnePastTheEnd = (ArrayIndex != 0);
}
@@ -479,6 +496,10 @@ namespace {
/// parameters' function scope indices.
APValue *Arguments;
+ /// Source location information about the default argument or default
+ /// initializer expression we're evaluating, if any.
+ CurrentSourceLocExprScope CurSourceLocExprScope;
+
// Note that we intentionally use std::map here so that references to
// values are stable.
typedef std::pair<const void *, unsigned> MapKeyTy;
@@ -613,6 +634,15 @@ namespace {
}
return *this;
}
+
+ OptionalDiagnostic &operator<<(const APFixedPoint &FX) {
+ if (Diag) {
+ SmallVector<char, 32> Buffer;
+ FX.toString(Buffer);
+ *Diag << StringRef(Buffer.data(), Buffer.size());
+ }
+ return *this;
+ }
};
/// A cleanup, and a flag indicating whether it is lifetime-extended.
@@ -629,6 +659,40 @@ namespace {
}
};
+ /// A reference to an object whose construction we are currently evaluating.
+ struct ObjectUnderConstruction {
+ APValue::LValueBase Base;
+ ArrayRef<APValue::LValuePathEntry> Path;
+ friend bool operator==(const ObjectUnderConstruction &LHS,
+ const ObjectUnderConstruction &RHS) {
+ return LHS.Base == RHS.Base && LHS.Path == RHS.Path;
+ }
+ friend llvm::hash_code hash_value(const ObjectUnderConstruction &Obj) {
+ return llvm::hash_combine(Obj.Base, Obj.Path);
+ }
+ };
+ enum class ConstructionPhase { None, Bases, AfterBases };
+}
+
+namespace llvm {
+template<> struct DenseMapInfo<ObjectUnderConstruction> {
+ using Base = DenseMapInfo<APValue::LValueBase>;
+ static ObjectUnderConstruction getEmptyKey() {
+ return {Base::getEmptyKey(), {}};
+ }
+ static ObjectUnderConstruction getTombstoneKey() {
+ return {Base::getTombstoneKey(), {}};
+ }
+ static unsigned getHashValue(const ObjectUnderConstruction &Object) {
+ return hash_value(Object);
+ }
+ static bool isEqual(const ObjectUnderConstruction &LHS,
+ const ObjectUnderConstruction &RHS) {
+ return LHS == RHS;
+ }
+};
+}
+
+namespace {
/// EvalInfo - This is a private struct used by the evaluator to capture
/// information about a subexpression as it is folded. It retains information
/// about the AST context, but also maintains information about the folded
@@ -679,34 +743,41 @@ namespace {
/// declaration whose initializer is being evaluated, if any.
APValue *EvaluatingDeclValue;
- /// EvaluatingObject - Pair of the AST node that an lvalue represents and
- /// the call index that that lvalue was allocated in.
- typedef std::pair<APValue::LValueBase, std::pair<unsigned, unsigned>>
- EvaluatingObject;
-
- /// EvaluatingConstructors - Set of objects that are currently being
- /// constructed.
- llvm::DenseSet<EvaluatingObject> EvaluatingConstructors;
+ /// Set of objects that are currently being constructed.
+ llvm::DenseMap<ObjectUnderConstruction, ConstructionPhase>
+ ObjectsUnderConstruction;
struct EvaluatingConstructorRAII {
EvalInfo &EI;
- EvaluatingObject Object;
+ ObjectUnderConstruction Object;
bool DidInsert;
- EvaluatingConstructorRAII(EvalInfo &EI, EvaluatingObject Object)
+ EvaluatingConstructorRAII(EvalInfo &EI, ObjectUnderConstruction Object,
+ bool HasBases)
: EI(EI), Object(Object) {
- DidInsert = EI.EvaluatingConstructors.insert(Object).second;
+ DidInsert =
+ EI.ObjectsUnderConstruction
+ .insert({Object, HasBases ? ConstructionPhase::Bases
+ : ConstructionPhase::AfterBases})
+ .second;
+ }
+ void finishedConstructingBases() {
+ EI.ObjectsUnderConstruction[Object] = ConstructionPhase::AfterBases;
}
~EvaluatingConstructorRAII() {
- if (DidInsert) EI.EvaluatingConstructors.erase(Object);
+ if (DidInsert) EI.ObjectsUnderConstruction.erase(Object);
}
};
- bool isEvaluatingConstructor(APValue::LValueBase Decl, unsigned CallIndex,
- unsigned Version) {
- return EvaluatingConstructors.count(
- EvaluatingObject(Decl, {CallIndex, Version}));
+ ConstructionPhase
+ isEvaluatingConstructor(APValue::LValueBase Base,
+ ArrayRef<APValue::LValuePathEntry> Path) {
+ return ObjectsUnderConstruction.lookup({Base, Path});
}
+ /// If we're currently speculatively evaluating, the outermost call stack
+ /// depth at which we can mutate state, otherwise 0.
+ unsigned SpeculativeEvaluationDepth = 0;
+
/// The current array initialization index, if we're performing array
/// initialization.
uint64_t ArrayInitIndex = -1;
@@ -719,9 +790,6 @@ namespace {
/// fold (not just why it's not strictly a constant expression)?
bool HasFoldFailureDiagnostic;
- /// Whether or not we're currently speculatively evaluating.
- bool IsSpeculativelyEvaluating;
-
/// Whether or not we're in a context where the front end requires a
/// constant value.
bool InConstantContext;
@@ -786,13 +854,12 @@ namespace {
BottomFrame(*this, SourceLocation(), nullptr, nullptr, nullptr),
EvaluatingDecl((const ValueDecl *)nullptr),
EvaluatingDeclValue(nullptr), HasActiveDiagnostic(false),
- HasFoldFailureDiagnostic(false), IsSpeculativelyEvaluating(false),
+ HasFoldFailureDiagnostic(false),
InConstantContext(false), EvalMode(Mode) {}
void setEvaluatingDecl(APValue::LValueBase Base, APValue &Value) {
EvaluatingDecl = Base;
EvaluatingDeclValue = &Value;
- EvaluatingConstructors.insert({Base, {0, 0}});
}
const LangOptions &getLangOpts() const { return Ctx.getLangOpts(); }
@@ -814,14 +881,20 @@ namespace {
return false;
}
- CallStackFrame *getCallFrame(unsigned CallIndex) {
- assert(CallIndex && "no call index in getCallFrame");
+ std::pair<CallStackFrame *, unsigned>
+ getCallFrameAndDepth(unsigned CallIndex) {
+ assert(CallIndex && "no call index in getCallFrameAndDepth");
// We will eventually hit BottomFrame, which has Index 1, so Frame can't
// be null in this loop.
+ unsigned Depth = CallStackDepth;
CallStackFrame *Frame = CurrentCall;
- while (Frame->Index > CallIndex)
+ while (Frame->Index > CallIndex) {
Frame = Frame->Caller;
- return (Frame->Index == CallIndex) ? Frame : nullptr;
+ --Depth;
+ }
+ if (Frame->Index == CallIndex)
+ return {Frame, Depth};
+ return {nullptr, 0};
}
bool nextStep(const Stmt *S) {
@@ -1102,12 +1175,12 @@ namespace {
class SpeculativeEvaluationRAII {
EvalInfo *Info = nullptr;
Expr::EvalStatus OldStatus;
- bool OldIsSpeculativelyEvaluating;
+ unsigned OldSpeculativeEvaluationDepth;
void moveFromAndCancel(SpeculativeEvaluationRAII &&Other) {
Info = Other.Info;
OldStatus = Other.OldStatus;
- OldIsSpeculativelyEvaluating = Other.OldIsSpeculativelyEvaluating;
+ OldSpeculativeEvaluationDepth = Other.OldSpeculativeEvaluationDepth;
Other.Info = nullptr;
}
@@ -1116,7 +1189,7 @@ namespace {
return;
Info->EvalStatus = OldStatus;
- Info->IsSpeculativelyEvaluating = OldIsSpeculativelyEvaluating;
+ Info->SpeculativeEvaluationDepth = OldSpeculativeEvaluationDepth;
}
public:
@@ -1125,9 +1198,9 @@ namespace {
SpeculativeEvaluationRAII(
EvalInfo &Info, SmallVectorImpl<PartialDiagnosticAt> *NewDiag = nullptr)
: Info(&Info), OldStatus(Info.EvalStatus),
- OldIsSpeculativelyEvaluating(Info.IsSpeculativelyEvaluating) {
+ OldSpeculativeEvaluationDepth(Info.SpeculativeEvaluationDepth) {
Info.EvalStatus.Diag = NewDiag;
- Info.IsSpeculativelyEvaluating = true;
+ Info.SpeculativeEvaluationDepth = Info.CallStackDepth + 1;
}
SpeculativeEvaluationRAII(const SpeculativeEvaluationRAII &Other) = delete;
@@ -1243,7 +1316,7 @@ APValue &CallStackFrame::createTemporary(const void *Key,
bool IsLifetimeExtended) {
unsigned Version = Info.CurrentCall->getTempVersion();
APValue &Result = Temporaries[MapKeyTy(Key, Version)];
- assert(Result.isUninit() && "temporary created multiple times");
+ assert(Result.isAbsent() && "temporary created multiple times");
Info.CleanupStack.push_back(Cleanup(&Result, IsLifetimeExtended));
return Result;
}
@@ -1290,14 +1363,40 @@ void EvalInfo::addCallStack(unsigned Limit) {
}
}
-/// Kinds of access we can perform on an object, for diagnostics.
+/// Kinds of access we can perform on an object, for diagnostics. Note that
+/// we consider a member function call to be a kind of access, even though
+/// it is not formally an access of the object, because it has (largely) the
+/// same set of semantic restrictions.
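+/// For instance (an illustrative sketch):
+///   struct S { constexpr int get() const { return 0; } };
+///   constexpr const S *p = nullptr;
+///   constexpr int k = p->get();
+/// is rejected via AK_MemberCall, even though the call is not formally an
+/// access of any object.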
enum AccessKinds {
AK_Read,
AK_Assign,
AK_Increment,
- AK_Decrement
+ AK_Decrement,
+ AK_MemberCall,
+ AK_DynamicCast,
+ AK_TypeId,
};
+static bool isModification(AccessKinds AK) {
+ switch (AK) {
+ case AK_Read:
+ case AK_MemberCall:
+ case AK_DynamicCast:
+ case AK_TypeId:
+ return false;
+ case AK_Assign:
+ case AK_Increment:
+ case AK_Decrement:
+ return true;
+ }
+ llvm_unreachable("unknown access kind");
+}
+
+/// Is this an access per the C++ definition?
+static bool isFormalAccess(AccessKinds AK) {
+ return AK == AK_Read || isModification(AK);
+}
+
namespace {
struct ComplexValue {
private:
@@ -1613,6 +1712,14 @@ static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result,
EvalInfo &Info);
static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result);
+/// Evaluate an integer or fixed point expression into an APFixedPoint result.
+static bool EvaluateFixedPointOrInteger(const Expr *E, APFixedPoint &Result,
+ EvalInfo &Info);
+
+/// Evaluate only a fixed point expression into an APFixedPoint result.
+static bool EvaluateFixedPoint(const Expr *E, APFixedPoint &Result,
+ EvalInfo &Info);
+
//===----------------------------------------------------------------------===//
// Misc utilities
//===----------------------------------------------------------------------===//
@@ -1706,6 +1813,9 @@ static bool IsGlobalLValue(APValue::LValueBase B) {
return isa<FunctionDecl>(D);
}
+ if (B.is<TypeInfoLValue>())
+ return true;
+
const Expr *E = B.get<const Expr*>();
switch (E->getStmtClass()) {
default:
@@ -1723,9 +1833,10 @@ static bool IsGlobalLValue(APValue::LValueBase B) {
case Expr::PredefinedExprClass:
case Expr::ObjCStringLiteralClass:
case Expr::ObjCEncodeExprClass:
- case Expr::CXXTypeidExprClass:
case Expr::CXXUuidofExprClass:
return true;
+ case Expr::ObjCBoxedExprClass:
+ return cast<ObjCBoxedExpr>(E)->isExpressibleAsConstantInitializer();
case Expr::CallExprClass:
return IsStringLiteralCall(cast<CallExpr>(E));
// For GCC compatibility, &&label has static storage duration.
@@ -1799,9 +1910,9 @@ static void NoteLValueLocation(EvalInfo &Info, APValue::LValueBase Base) {
const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
if (VD)
Info.Note(VD->getLocation(), diag::note_declared_at);
- else
- Info.Note(Base.get<const Expr*>()->getExprLoc(),
- diag::note_constexpr_temporary_here);
+ else if (const Expr *E = Base.dyn_cast<const Expr*>())
+ Info.Note(E->getExprLoc(), diag::note_constexpr_temporary_here);
+ // We have no information to show for a typeid(T) object.
}
/// Check that this reference or pointer core constant expression is a valid
@@ -1938,10 +2049,13 @@ static bool CheckLiteralType(EvalInfo &Info, const Expr *E,
static bool
CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc, QualType Type,
const APValue &Value,
- Expr::ConstExprUsage Usage = Expr::EvaluateForCodeGen) {
- if (Value.isUninit()) {
+ Expr::ConstExprUsage Usage = Expr::EvaluateForCodeGen,
+ SourceLocation SubobjectLoc = SourceLocation()) {
+ if (!Value.hasValue()) {
Info.FFDiag(DiagLoc, diag::note_constexpr_uninitialized)
<< true << Type;
+ if (SubobjectLoc.isValid())
+ Info.Note(SubobjectLoc, diag::note_constexpr_subobject_declared_here);
return false;
}
@@ -1957,18 +2071,20 @@ CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc, QualType Type,
QualType EltTy = Type->castAsArrayTypeUnsafe()->getElementType();
for (unsigned I = 0, N = Value.getArrayInitializedElts(); I != N; ++I) {
if (!CheckConstantExpression(Info, DiagLoc, EltTy,
- Value.getArrayInitializedElt(I), Usage))
+ Value.getArrayInitializedElt(I), Usage,
+ SubobjectLoc))
return false;
}
if (!Value.hasArrayFiller())
return true;
return CheckConstantExpression(Info, DiagLoc, EltTy, Value.getArrayFiller(),
- Usage);
+ Usage, SubobjectLoc);
}
if (Value.isUnion() && Value.getUnionField()) {
return CheckConstantExpression(Info, DiagLoc,
Value.getUnionField()->getType(),
- Value.getUnionValue(), Usage);
+ Value.getUnionValue(), Usage,
+ Value.getUnionField()->getLocation());
}
if (Value.isStruct()) {
RecordDecl *RD = Type->castAs<RecordType>()->getDecl();
@@ -1976,7 +2092,8 @@ CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc, QualType Type,
unsigned BaseIndex = 0;
for (const CXXBaseSpecifier &BS : CD->bases()) {
if (!CheckConstantExpression(Info, DiagLoc, BS.getType(),
- Value.getStructBase(BaseIndex), Usage))
+ Value.getStructBase(BaseIndex), Usage,
+ BS.getBeginLoc()))
return false;
++BaseIndex;
}
@@ -1987,7 +2104,7 @@ CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc, QualType Type,
if (!CheckConstantExpression(Info, DiagLoc, I->getType(),
Value.getStructField(I->getFieldIndex()),
- Usage))
+ Usage, I->getLocation()))
return false;
}
}
@@ -2022,11 +2139,15 @@ static bool EvalPointerValueAsBool(const APValue &Value, bool &Result) {
static bool HandleConversionToBool(const APValue &Val, bool &Result) {
switch (Val.getKind()) {
- case APValue::Uninitialized:
+ case APValue::None:
+ case APValue::Indeterminate:
return false;
case APValue::Int:
Result = Val.getInt().getBoolValue();
return true;
+ case APValue::FixedPoint:
+ Result = Val.getFixedPoint().getBoolValue();
+ return true;
case APValue::Float:
Result = !Val.getFloat().isZero();
return true;
@@ -2091,10 +2212,8 @@ static bool HandleFloatToFloatCast(EvalInfo &Info, const Expr *E,
APFloat &Result) {
APFloat Value = Result;
bool ignored;
- if (Result.convert(Info.Ctx.getFloatTypeSemantics(DestType),
- APFloat::rmNearestTiesToEven, &ignored)
- & APFloat::opOverflow)
- return HandleOverflow(Info, E, Value, DestType);
+ Result.convert(Info.Ctx.getFloatTypeSemantics(DestType),
+ APFloat::rmNearestTiesToEven, &ignored);
return true;
}
@@ -2115,10 +2234,8 @@ static bool HandleIntToFloatCast(EvalInfo &Info, const Expr *E,
QualType SrcType, const APSInt &Value,
QualType DestType, APFloat &Result) {
Result = APFloat(Info.Ctx.getFloatTypeSemantics(DestType), 1);
- if (Result.convertFromAPInt(Value, Value.isSigned(),
- APFloat::rmNearestTiesToEven)
- & APFloat::opOverflow)
- return HandleOverflow(Info, E, Value, DestType);
+ Result.convertFromAPInt(Value, Value.isSigned(),
+ APFloat::rmNearestTiesToEven);
return true;
}
@@ -2270,9 +2387,11 @@ static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS,
if (SA != RHS) {
Info.CCEDiag(E, diag::note_constexpr_large_shift)
<< RHS << E->getType() << LHS.getBitWidth();
- } else if (LHS.isSigned()) {
+ } else if (LHS.isSigned() && !Info.getLangOpts().CPlusPlus2a) {
// C++11 [expr.shift]p2: A signed left shift must have a non-negative
// operand, and must not overflow the corresponding unsigned type.
+ // C++2a [expr.shift]p2: E1 << E2 is the unique value congruent to
+ // E1 x 2^E2 modulo 2^N.
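+ // For example (illustrative, assuming 32-bit int):
+ //   constexpr int a = -1 << 3;   // diagnosed before C++2a; -8 in C++2a
+ //   constexpr int b = 3 << 31;   // diagnosed before C++2a; INT_MIN in C++2a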
if (LHS.isNegative())
Info.CCEDiag(E, diag::note_constexpr_lshift_of_negative) << LHS;
else if (LHS.countLeadingZeros() < SA)
@@ -2334,11 +2453,19 @@ static bool handleFloatFloatBinOp(EvalInfo &Info, const Expr *E,
LHS.subtract(RHS, APFloat::rmNearestTiesToEven);
break;
case BO_Div:
+ // [expr.mul]p4:
+ // If the second operand of / or % is zero the behavior is undefined.
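+ // For example (illustrative), 'constexpr double d = 1.0 / 0.0;' is not a
+ // constant expression, even though IEEE arithmetic would give +infinity.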
+ if (RHS.isZero())
+ Info.CCEDiag(E, diag::note_expr_divide_by_zero);
LHS.divide(RHS, APFloat::rmNearestTiesToEven);
break;
}
- if (LHS.isInfinity() || LHS.isNaN()) {
+ // [expr.pre]p4:
+ // If during the evaluation of an expression, the result is not
+ // mathematically defined [...], the behavior is undefined.
+ // FIXME: C++ rules require us to not conform to IEEE 754 here.
+ if (LHS.isNaN()) {
Info.CCEDiag(E, diag::note_constexpr_float_arithmetic) << LHS.isNaN();
return Info.noteUndefinedBehavior();
}
@@ -2428,6 +2555,21 @@ static bool HandleLValueBasePath(EvalInfo &Info, const CastExpr *E,
return true;
}
+/// Cast an lvalue referring to a derived class to a known base subobject.
+static bool CastToBaseClass(EvalInfo &Info, const Expr *E, LValue &Result,
+ const CXXRecordDecl *DerivedRD,
+ const CXXRecordDecl *BaseRD) {
+ CXXBasePaths Paths(/*FindAmbiguities=*/false,
+ /*RecordPaths=*/true, /*DetectVirtual=*/false);
+ if (!DerivedRD->isDerivedFrom(BaseRD, Paths))
+ llvm_unreachable("Class must be derived from the passed in base class!");
+
+ for (CXXBasePathElement &Elem : Paths.front())
+ if (!HandleLValueBase(Info, E, Result, Elem.Class, Elem.Base))
+ return false;
+ return true;
+}
+
/// Update LVal to refer to the given field, which must be a member of the type
/// currently described by LVal.
static bool HandleLValueMember(EvalInfo &Info, const Expr *E, LValue &LVal,
@@ -2522,10 +2664,6 @@ static bool HandleLValueComplexElement(EvalInfo &Info, const Expr *E,
return true;
}
-static bool handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv,
- QualType Type, const LValue &LVal,
- APValue &RVal);
-
/// Try to evaluate the initializer for a variable declaration.
///
/// \param Info Information about the ongoing evaluation.
@@ -2643,6 +2781,9 @@ static unsigned getBaseIndex(const CXXRecordDecl *Derived,
/// Extract the value of a character from a string literal.
static APSInt extractStringLiteralCharacter(EvalInfo &Info, const Expr *Lit,
uint64_t Index) {
+ assert(!isa<SourceLocExpr>(Lit) &&
+ "SourceLocExpr should have already been converted to a StringLiteral");
+
// FIXME: Support MakeStringConstant
if (const auto *ObjCEnc = dyn_cast<ObjCEncodeExpr>(Lit)) {
std::string Str;
@@ -2668,9 +2809,11 @@ static APSInt extractStringLiteralCharacter(EvalInfo &Info, const Expr *Lit,
}
// Expand a string literal into an array of characters.
-static void expandStringLiteral(EvalInfo &Info, const Expr *Lit,
+//
+// FIXME: This is inefficient; we should probably introduce something similar
+// to the LLVM ConstantDataArray to make this cheaper.
+static void expandStringLiteral(EvalInfo &Info, const StringLiteral *S,
APValue &Result) {
- const StringLiteral *S = cast<StringLiteral>(Lit);
const ConstantArrayType *CAT =
Info.Ctx.getAsConstantArrayType(S->getType());
assert(CAT && "string literal isn't an array");
@@ -2769,28 +2912,73 @@ static bool diagnoseUnreadableFields(EvalInfo &Info, const Expr *E,
return false;
}
+static bool lifetimeStartedInEvaluation(EvalInfo &Info,
+ APValue::LValueBase Base) {
+ // A temporary we created.
+ if (Base.getCallIndex())
+ return true;
+
+ auto *Evaluating = Info.EvaluatingDecl.dyn_cast<const ValueDecl*>();
+ if (!Evaluating)
+ return false;
+
+ // The variable whose initializer we're evaluating.
+ if (auto *BaseD = Base.dyn_cast<const ValueDecl*>())
+ if (declaresSameEntity(Evaluating, BaseD))
+ return true;
+
+ // A temporary lifetime-extended by the variable whose initializer we're
+ // evaluating.
+ if (auto *BaseE = Base.dyn_cast<const Expr *>())
+ if (auto *BaseMTE = dyn_cast<MaterializeTemporaryExpr>(BaseE))
+ if (declaresSameEntity(BaseMTE->getExtendingDecl(), Evaluating))
+ return true;
+
+ return false;
+}
+
namespace {
/// A handle to a complete object (an object that is not a subobject of
/// another object).
struct CompleteObject {
+ /// The identity of the object.
+ APValue::LValueBase Base;
/// The value of the complete object.
APValue *Value;
/// The type of the complete object.
QualType Type;
- bool LifetimeStartedInEvaluation;
CompleteObject() : Value(nullptr) {}
- CompleteObject(APValue *Value, QualType Type,
- bool LifetimeStartedInEvaluation)
- : Value(Value), Type(Type),
- LifetimeStartedInEvaluation(LifetimeStartedInEvaluation) {
- assert(Value && "missing value for complete object");
+ CompleteObject(APValue::LValueBase Base, APValue *Value, QualType Type)
+ : Base(Base), Value(Value), Type(Type) {}
+
+ bool mayReadMutableMembers(EvalInfo &Info) const {
+ // In C++14 onwards, it is permitted to read a mutable member whose
+ // lifetime began within the evaluation.
+ // FIXME: Should we also allow this in C++11?
+ if (!Info.getLangOpts().CPlusPlus14)
+ return false;
+ return lifetimeStartedInEvaluation(Info, Base);
}
- explicit operator bool() const { return Value; }
+ explicit operator bool() const { return !Type.isNull(); }
};
} // end anonymous namespace
+static QualType getSubobjectType(QualType ObjType, QualType SubobjType,
+ bool IsMutable = false) {
+ // C++ [basic.type.qualifier]p1:
+ // - A const object is an object of type const T or a non-mutable subobject
+ // of a const object.
+ if (ObjType.isConstQualified() && !IsMutable)
+ SubobjType.addConst();
+ // - A volatile object is an object of type volatile T or a subobject of a
+ // volatile object.
+ if (ObjType.isVolatileQualified())
+ SubobjType.addVolatile();
+ return SubobjType;
+}
+
/// Find the designated sub-object of an rvalue.
template<typename SubobjectHandler>
typename SubobjectHandler::result_type
@@ -2813,31 +3001,78 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
APValue *O = Obj.Value;
QualType ObjType = Obj.Type;
const FieldDecl *LastField = nullptr;
- const bool MayReadMutableMembers =
- Obj.LifetimeStartedInEvaluation && Info.getLangOpts().CPlusPlus14;
+ const FieldDecl *VolatileField = nullptr;
// Walk the designator's path to find the subobject.
for (unsigned I = 0, N = Sub.Entries.size(); /**/; ++I) {
- if (O->isUninit()) {
+ // Reading an indeterminate value is undefined, but assigning over one is OK.
+ if (O->isAbsent() || (O->isIndeterminate() && handler.AccessKind != AK_Assign)) {
if (!Info.checkingPotentialConstantExpression())
- Info.FFDiag(E, diag::note_constexpr_access_uninit) << handler.AccessKind;
+ Info.FFDiag(E, diag::note_constexpr_access_uninit)
+ << handler.AccessKind << O->isIndeterminate();
return handler.failed();
}
- if (I == N) {
+ // C++ [class.ctor]p5:
+ // const and volatile semantics are not applied on an object under
+ // construction.
+ if ((ObjType.isConstQualified() || ObjType.isVolatileQualified()) &&
+ ObjType->isRecordType() &&
+ Info.isEvaluatingConstructor(
+ Obj.Base, llvm::makeArrayRef(Sub.Entries.begin(),
+ Sub.Entries.begin() + I)) !=
+ ConstructionPhase::None) {
+ ObjType = Info.Ctx.getCanonicalType(ObjType);
+ ObjType.removeLocalConst();
+ ObjType.removeLocalVolatile();
+ }
+
+ // If this is our last pass, check that the final object type is OK.
+ if (I == N || (I == N - 1 && ObjType->isAnyComplexType())) {
+ // Accesses to volatile objects are prohibited.
+ if (ObjType.isVolatileQualified() && isFormalAccess(handler.AccessKind)) {
+ if (Info.getLangOpts().CPlusPlus) {
+ int DiagKind;
+ SourceLocation Loc;
+ const NamedDecl *Decl = nullptr;
+ if (VolatileField) {
+ DiagKind = 2;
+ Loc = VolatileField->getLocation();
+ Decl = VolatileField;
+ } else if (auto *VD = Obj.Base.dyn_cast<const ValueDecl*>()) {
+ DiagKind = 1;
+ Loc = VD->getLocation();
+ Decl = VD;
+ } else {
+ DiagKind = 0;
+ if (auto *E = Obj.Base.dyn_cast<const Expr *>())
+ Loc = E->getExprLoc();
+ }
+ Info.FFDiag(E, diag::note_constexpr_access_volatile_obj, 1)
+ << handler.AccessKind << DiagKind << Decl;
+ Info.Note(Loc, diag::note_constexpr_volatile_here) << DiagKind;
+ } else {
+ Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
+ }
+ return handler.failed();
+ }
+
// If we are reading an object of class type, there may still be more
// things we need to check: if there are any mutable subobjects, we
// cannot perform this read. (This only happens when performing a trivial
// copy or assignment.)
if (ObjType->isRecordType() && handler.AccessKind == AK_Read &&
- !MayReadMutableMembers && diagnoseUnreadableFields(Info, E, ObjType))
+ !Obj.mayReadMutableMembers(Info) &&
+ diagnoseUnreadableFields(Info, E, ObjType))
return handler.failed();
+ }
+ if (I == N) {
if (!handler.found(*O, ObjType))
return false;
// If we modified a bit-field, truncate it to the right width.
- if (handler.AccessKind != AK_Read &&
+ if (isModification(handler.AccessKind) &&
LastField && LastField->isBitField() &&
!truncateBitfieldValue(Info, E, *O, LastField))
return false;
@@ -2850,7 +3085,7 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
// Next subobject is an array element.
const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(ObjType);
assert(CAT && "vla in literal type?");
- uint64_t Index = Sub.Entries[I].ArrayIndex;
+ uint64_t Index = Sub.Entries[I].getAsArrayIndex();
if (CAT->getSize().ule(Index)) {
// Note, it should not be possible to form a pointer with a valid
// designator which points more than one past the end of the array.
@@ -2864,18 +3099,6 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
ObjType = CAT->getElementType();
- // An array object is represented as either an Array APValue or as an
- // LValue which refers to a string literal.
- if (O->isLValue()) {
- assert(I == N - 1 && "extracting subobject of character?");
- assert(!O->hasLValuePath() || O->getLValuePath().empty());
- if (handler.AccessKind != AK_Read)
- expandStringLiteral(Info, O->getLValueBase().get<const Expr *>(),
- *O);
- else
- return handler.foundString(*O, ObjType, Index);
- }
-
if (O->getArrayInitializedElts() > Index)
O = &O->getArrayInitializedElt(Index);
else if (handler.AccessKind != AK_Read) {
@@ -2885,7 +3108,7 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
O = &O->getArrayFiller();
} else if (ObjType->isAnyComplexType()) {
// Next subobject is a complex number.
- uint64_t Index = Sub.Entries[I].ArrayIndex;
+ uint64_t Index = Sub.Entries[I].getAsArrayIndex();
if (Index > 1) {
if (Info.getLangOpts().CPlusPlus11)
Info.FFDiag(E, diag::note_constexpr_access_past_end)
@@ -2895,10 +3118,8 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
return handler.failed();
}
- bool WasConstQualified = ObjType.isConstQualified();
- ObjType = ObjType->castAs<ComplexType>()->getElementType();
- if (WasConstQualified)
- ObjType.addConst();
+ ObjType = getSubobjectType(
+ ObjType, ObjType->castAs<ComplexType>()->getElementType());
assert(I == N - 1 && "extracting subobject of scalar?");
if (O->isComplexInt()) {
@@ -2910,11 +3131,8 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
: O->getComplexFloatReal(), ObjType);
}
} else if (const FieldDecl *Field = getAsField(Sub.Entries[I])) {
- // In C++14 onwards, it is permitted to read a mutable member whose
- // lifetime began within the evaluation.
- // FIXME: Should we also allow this in C++11?
if (Field->isMutable() && handler.AccessKind == AK_Read &&
- !MayReadMutableMembers) {
+ !Obj.mayReadMutableMembers(Info)) {
Info.FFDiag(E, diag::note_constexpr_ltor_mutable, 1)
<< Field;
Info.Note(Field->getLocation(), diag::note_declared_at);
@@ -2935,34 +3153,17 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
} else
O = &O->getStructField(Field->getFieldIndex());
- bool WasConstQualified = ObjType.isConstQualified();
- ObjType = Field->getType();
- if (WasConstQualified && !Field->isMutable())
- ObjType.addConst();
-
- if (ObjType.isVolatileQualified()) {
- if (Info.getLangOpts().CPlusPlus) {
- // FIXME: Include a description of the path to the volatile subobject.
- Info.FFDiag(E, diag::note_constexpr_access_volatile_obj, 1)
- << handler.AccessKind << 2 << Field;
- Info.Note(Field->getLocation(), diag::note_declared_at);
- } else {
- Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
- }
- return handler.failed();
- }
-
+ ObjType = getSubobjectType(ObjType, Field->getType(), Field->isMutable());
LastField = Field;
+ if (Field->getType().isVolatileQualified())
+ VolatileField = Field;
} else {
// Next subobject is a base class.
const CXXRecordDecl *Derived = ObjType->getAsCXXRecordDecl();
const CXXRecordDecl *Base = getAsBaseClass(Sub.Entries[I]);
O = &O->getStructBase(getBaseIndex(Derived, Base));
- bool WasConstQualified = ObjType.isConstQualified();
- ObjType = Info.Ctx.getRecordType(Base);
- if (WasConstQualified)
- ObjType.addConst();
+ ObjType = getSubobjectType(ObjType, Info.Ctx.getRecordType(Base));
}
}
}
@@ -2988,11 +3189,6 @@ struct ExtractSubobjectHandler {
Result = APValue(Value);
return true;
}
- bool foundString(APValue &Subobj, QualType SubobjType, uint64_t Character) {
- Result = APValue(extractStringLiteralCharacter(
- Info, Subobj.getLValueBase().get<const Expr *>(), Character));
- return true;
- }
};
} // end anonymous namespace
@@ -3050,9 +3246,6 @@ struct ModifySubobjectHandler {
Value = NewVal.getFloat();
return true;
}
- bool foundString(APValue &Subobj, QualType SubobjType, uint64_t Character) {
- llvm_unreachable("shouldn't encounter string elements with ExpandArrays");
- }
};
} // end anonymous namespace
@@ -3078,7 +3271,7 @@ static unsigned FindDesignatorMismatch(QualType ObjType,
if (!ObjType.isNull() &&
(ObjType->isArrayType() || ObjType->isAnyComplexType())) {
// Next subobject is an array element.
- if (A.Entries[I].ArrayIndex != B.Entries[I].ArrayIndex) {
+ if (A.Entries[I].getAsArrayIndex() != B.Entries[I].getAsArrayIndex()) {
WasArrayIndex = true;
return I;
}
@@ -3087,7 +3280,8 @@ static unsigned FindDesignatorMismatch(QualType ObjType,
else
ObjType = ObjType->castAsArrayTypeUnsafe()->getElementType();
} else {
- if (A.Entries[I].BaseOrMember != B.Entries[I].BaseOrMember) {
+ if (A.Entries[I].getAsBaseOrMember() !=
+ B.Entries[I].getAsBaseOrMember()) {
WasArrayIndex = false;
return I;
}
@@ -3128,14 +3322,21 @@ static bool AreElementsOfSameArray(QualType ObjType,
static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
AccessKinds AK, const LValue &LVal,
QualType LValType) {
+ if (LVal.InvalidBase) {
+ Info.FFDiag(E);
+ return CompleteObject();
+ }
+
if (!LVal.Base) {
Info.FFDiag(E, diag::note_constexpr_access_null) << AK;
return CompleteObject();
}
CallStackFrame *Frame = nullptr;
+ unsigned Depth = 0;
if (LVal.getLValueCallIndex()) {
- Frame = Info.getCallFrame(LVal.getLValueCallIndex());
+ std::tie(Frame, Depth) =
+ Info.getCallFrameAndDepth(LVal.getLValueCallIndex());
if (!Frame) {
Info.FFDiag(E, diag::note_constexpr_lifetime_ended, 1)
<< AK << LVal.Base.is<const ValueDecl*>();
@@ -3144,11 +3345,13 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
}
}
+ bool IsAccess = isFormalAccess(AK);
+
// C++11 DR1311: An lvalue-to-rvalue conversion on a volatile-qualified type
// is not a constant expression (even if the object is non-volatile). We also
// apply this rule to C++98, in order to conform to the expected 'volatile'
// semantics.
- if (LValType.isVolatileQualified()) {
+ if (IsAccess && LValType.isVolatileQualified()) {
if (Info.getLangOpts().CPlusPlus)
Info.FFDiag(E, diag::note_constexpr_access_volatile_type)
<< AK << LValType;
@@ -3160,7 +3363,6 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
// Compute value storage location and type of base object.
APValue *BaseVal = nullptr;
QualType BaseType = getType(LVal.Base);
- bool LifetimeStartedInEvaluation = Frame;
if (const ValueDecl *D = LVal.Base.dyn_cast<const ValueDecl*>()) {
// In C++98, const, non-volatile integers initialized with ICEs are ICEs.
@@ -3180,37 +3382,29 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
return CompleteObject();
}
- // Accesses of volatile-qualified objects are not allowed.
- if (BaseType.isVolatileQualified()) {
- if (Info.getLangOpts().CPlusPlus) {
- Info.FFDiag(E, diag::note_constexpr_access_volatile_obj, 1)
- << AK << 1 << VD;
- Info.Note(VD->getLocation(), diag::note_declared_at);
- } else {
- Info.FFDiag(E);
- }
- return CompleteObject();
- }
-
// Unless we're looking at a local variable or argument in a constexpr call,
// the variable we're reading must be const.
if (!Frame) {
if (Info.getLangOpts().CPlusPlus14 &&
- VD == Info.EvaluatingDecl.dyn_cast<const ValueDecl *>()) {
+ declaresSameEntity(
+ VD, Info.EvaluatingDecl.dyn_cast<const ValueDecl *>())) {
// OK, we can read and modify an object if we're in the process of
// evaluating its initializer, because its lifetime began in this
// evaluation.
- } else if (AK != AK_Read) {
- // All the remaining cases only permit reading.
+ } else if (isModification(AK)) {
+ // All the remaining cases do not permit modification of the object.
Info.FFDiag(E, diag::note_constexpr_modify_global);
return CompleteObject();
} else if (VD->isConstexpr()) {
// OK, we can read this variable.
} else if (BaseType->isIntegralOrEnumerationType()) {
- // In OpenCL if a variable is in constant address space it is a const value.
+ // In OpenCL if a variable is in constant address space it is a const
+ // value.
if (!(BaseType.isConstQualified() ||
(Info.getLangOpts().OpenCL &&
BaseType.getAddressSpace() == LangAS::opencl_constant))) {
+ if (!IsAccess)
+ return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);
if (Info.getLangOpts().CPlusPlus) {
Info.FFDiag(E, diag::note_constexpr_ltor_non_const_int, 1) << VD;
Info.Note(VD->getLocation(), diag::note_declared_at);
@@ -3219,6 +3413,8 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
}
return CompleteObject();
}
+ } else if (!IsAccess) {
+ return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);
} else if (BaseType->isFloatingType() && BaseType.isConstQualified()) {
// We support folding of const floating-point types, in order to make
// static const data members of such types (supported as an extension)
@@ -3255,7 +3451,7 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
if (!Frame) {
if (const MaterializeTemporaryExpr *MTE =
- dyn_cast<MaterializeTemporaryExpr>(Base)) {
+ dyn_cast_or_null<MaterializeTemporaryExpr>(Base)) {
assert(MTE->getStorageDuration() == SD_Static &&
"should have a frame for a non-global materialized temporary");
@@ -3278,6 +3474,8 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
if (!(BaseType.isConstQualified() &&
BaseType->isIntegralOrEnumerationType()) &&
!(VD && VD->getCanonicalDecl() == ED->getCanonicalDecl())) {
+ if (!IsAccess)
+ return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);
Info.FFDiag(E, diag::note_constexpr_access_static_temporary, 1) << AK;
Info.Note(MTE->getExprLoc(), diag::note_constexpr_temporary_here);
return CompleteObject();
@@ -3285,38 +3483,22 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
BaseVal = Info.Ctx.getMaterializedTemporaryValue(MTE, false);
assert(BaseVal && "got reference to unevaluated temporary");
- LifetimeStartedInEvaluation = true;
} else {
- Info.FFDiag(E);
+ if (!IsAccess)
+ return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);
+ APValue Val;
+ LVal.moveInto(Val);
+ Info.FFDiag(E, diag::note_constexpr_access_unreadable_object)
+ << AK
+ << Val.getAsString(Info.Ctx,
+ Info.Ctx.getLValueReferenceType(LValType));
+ NoteLValueLocation(Info, LVal.Base);
return CompleteObject();
}
} else {
BaseVal = Frame->getTemporary(Base, LVal.Base.getVersion());
assert(BaseVal && "missing value for temporary");
}
-
- // Volatile temporary objects cannot be accessed in constant expressions.
- if (BaseType.isVolatileQualified()) {
- if (Info.getLangOpts().CPlusPlus) {
- Info.FFDiag(E, diag::note_constexpr_access_volatile_obj, 1)
- << AK << 0;
- Info.Note(Base->getExprLoc(), diag::note_constexpr_temporary_here);
- } else {
- Info.FFDiag(E);
- }
- return CompleteObject();
- }
- }
-
- // During the construction of an object, it is not yet 'const'.
- // FIXME: This doesn't do quite the right thing for const subobjects of the
- // object under construction.
- if (Info.isEvaluatingConstructor(LVal.getLValueBase(),
- LVal.getLValueCallIndex(),
- LVal.getLValueVersion())) {
- BaseType = Info.Ctx.getCanonicalType(BaseType);
- BaseType.removeLocalConst();
- LifetimeStartedInEvaluation = true;
}
// In C++14, we can't safely access any mutable state when we might be
@@ -3326,10 +3508,10 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
// to be read here (but take care with 'mutable' fields).
if ((Frame && Info.getLangOpts().CPlusPlus14 &&
Info.EvalStatus.HasSideEffects) ||
- (AK != AK_Read && Info.IsSpeculativelyEvaluating))
+ (isModification(AK) && Depth < Info.SpeculativeEvaluationDepth))
return CompleteObject();
- return CompleteObject(BaseVal, BaseType, LifetimeStartedInEvaluation);
+ return CompleteObject(LVal.getLValueBase(), BaseVal, BaseType);
}
/// Perform an lvalue-to-rvalue conversion on the given glvalue. This
@@ -3351,6 +3533,7 @@ static bool handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv,
// Check for special cases where there is no existing APValue to look at.
const Expr *Base = LVal.Base.dyn_cast<const Expr*>();
+
if (Base && !LVal.getLValueCallIndex() && !Type.isVolatileQualified()) {
if (const CompoundLiteralExpr *CLE = dyn_cast<CompoundLiteralExpr>(Base)) {
// In C99, a CompoundLiteralExpr is an lvalue, and we defer evaluating the
@@ -3363,15 +3546,30 @@ static bool handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv,
APValue Lit;
if (!Evaluate(Lit, Info, CLE->getInitializer()))
return false;
- CompleteObject LitObj(&Lit, Base->getType(), false);
+ CompleteObject LitObj(LVal.Base, &Lit, Base->getType());
return extractSubobject(Info, Conv, LitObj, LVal.Designator, RVal);
} else if (isa<StringLiteral>(Base) || isa<PredefinedExpr>(Base)) {
- // We represent a string literal array as an lvalue pointing at the
- // corresponding expression, rather than building an array of chars.
- // FIXME: Support ObjCEncodeExpr, MakeStringConstant
- APValue Str(Base, CharUnits::Zero(), APValue::NoLValuePath(), 0);
- CompleteObject StrObj(&Str, Base->getType(), false);
- return extractSubobject(Info, Conv, StrObj, LVal.Designator, RVal);
+ // Special-case character extraction so we don't have to construct an
+ // APValue for the whole string.
+ assert(LVal.Designator.Entries.size() <= 1 &&
+ "Can only read characters from string literals");
+ if (LVal.Designator.Entries.empty()) {
+ // Fail for now for LValue to RValue conversion of an array.
+ // (This shouldn't show up in C/C++, but it could be triggered by a
+ // weird EvaluateAsRValue call from a tool.)
+ Info.FFDiag(Conv);
+ return false;
+ }
+ if (LVal.Designator.isOnePastTheEnd()) {
+ if (Info.getLangOpts().CPlusPlus11)
+ Info.FFDiag(Conv, diag::note_constexpr_access_past_end) << AK_Read;
+ else
+ Info.FFDiag(Conv);
+ return false;
+ }
+ uint64_t CharIndex = LVal.Designator.Entries[0].getAsArrayIndex();
+ RVal = APValue(extractStringLiteralCharacter(Info, Base, CharIndex));
+ return true;
}
}
@@ -3497,9 +3695,6 @@ struct CompoundAssignSubobjectHandler {
LVal.moveInto(Subobj);
return true;
}
- bool foundString(APValue &Subobj, QualType SubobjType, uint64_t Character) {
- llvm_unreachable("shouldn't encounter string elements here");
- }
};
} // end anonymous namespace
@@ -3648,9 +3843,6 @@ struct IncDecSubobjectHandler {
LVal.moveInto(Subobj);
return true;
}
- bool foundString(APValue &Subobj, QualType SubobjType, uint64_t Character) {
- llvm_unreachable("shouldn't encounter string elements here");
- }
};
} // end anonymous namespace
@@ -4359,9 +4551,20 @@ static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
return false;
}
+ // DR1872: An instantiated virtual constexpr function can't be called in a
+ // constant expression (prior to C++20). We can still constant-fold such a
+ // call.
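+ // For example (illustrative): X<V>::f is declared constexpr, but the
+ // instantiation overrides V::f and is therefore virtual:
+ //   struct V { virtual int f() const { return 0; } };
+ //   template<typename T> struct X : T { constexpr int f() const { return 1; } };
+ //   constexpr int k = X<V>().f();   // not a constant expression before C++2a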
+ if (!Info.Ctx.getLangOpts().CPlusPlus2a && isa<CXXMethodDecl>(Declaration) &&
+ cast<CXXMethodDecl>(Declaration)->isVirtual())
+ Info.CCEDiag(CallLoc, diag::note_constexpr_virtual_call);
+
+ if (Definition && Definition->isInvalidDecl()) {
+ Info.FFDiag(CallLoc, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+
// Can we evaluate this function call?
- if (Definition && Definition->isConstexpr() &&
- !Definition->isInvalidDecl() && Body)
+ if (Definition && Definition->isConstexpr() && Body)
return true;
if (Info.getLangOpts().CPlusPlus11) {
@@ -4392,6 +4595,487 @@ static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
return false;
}
+namespace {
+struct CheckDynamicTypeHandler {
+ AccessKinds AccessKind;
+ typedef bool result_type;
+ bool failed() { return false; }
+ bool found(APValue &Subobj, QualType SubobjType) { return true; }
+ bool found(APSInt &Value, QualType SubobjType) { return true; }
+ bool found(APFloat &Value, QualType SubobjType) { return true; }
+};
+} // end anonymous namespace
+
+/// Check that we can access the notional vptr of an object / determine its
+/// dynamic type.
+static bool checkDynamicType(EvalInfo &Info, const Expr *E, const LValue &This,
+ AccessKinds AK, bool Polymorphic) {
+ if (This.Designator.Invalid)
+ return false;
+
+ CompleteObject Obj = findCompleteObject(Info, E, AK, This, QualType());
+
+ if (!Obj)
+ return false;
+
+ if (!Obj.Value) {
+ // The object is not usable in constant expressions, so we can't inspect
+ // its value to see if it's in-lifetime or what the active union members
+ // are. We can still check for a one-past-the-end lvalue.
+ if (This.Designator.isOnePastTheEnd() ||
+ This.Designator.isMostDerivedAnUnsizedArray()) {
+ Info.FFDiag(E, This.Designator.isOnePastTheEnd()
+ ? diag::note_constexpr_access_past_end
+ : diag::note_constexpr_access_unsized_array)
+ << AK;
+ return false;
+ } else if (Polymorphic) {
+ // Conservatively refuse to perform a polymorphic operation if we would
+ // not be able to read a notional 'vptr' value.
+ APValue Val;
+ This.moveInto(Val);
+ QualType StarThisType =
+ Info.Ctx.getLValueReferenceType(This.Designator.getType(Info.Ctx));
+ Info.FFDiag(E, diag::note_constexpr_polymorphic_unknown_dynamic_type)
+ << AK << Val.getAsString(Info.Ctx, StarThisType);
+ return false;
+ }
+ return true;
+ }
+
+ CheckDynamicTypeHandler Handler{AK};
+ return Obj && findSubobject(Info, E, Obj, This.Designator, Handler);
+}
+
+/// Check that the pointee of the 'this' pointer in a member function call is
+/// either within its lifetime or in its period of construction or destruction.
+static bool checkNonVirtualMemberCallThisPointer(EvalInfo &Info, const Expr *E,
+ const LValue &This) {
+ return checkDynamicType(Info, E, This, AK_MemberCall, false);
+}
+
+struct DynamicType {
+ /// The dynamic class type of the object.
+ const CXXRecordDecl *Type;
+ /// The corresponding path length in the lvalue.
+ unsigned PathLength;
+};
+
+static const CXXRecordDecl *getBaseClassType(SubobjectDesignator &Designator,
+ unsigned PathLength) {
+ assert(PathLength >= Designator.MostDerivedPathLength && PathLength <=
+ Designator.Entries.size() && "invalid path length");
+ return (PathLength == Designator.MostDerivedPathLength)
+ ? Designator.MostDerivedType->getAsCXXRecordDecl()
+ : getAsBaseClass(Designator.Entries[PathLength - 1]);
+}
+
+/// Determine the dynamic type of an object.
+static Optional<DynamicType> ComputeDynamicType(EvalInfo &Info, const Expr *E,
+ LValue &This, AccessKinds AK) {
+ // If we don't have an lvalue denoting an object of class type, there is no
+ // meaningful dynamic type. (We consider objects of non-class type to have no
+ // dynamic type.)
+ if (!checkDynamicType(Info, E, This, AK, true))
+ return None;
+
+ // Refuse to compute a dynamic type in the presence of virtual bases. This
+ // shouldn't happen other than in constant-folding situations, since literal
+ // types can't have virtual bases.
+ //
+ // Note that consumers of DynamicType assume that the type has no virtual
+ // bases, and will need modifications if this restriction is relaxed.
+ const CXXRecordDecl *Class =
+ This.Designator.MostDerivedType->getAsCXXRecordDecl();
+ if (!Class || Class->getNumVBases()) {
+ Info.FFDiag(E);
+ return None;
+ }
+
+ // FIXME: For very deep class hierarchies, it might be beneficial to use a
+ // binary search here instead. But the overwhelmingly common case is that
+ // we're not in the middle of a constructor, so it probably doesn't matter
+ // in practice.
+ ArrayRef<APValue::LValuePathEntry> Path = This.Designator.Entries;
+ for (unsigned PathLength = This.Designator.MostDerivedPathLength;
+ PathLength <= Path.size(); ++PathLength) {
+ switch (Info.isEvaluatingConstructor(This.getLValueBase(),
+ Path.slice(0, PathLength))) {
+ case ConstructionPhase::Bases:
+ // We're constructing a base class. This is not the dynamic type.
+ break;
+
+ case ConstructionPhase::None:
+ case ConstructionPhase::AfterBases:
+ // We've finished constructing the base classes, so this is the dynamic
+ // type.
+ return DynamicType{getBaseClassType(This.Designator, PathLength),
+ PathLength};
+ }
+ }
+
+ // CWG issue 1517: we're constructing a base class of the object described by
+ // 'This', so that object has not yet begun its period of construction and
+ // any polymorphic operation on it results in undefined behavior.
+ Info.FFDiag(E);
+ return None;
+}
+
+/// Perform virtual dispatch.
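+///
+/// For example (an illustrative C++2a sketch):
+///   struct B { virtual constexpr int f() const { return 1; } };
+///   struct D : B { constexpr int f() const override { return 2; } };
+///   constexpr D d{};
+///   constexpr const B *p = &d;
+///   static_assert(p->f() == 2);   // dispatches on the dynamic type, D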
+static const CXXMethodDecl *HandleVirtualDispatch(
+ EvalInfo &Info, const Expr *E, LValue &This, const CXXMethodDecl *Found,
+ llvm::SmallVectorImpl<QualType> &CovariantAdjustmentPath) {
+ Optional<DynamicType> DynType =
+ ComputeDynamicType(Info, E, This, AK_MemberCall);
+ if (!DynType)
+ return nullptr;
+
+ // Find the final overrider. It must be declared in one of the classes on the
+ // path from the dynamic type to the static type.
+ // FIXME: If we ever allow literal types to have virtual base classes, that
+ // won't be true.
+ const CXXMethodDecl *Callee = Found;
+ unsigned PathLength = DynType->PathLength;
+ for (/**/; PathLength <= This.Designator.Entries.size(); ++PathLength) {
+ const CXXRecordDecl *Class = getBaseClassType(This.Designator, PathLength);
+ const CXXMethodDecl *Overrider =
+ Found->getCorrespondingMethodDeclaredInClass(Class, false);
+ if (Overrider) {
+ Callee = Overrider;
+ break;
+ }
+ }
+
+ // C++2a [class.abstract]p6:
+ // the effect of making a virtual call to a pure virtual function [...] is
+ // undefined
+ if (Callee->isPure()) {
+ Info.FFDiag(E, diag::note_constexpr_pure_virtual_call, 1) << Callee;
+ Info.Note(Callee->getLocation(), diag::note_declared_at);
+ return nullptr;
+ }
+
+ // If necessary, walk the rest of the path to determine the sequence of
+ // covariant adjustment steps to apply.
+ if (!Info.Ctx.hasSameUnqualifiedType(Callee->getReturnType(),
+ Found->getReturnType())) {
+ CovariantAdjustmentPath.push_back(Callee->getReturnType());
+ for (unsigned CovariantPathLength = PathLength + 1;
+ CovariantPathLength != This.Designator.Entries.size();
+ ++CovariantPathLength) {
+ const CXXRecordDecl *NextClass =
+ getBaseClassType(This.Designator, CovariantPathLength);
+ const CXXMethodDecl *Next =
+ Found->getCorrespondingMethodDeclaredInClass(NextClass, false);
+ if (Next && !Info.Ctx.hasSameUnqualifiedType(
+ Next->getReturnType(), CovariantAdjustmentPath.back()))
+ CovariantAdjustmentPath.push_back(Next->getReturnType());
+ }
+ if (!Info.Ctx.hasSameUnqualifiedType(Found->getReturnType(),
+ CovariantAdjustmentPath.back()))
+ CovariantAdjustmentPath.push_back(Found->getReturnType());
+ }
+
+ // Perform 'this' adjustment.
+ if (!CastToDerivedClass(Info, E, This, Callee->getParent(), PathLength))
+ return nullptr;
+
+ return Callee;
+}
+
+/// Perform the adjustment from a value returned by a virtual function to
+/// a value of the statically expected type, which may be a pointer or
+/// reference to a base class of the returned type.
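+///
+/// For example (an illustrative C++2a sketch):
+///   struct B { virtual constexpr const B *self() const { return this; } };
+///   struct D : B { constexpr const D *self() const override { return this; } };
+/// For a call 'b.self()' where 'b' is a B glvalue denoting a D object, virtual
+/// dispatch selects D::self and yields a 'const D *'; this function converts
+/// that value back to the statically expected 'const B *'.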
+static bool HandleCovariantReturnAdjustment(EvalInfo &Info, const Expr *E,
+ APValue &Result,
+ ArrayRef<QualType> Path) {
+ assert(Result.isLValue() &&
+ "unexpected kind of APValue for covariant return");
+ if (Result.isNullPointer())
+ return true;
+
+ LValue LVal;
+ LVal.setFrom(Info.Ctx, Result);
+
+ const CXXRecordDecl *OldClass = Path[0]->getPointeeCXXRecordDecl();
+ for (unsigned I = 1; I != Path.size(); ++I) {
+ const CXXRecordDecl *NewClass = Path[I]->getPointeeCXXRecordDecl();
+ assert(OldClass && NewClass && "unexpected kind of covariant return");
+ if (OldClass != NewClass &&
+ !CastToBaseClass(Info, E, LVal, OldClass, NewClass))
+ return false;
+ OldClass = NewClass;
+ }
+
+ LVal.moveInto(Result);
+ return true;
+}
+
+/// Determine whether \p Base, which is known to be a direct base class of
+/// \p Derived, is a public base class.
+static bool isBaseClassPublic(const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Base) {
+ for (const CXXBaseSpecifier &BaseSpec : Derived->bases()) {
+ auto *BaseClass = BaseSpec.getType()->getAsCXXRecordDecl();
+ if (BaseClass && declaresSameEntity(BaseClass, Base))
+ return BaseSpec.getAccessSpecifier() == AS_public;
+ }
+ llvm_unreachable("Base is not a direct base of Derived");
+}
+
+/// Apply the given dynamic cast operation on the provided lvalue.
+///
+/// This implements the hard case of dynamic_cast, requiring a "runtime check"
+/// to find a suitable target subobject.
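+///
+/// For example (an illustrative C++2a sketch):
+///   struct B { virtual constexpr int kind() const { return 0; } };
+///   struct D : B { constexpr int kind() const override { return 1; } };
+///   constexpr D d{};
+///   constexpr const B &b = d;
+///   static_assert(dynamic_cast<const D &>(b).kind() == 1);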
+static bool HandleDynamicCast(EvalInfo &Info, const ExplicitCastExpr *E,
+ LValue &Ptr) {
+ // We can't do anything with a non-symbolic pointer value.
+ SubobjectDesignator &D = Ptr.Designator;
+ if (D.Invalid)
+ return false;
+
+ // C++ [expr.dynamic.cast]p6:
+ // If v is a null pointer value, the result is a null pointer value.
+ if (Ptr.isNullPointer() && !E->isGLValue())
+ return true;
+
+ // For all the other cases, we need the pointer to point to an object within
+ // its lifetime / period of construction / destruction, and we need to know
+ // its dynamic type.
+ Optional<DynamicType> DynType =
+ ComputeDynamicType(Info, E, Ptr, AK_DynamicCast);
+ if (!DynType)
+ return false;
+
+ // C++ [expr.dynamic.cast]p7:
+ // If T is "pointer to cv void", then the result is a pointer to the most
+ // derived object
+ if (E->getType()->isVoidPointerType())
+ return CastToDerivedClass(Info, E, Ptr, DynType->Type, DynType->PathLength);
+
+ const CXXRecordDecl *C = E->getTypeAsWritten()->getPointeeCXXRecordDecl();
+ assert(C && "dynamic_cast target is not void pointer nor class");
+ CanQualType CQT = Info.Ctx.getCanonicalType(Info.Ctx.getRecordType(C));
+
+ auto RuntimeCheckFailed = [&] (CXXBasePaths *Paths) {
+ // C++ [expr.dynamic.cast]p9:
+ if (!E->isGLValue()) {
+ // The value of a failed cast to pointer type is the null pointer value
+ // of the required result type.
+ auto TargetVal = Info.Ctx.getTargetNullPointerValue(E->getType());
+ Ptr.setNull(E->getType(), TargetVal);
+ return true;
+ }
+
+ // A failed cast to reference type throws [...] std::bad_cast.
+ unsigned DiagKind;
+ if (!Paths && (declaresSameEntity(DynType->Type, C) ||
+ DynType->Type->isDerivedFrom(C)))
+ DiagKind = 0;
+ else if (!Paths || Paths->begin() == Paths->end())
+ DiagKind = 1;
+ else if (Paths->isAmbiguous(CQT))
+ DiagKind = 2;
+ else {
+ assert(Paths->front().Access != AS_public && "why did the cast fail?");
+ DiagKind = 3;
+ }
+ Info.FFDiag(E, diag::note_constexpr_dynamic_cast_to_reference_failed)
+ << DiagKind << Ptr.Designator.getType(Info.Ctx)
+ << Info.Ctx.getRecordType(DynType->Type)
+ << E->getType().getUnqualifiedType();
+ return false;
+ };
+
+ // Runtime check, phase 1:
+ // Walk from the base subobject towards the derived object looking for the
+ // target type.
+ for (int PathLength = Ptr.Designator.Entries.size();
+ PathLength >= (int)DynType->PathLength; --PathLength) {
+ const CXXRecordDecl *Class = getBaseClassType(Ptr.Designator, PathLength);
+ if (declaresSameEntity(Class, C))
+ return CastToDerivedClass(Info, E, Ptr, Class, PathLength);
+ // We can only walk across public inheritance edges.
+ if (PathLength > (int)DynType->PathLength &&
+ !isBaseClassPublic(getBaseClassType(Ptr.Designator, PathLength - 1),
+ Class))
+ return RuntimeCheckFailed(nullptr);
+ }
+
+ // Runtime check, phase 2:
+ // Search the dynamic type for an unambiguous public base of type C.
+ CXXBasePaths Paths(/*FindAmbiguities=*/true,
+ /*RecordPaths=*/true, /*DetectVirtual=*/false);
+ if (DynType->Type->isDerivedFrom(C, Paths) && !Paths.isAmbiguous(CQT) &&
+ Paths.front().Access == AS_public) {
+ // Downcast to the dynamic type...
+ if (!CastToDerivedClass(Info, E, Ptr, DynType->Type, DynType->PathLength))
+ return false;
+ // ... then upcast to the chosen base class subobject.
+ for (CXXBasePathElement &Elem : Paths.front())
+ if (!HandleLValueBase(Info, E, Ptr, Elem.Class, Elem.Base))
+ return false;
+ return true;
+ }
+
+ // Otherwise, the runtime check fails.
+ return RuntimeCheckFailed(&Paths);
+}
+
+namespace {
+struct StartLifetimeOfUnionMemberHandler {
+ const FieldDecl *Field;
+
+ static const AccessKinds AccessKind = AK_Assign;
+
+ APValue getDefaultInitValue(QualType SubobjType) {
+ if (auto *RD = SubobjType->getAsCXXRecordDecl()) {
+ if (RD->isUnion())
+ return APValue((const FieldDecl*)nullptr);
+
+ APValue Struct(APValue::UninitStruct(), RD->getNumBases(),
+ std::distance(RD->field_begin(), RD->field_end()));
+
+ unsigned Index = 0;
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ End = RD->bases_end(); I != End; ++I, ++Index)
+ Struct.getStructBase(Index) = getDefaultInitValue(I->getType());
+
+ for (const auto *I : RD->fields()) {
+ if (I->isUnnamedBitfield())
+ continue;
+ Struct.getStructField(I->getFieldIndex()) =
+ getDefaultInitValue(I->getType());
+ }
+ return Struct;
+ }
+
+ if (auto *AT = dyn_cast_or_null<ConstantArrayType>(
+ SubobjType->getAsArrayTypeUnsafe())) {
+ APValue Array(APValue::UninitArray(), 0, AT->getSize().getZExtValue());
+ if (Array.hasArrayFiller())
+ Array.getArrayFiller() = getDefaultInitValue(AT->getElementType());
+ return Array;
+ }
+
+ return APValue::IndeterminateValue();
+ }
+
+ typedef bool result_type;
+ bool failed() { return false; }
+ bool found(APValue &Subobj, QualType SubobjType) {
+ // We are supposed to perform no initialization but begin the lifetime of
+ // the object. We interpret that as meaning to do what default
+ // initialization of the object would do if all constructors involved were
+ // trivial:
+ // * All base, non-variant member, and array element subobjects' lifetimes
+ // begin
+ // * No variant members' lifetimes begin
+ // * All scalar subobjects whose lifetimes begin have indeterminate values
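+ //
+ // For example (illustrative): given 'union U { struct { int a, b; } s; int c; } u;',
+ // an assignment to 'u.s.a' makes 's' the active member of 'u': both 's.a' and
+ // 's.b' begin their lifetime (with indeterminate values, before the store to
+ // 'a'), while the variant member 'c' does not.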
+ assert(SubobjType->isUnionType());
+ if (!declaresSameEntity(Subobj.getUnionField(), Field))
+ Subobj.setUnion(Field, getDefaultInitValue(Field->getType()));
+ return true;
+ }
+ bool found(APSInt &Value, QualType SubobjType) {
+ llvm_unreachable("wrong value kind for union object");
+ }
+ bool found(APFloat &Value, QualType SubobjType) {
+ llvm_unreachable("wrong value kind for union object");
+ }
+};
+} // end anonymous namespace
+
+const AccessKinds StartLifetimeOfUnionMemberHandler::AccessKind;
+
+/// Handle a builtin simple-assignment or a call to a trivial assignment
+/// operator whose left-hand side might involve a union member access. If it
+/// does, implicitly start the lifetime of any accessed union elements per
+/// C++20 [class.union]p5.
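+///
+/// For example (an illustrative C++2a sketch):
+///   union U { int a; float f; };
+///   constexpr float g() { U u{1}; u.f = 2.5f; return u.f; }
+///   static_assert(g() == 2.5f);   // the assignment makes 'f' the active member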
+static bool HandleUnionActiveMemberChange(EvalInfo &Info, const Expr *LHSExpr,
+ const LValue &LHS) {
+ if (LHS.InvalidBase || LHS.Designator.Invalid)
+ return false;
+
+ llvm::SmallVector<std::pair<unsigned, const FieldDecl*>, 4> UnionPathLengths;
+ // C++ [class.union]p5:
+ // define the set S(E) of subexpressions of E as follows:
+ unsigned PathLength = LHS.Designator.Entries.size();
+ for (const Expr *E = LHSExpr; E != nullptr;) {
+ // -- If E is of the form A.B, S(E) contains the elements of S(A)...
+ if (auto *ME = dyn_cast<MemberExpr>(E)) {
+ auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
+ if (!FD)
+ break;
+
+ // ... and also contains A.B if B names a union member
+ if (FD->getParent()->isUnion())
+ UnionPathLengths.push_back({PathLength - 1, FD});
+
+ E = ME->getBase();
+ --PathLength;
+ assert(declaresSameEntity(FD,
+ LHS.Designator.Entries[PathLength]
+ .getAsBaseOrMember().getPointer()));
+
+ // -- If E is of the form A[B] and is interpreted as a built-in array
+ // subscripting operator, S(E) is [S(the array operand, if any)].
+ } else if (auto *ASE = dyn_cast<ArraySubscriptExpr>(E)) {
+ // Step over an ArrayToPointerDecay implicit cast.
+ auto *Base = ASE->getBase()->IgnoreImplicit();
+ if (!Base->getType()->isArrayType())
+ break;
+
+ E = Base;
+ --PathLength;
+
+ } else if (auto *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ // Step over a derived-to-base conversion.
+ E = ICE->getSubExpr();
+ if (ICE->getCastKind() == CK_NoOp)
+ continue;
+ if (ICE->getCastKind() != CK_DerivedToBase &&
+ ICE->getCastKind() != CK_UncheckedDerivedToBase)
+ break;
+ // Walk path backwards as we walk up from the base to the derived class.
+ for (const CXXBaseSpecifier *Elt : llvm::reverse(ICE->path())) {
+ --PathLength;
+ (void)Elt;
+ assert(declaresSameEntity(Elt->getType()->getAsCXXRecordDecl(),
+ LHS.Designator.Entries[PathLength]
+ .getAsBaseOrMember().getPointer()));
+ }
+
+ // -- Otherwise, S(E) is empty.
+ } else {
+ break;
+ }
+ }
+
+ // Common case: no unions' lifetimes are started.
+ if (UnionPathLengths.empty())
+ return true;
+
+ // if modification of X [would access an inactive union member], an object
+ // of the type of X is implicitly created
+ CompleteObject Obj =
+ findCompleteObject(Info, LHSExpr, AK_Assign, LHS, LHSExpr->getType());
+ if (!Obj)
+ return false;
+ for (std::pair<unsigned, const FieldDecl *> LengthAndField :
+ llvm::reverse(UnionPathLengths)) {
+ // Form a designator for the union object.
+ SubobjectDesignator D = LHS.Designator;
+ D.truncate(Info.Ctx, LHS.Base, LengthAndField.first);
+
+ StartLifetimeOfUnionMemberHandler StartLifetime{LengthAndField.second};
+ if (!findSubobject(Info, LHSExpr, Obj, D, StartLifetime))
+ return false;
+ }
+
+ return true;
+}
+
/// Determine if a class has any fields that might need to be copied by a
/// trivial copy or move operation.
static bool hasFields(const CXXRecordDecl *RD) {
@@ -4413,9 +5097,25 @@ typedef SmallVector<APValue, 8> ArgVector;
}
/// EvaluateArgs - Evaluate the arguments to a function call.
-static bool EvaluateArgs(ArrayRef<const Expr*> Args, ArgVector &ArgValues,
- EvalInfo &Info) {
+static bool EvaluateArgs(ArrayRef<const Expr *> Args, ArgVector &ArgValues,
+ EvalInfo &Info, const FunctionDecl *Callee) {
bool Success = true;
+ llvm::SmallBitVector ForbiddenNullArgs;
+ if (Callee->hasAttr<NonNullAttr>()) {
+ ForbiddenNullArgs.resize(Args.size());
+ for (const auto *Attr : Callee->specific_attrs<NonNullAttr>()) {
+ if (!Attr->args_size()) {
+ ForbiddenNullArgs.set();
+ break;
+ } else
+ for (auto Idx : Attr->args()) {
+ unsigned ASTIdx = Idx.getASTIndex();
+ if (ASTIdx >= Args.size())
+ continue;
+ ForbiddenNullArgs[ASTIdx] = 1;
+ }
+ }
+ }
for (ArrayRef<const Expr*>::iterator I = Args.begin(), E = Args.end();
I != E; ++I) {
if (!Evaluate(ArgValues[I - Args.begin()], Info, *I)) {
@@ -4424,6 +5124,13 @@ static bool EvaluateArgs(ArrayRef<const Expr*> Args, ArgVector &ArgValues,
if (!Info.noteFailure())
return false;
Success = false;
+ } else if (!ForbiddenNullArgs.empty() &&
+ ForbiddenNullArgs[I - Args.begin()] &&
+ ArgValues[I - Args.begin()].isNullPointer()) {
+ Info.CCEDiag(*I, diag::note_non_null_attribute_failed);
+ if (!Info.noteFailure())
+ return false;
+ Success = false;
}
}
return Success;
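A rough example (not from the patch; names are illustrative) of the effect of the new ForbiddenNullArgs check: passing a null pointer to a parameter covered by the nonnull attribute now stops constant evaluation, assuming a GNU-attribute-capable compiler and -std=c++14 or later.

    [[gnu::nonnull]] constexpr bool isNull(const int *p) { return p == nullptr; }
    constexpr int zero = 0;
    constexpr bool ok = isNull(&zero);       // fine, evaluates to false
    // constexpr bool bad = isNull(nullptr); // rejected: null passed to a nonnull parameter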
@@ -4436,7 +5143,7 @@ static bool HandleFunctionCall(SourceLocation CallLoc,
EvalInfo &Info, APValue &Result,
const LValue *ResultSlot) {
ArgVector ArgValues(Args.size());
- if (!EvaluateArgs(Args, ArgValues, Info))
+ if (!EvaluateArgs(Args, ArgValues, Info, Callee))
return false;
if (!Info.CheckCallLimit(CallLoc))
@@ -4462,6 +5169,9 @@ static bool HandleFunctionCall(SourceLocation CallLoc,
if (!handleLValueToRValueConversion(Info, Args[0], Args[0]->getType(),
RHS, RHSValue))
return false;
+ if (Info.getLangOpts().CPlusPlus2a && MD->isTrivial() &&
+ !HandleUnionActiveMemberChange(Info, Args[0], *This))
+ return false;
if (!handleAssignment(Info, Args[0], *This, MD->getThisType(),
RHSValue))
return false;
@@ -4505,8 +5215,9 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
}
EvalInfo::EvaluatingConstructorRAII EvalObj(
- Info, {This.getLValueBase(),
- {This.getLValueCallIndex(), This.getLValueVersion()}});
+ Info,
+ ObjectUnderConstruction{This.getLValueBase(), This.Designator.Entries},
+ RD->getNumBases());
CallStackFrame Frame(Info, CallLoc, Definition, &This, ArgValues);
// FIXME: Creating an APValue just to hold a nonexistent return value is
@@ -4544,7 +5255,7 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
}
// Reserve space for the struct members.
- if (!RD->isUnion() && Result.isUninit())
+ if (!RD->isUnion() && !Result.hasValue())
Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
std::distance(RD->field_begin(), RD->field_end()));
@@ -4601,7 +5312,7 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
// subobject other than the first.
// FIXME: In this case, the values of the other subobjects are
// specified, since zero-initialization sets all padding bits to zero.
- if (Value->isUninit() ||
+ if (!Value->hasValue() ||
(Value->isUnion() && Value->getUnionField() != FD)) {
if (CD->isUnion())
*Value = APValue(FD);
@@ -4640,6 +5351,11 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
return false;
Success = false;
}
+
+ // This is the point at which the dynamic type of the object becomes this
+ // class type.
+ if (I->isBaseInitializer() && BasesSeen == RD->getNumBases())
+ EvalObj.finishedConstructingBases();
}
return Success &&
@@ -4651,7 +5367,7 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
const CXXConstructorDecl *Definition,
EvalInfo &Info, APValue &Result) {
ArgVector ArgValues(Args.size());
- if (!EvaluateArgs(Args, ArgValues, Info))
+ if (!EvaluateArgs(Args, ArgValues, Info, Definition))
return false;
return HandleConstructorCall(E, This, ArgValues.data(), Definition,
@@ -4663,6 +5379,491 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
//===----------------------------------------------------------------------===//
namespace {
+class BitCastBuffer {
+ // FIXME: We're going to need bit-level granularity when we support
+ // bit-fields.
+ // FIXME: It's possible under the C++ standard for 'char' to not be 8 bits, but
+ // we don't support a host or target where that is the case. Still, we should
+ // use a more generic type in case we ever do.
+ SmallVector<Optional<unsigned char>, 32> Bytes;
+
+ static_assert(std::numeric_limits<unsigned char>::digits >= 8,
+ "Need at least 8 bit unsigned char");
+
+ bool TargetIsLittleEndian;
+
+public:
+ BitCastBuffer(CharUnits Width, bool TargetIsLittleEndian)
+ : Bytes(Width.getQuantity()),
+ TargetIsLittleEndian(TargetIsLittleEndian) {}
+
+ LLVM_NODISCARD
+ bool readObject(CharUnits Offset, CharUnits Width,
+ SmallVectorImpl<unsigned char> &Output) const {
+ for (CharUnits I = Offset, E = Offset + Width; I != E; ++I) {
+ // If a byte of an integer is uninitialized, then the whole integer is
+ // uninitialized.
+ if (!Bytes[I.getQuantity()])
+ return false;
+ Output.push_back(*Bytes[I.getQuantity()]);
+ }
+ if (llvm::sys::IsLittleEndianHost != TargetIsLittleEndian)
+ std::reverse(Output.begin(), Output.end());
+ return true;
+ }
+
+ void writeObject(CharUnits Offset, SmallVectorImpl<unsigned char> &Input) {
+ if (llvm::sys::IsLittleEndianHost != TargetIsLittleEndian)
+ std::reverse(Input.begin(), Input.end());
+
+ size_t Index = 0;
+ for (unsigned char Byte : Input) {
+ assert(!Bytes[Offset.getQuantity() + Index] && "overwriting a byte?");
+ Bytes[Offset.getQuantity() + Index] = Byte;
+ ++Index;
+ }
+ }
+
+ size_t size() { return Bytes.size(); }
+};
+
+/// Traverse an APValue to produce a BitCastBuffer, emulating how the current
+/// target would represent the value at runtime.
+class APValueToBufferConverter {
+ EvalInfo &Info;
+ BitCastBuffer Buffer;
+ const CastExpr *BCE;
+
+ APValueToBufferConverter(EvalInfo &Info, CharUnits ObjectWidth,
+ const CastExpr *BCE)
+ : Info(Info),
+ Buffer(ObjectWidth, Info.Ctx.getTargetInfo().isLittleEndian()),
+ BCE(BCE) {}
+
+ bool visit(const APValue &Val, QualType Ty) {
+ return visit(Val, Ty, CharUnits::fromQuantity(0));
+ }
+
+ // Write out Val with type Ty into Buffer starting at Offset.
+ bool visit(const APValue &Val, QualType Ty, CharUnits Offset) {
+ assert((size_t)Offset.getQuantity() <= Buffer.size());
+
+ // As a special case, nullptr_t has an indeterminate value.
+ if (Ty->isNullPtrType())
+ return true;
+
+ // Dig through Src to find the byte at SrcOffset.
+ switch (Val.getKind()) {
+ case APValue::Indeterminate:
+ case APValue::None:
+ return true;
+
+ case APValue::Int:
+ return visitInt(Val.getInt(), Ty, Offset);
+ case APValue::Float:
+ return visitFloat(Val.getFloat(), Ty, Offset);
+ case APValue::Array:
+ return visitArray(Val, Ty, Offset);
+ case APValue::Struct:
+ return visitRecord(Val, Ty, Offset);
+
+ case APValue::ComplexInt:
+ case APValue::ComplexFloat:
+ case APValue::Vector:
+ case APValue::FixedPoint:
+ // FIXME: We should support these.
+
+ case APValue::Union:
+ case APValue::MemberPointer:
+ case APValue::AddrLabelDiff: {
+ Info.FFDiag(BCE->getBeginLoc(),
+ diag::note_constexpr_bit_cast_unsupported_type)
+ << Ty;
+ return false;
+ }
+
+ case APValue::LValue:
+ llvm_unreachable("LValue subobject in bit_cast?");
+ }
+ llvm_unreachable("Unhandled APValue::ValueKind");
+ }
+
+ bool visitRecord(const APValue &Val, QualType Ty, CharUnits Offset) {
+ const RecordDecl *RD = Ty->getAsRecordDecl();
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+
+ // Visit the base classes.
+ if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (size_t I = 0, E = CXXRD->getNumBases(); I != E; ++I) {
+ const CXXBaseSpecifier &BS = CXXRD->bases_begin()[I];
+ CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();
+
+ if (!visitRecord(Val.getStructBase(I), BS.getType(),
+ Layout.getBaseClassOffset(BaseDecl) + Offset))
+ return false;
+ }
+ }
+
+ // Visit the fields.
+ unsigned FieldIdx = 0;
+ for (FieldDecl *FD : RD->fields()) {
+ if (FD->isBitField()) {
+ Info.FFDiag(BCE->getBeginLoc(),
+ diag::note_constexpr_bit_cast_unsupported_bitfield);
+ return false;
+ }
+
+ uint64_t FieldOffsetBits = Layout.getFieldOffset(FieldIdx);
+
+ assert(FieldOffsetBits % Info.Ctx.getCharWidth() == 0 &&
+ "only bit-fields can have sub-char alignment");
+ CharUnits FieldOffset =
+ Info.Ctx.toCharUnitsFromBits(FieldOffsetBits) + Offset;
+ QualType FieldTy = FD->getType();
+ if (!visit(Val.getStructField(FieldIdx), FieldTy, FieldOffset))
+ return false;
+ ++FieldIdx;
+ }
+
+ return true;
+ }
+
+ bool visitArray(const APValue &Val, QualType Ty, CharUnits Offset) {
+ const auto *CAT =
+ dyn_cast_or_null<ConstantArrayType>(Ty->getAsArrayTypeUnsafe());
+ if (!CAT)
+ return false;
+
+ CharUnits ElemWidth = Info.Ctx.getTypeSizeInChars(CAT->getElementType());
+ unsigned NumInitializedElts = Val.getArrayInitializedElts();
+ unsigned ArraySize = Val.getArraySize();
+ // First, initialize the initialized elements.
+ for (unsigned I = 0; I != NumInitializedElts; ++I) {
+ const APValue &SubObj = Val.getArrayInitializedElt(I);
+ if (!visit(SubObj, CAT->getElementType(), Offset + I * ElemWidth))
+ return false;
+ }
+
+ // Next, initialize the rest of the array using the filler.
+ if (Val.hasArrayFiller()) {
+ const APValue &Filler = Val.getArrayFiller();
+ for (unsigned I = NumInitializedElts; I != ArraySize; ++I) {
+ if (!visit(Filler, CAT->getElementType(), Offset + I * ElemWidth))
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ bool visitInt(const APSInt &Val, QualType Ty, CharUnits Offset) {
+ CharUnits Width = Info.Ctx.getTypeSizeInChars(Ty);
+ SmallVector<unsigned char, 8> Bytes(Width.getQuantity());
+ llvm::StoreIntToMemory(Val, &*Bytes.begin(), Width.getQuantity());
+ Buffer.writeObject(Offset, Bytes);
+ return true;
+ }
+
+ bool visitFloat(const APFloat &Val, QualType Ty, CharUnits Offset) {
+ APSInt AsInt(Val.bitcastToAPInt());
+ return visitInt(AsInt, Ty, Offset);
+ }
+
+public:
+ static Optional<BitCastBuffer> convert(EvalInfo &Info, const APValue &Src,
+ const CastExpr *BCE) {
+ CharUnits DstSize = Info.Ctx.getTypeSizeInChars(BCE->getType());
+ APValueToBufferConverter Converter(Info, DstSize, BCE);
+ if (!Converter.visit(Src, BCE->getSubExpr()->getType()))
+ return None;
+ return Converter.Buffer;
+ }
+};
+
+/// Write a BitCastBuffer into an APValue.
+class BufferToAPValueConverter {
+ EvalInfo &Info;
+ const BitCastBuffer &Buffer;
+ const CastExpr *BCE;
+
+ BufferToAPValueConverter(EvalInfo &Info, const BitCastBuffer &Buffer,
+ const CastExpr *BCE)
+ : Info(Info), Buffer(Buffer), BCE(BCE) {}
+
+ // Emit an unsupported bit_cast type error. Sema refuses to build a bit_cast
+ // with an invalid type, so anything left is a deficiency on our part (FIXME).
+ // Ideally this will be unreachable.
+ llvm::NoneType unsupportedType(QualType Ty) {
+ Info.FFDiag(BCE->getBeginLoc(),
+ diag::note_constexpr_bit_cast_unsupported_type)
+ << Ty;
+ return None;
+ }
+
+ Optional<APValue> visit(const BuiltinType *T, CharUnits Offset,
+ const EnumType *EnumSugar = nullptr) {
+ if (T->isNullPtrType()) {
+ uint64_t NullValue = Info.Ctx.getTargetNullPointerValue(QualType(T, 0));
+ return APValue((Expr *)nullptr,
+ /*Offset=*/CharUnits::fromQuantity(NullValue),
+ APValue::NoLValuePath{}, /*IsNullPtr=*/true);
+ }
+
+ CharUnits SizeOf = Info.Ctx.getTypeSizeInChars(T);
+ SmallVector<uint8_t, 8> Bytes;
+ if (!Buffer.readObject(Offset, SizeOf, Bytes)) {
+ // If this is std::byte or unsigned char, then it's okay to store an
+ // indeterminate value.
+ bool IsStdByte = EnumSugar && EnumSugar->isStdByteType();
+ bool IsUChar =
+ !EnumSugar && (T->isSpecificBuiltinType(BuiltinType::UChar) ||
+ T->isSpecificBuiltinType(BuiltinType::Char_U));
+ if (!IsStdByte && !IsUChar) {
+ QualType DisplayType(EnumSugar ? (const Type *)EnumSugar : T, 0);
+ Info.FFDiag(BCE->getExprLoc(),
+ diag::note_constexpr_bit_cast_indet_dest)
+ << DisplayType << Info.Ctx.getLangOpts().CharIsSigned;
+ return None;
+ }
+
+ return APValue::IndeterminateValue();
+ }
+
+ APSInt Val(SizeOf.getQuantity() * Info.Ctx.getCharWidth(), true);
+ llvm::LoadIntFromMemory(Val, &*Bytes.begin(), Bytes.size());
+
+ if (T->isIntegralOrEnumerationType()) {
+ Val.setIsSigned(T->isSignedIntegerOrEnumerationType());
+ return APValue(Val);
+ }
+
+ if (T->isRealFloatingType()) {
+ const llvm::fltSemantics &Semantics =
+ Info.Ctx.getFloatTypeSemantics(QualType(T, 0));
+ return APValue(APFloat(Semantics, Val));
+ }
+
+ return unsupportedType(QualType(T, 0));
+ }
+
+ Optional<APValue> visit(const RecordType *RTy, CharUnits Offset) {
+ const RecordDecl *RD = RTy->getAsRecordDecl();
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+
+ unsigned NumBases = 0;
+ if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ NumBases = CXXRD->getNumBases();
+
+ APValue ResultVal(APValue::UninitStruct(), NumBases,
+ std::distance(RD->field_begin(), RD->field_end()));
+
+ // Visit the base classes.
+ if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (size_t I = 0, E = CXXRD->getNumBases(); I != E; ++I) {
+ const CXXBaseSpecifier &BS = CXXRD->bases_begin()[I];
+ CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();
+ if (BaseDecl->isEmpty() ||
+ Info.Ctx.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
+ continue;
+
+ Optional<APValue> SubObj = visitType(
+ BS.getType(), Layout.getBaseClassOffset(BaseDecl) + Offset);
+ if (!SubObj)
+ return None;
+ ResultVal.getStructBase(I) = *SubObj;
+ }
+ }
+
+ // Visit the fields.
+ unsigned FieldIdx = 0;
+ for (FieldDecl *FD : RD->fields()) {
+ // FIXME: We don't currently support bit-fields. A lot of the logic for
+ // this lives in CodeGen, so it would need to be factored out to be shared here.
+ if (FD->isBitField()) {
+ Info.FFDiag(BCE->getBeginLoc(),
+ diag::note_constexpr_bit_cast_unsupported_bitfield);
+ return None;
+ }
+
+ uint64_t FieldOffsetBits = Layout.getFieldOffset(FieldIdx);
+ assert(FieldOffsetBits % Info.Ctx.getCharWidth() == 0);
+
+ CharUnits FieldOffset =
+ CharUnits::fromQuantity(FieldOffsetBits / Info.Ctx.getCharWidth()) +
+ Offset;
+ QualType FieldTy = FD->getType();
+ Optional<APValue> SubObj = visitType(FieldTy, FieldOffset);
+ if (!SubObj)
+ return None;
+ ResultVal.getStructField(FieldIdx) = *SubObj;
+ ++FieldIdx;
+ }
+
+ return ResultVal;
+ }
+
+ Optional<APValue> visit(const EnumType *Ty, CharUnits Offset) {
+ QualType RepresentationType = Ty->getDecl()->getIntegerType();
+ assert(!RepresentationType.isNull() &&
+ "enum forward decl should be caught by Sema");
+ const BuiltinType *AsBuiltin =
+ RepresentationType.getCanonicalType()->getAs<BuiltinType>();
+ assert(AsBuiltin && "non-integral enum underlying type?");
+ // Recurse into the underlying type. Treat std::byte transparently as
+ // unsigned char.
+ return visit(AsBuiltin, Offset, /*EnumSugar=*/Ty);
+ }
+
+ Optional<APValue> visit(const ConstantArrayType *Ty, CharUnits Offset) {
+ size_t Size = Ty->getSize().getLimitedValue();
+ CharUnits ElementWidth = Info.Ctx.getTypeSizeInChars(Ty->getElementType());
+
+ APValue ArrayValue(APValue::UninitArray(), Size, Size);
+ for (size_t I = 0; I != Size; ++I) {
+ Optional<APValue> ElementValue =
+ visitType(Ty->getElementType(), Offset + I * ElementWidth);
+ if (!ElementValue)
+ return None;
+ ArrayValue.getArrayInitializedElt(I) = std::move(*ElementValue);
+ }
+
+ return ArrayValue;
+ }
+
+ Optional<APValue> visit(const Type *Ty, CharUnits Offset) {
+ return unsupportedType(QualType(Ty, 0));
+ }
+
+ Optional<APValue> visitType(QualType Ty, CharUnits Offset) {
+ QualType Can = Ty.getCanonicalType();
+
+ switch (Can->getTypeClass()) {
+#define TYPE(Class, Base) \
+ case Type::Class: \
+ return visit(cast<Class##Type>(Can.getTypePtr()), Offset);
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base) \
+ case Type::Class: \
+ llvm_unreachable("non-canonical type should be impossible!");
+#define DEPENDENT_TYPE(Class, Base) \
+ case Type::Class: \
+ llvm_unreachable( \
+ "dependent types aren't supported in the constant evaluator!");
+#define NON_CANONICAL_UNLESS_DEPENDENT(Class, Base) \
+ case Type::Class: \
+ llvm_unreachable("either dependent or not canonical!");
+#include "clang/AST/TypeNodes.def"
+ }
+ llvm_unreachable("Unhandled Type::TypeClass");
+ }
+
+public:
+ // Pull out a full value of type DstType.
+ static Optional<APValue> convert(EvalInfo &Info, BitCastBuffer &Buffer,
+ const CastExpr *BCE) {
+ BufferToAPValueConverter Converter(Info, Buffer, BCE);
+ return Converter.visitType(BCE->getType(), CharUnits::fromQuantity(0));
+ }
+};
+
+static bool checkBitCastConstexprEligibilityType(SourceLocation Loc,
+ QualType Ty, EvalInfo *Info,
+ const ASTContext &Ctx,
+ bool CheckingDest) {
+ Ty = Ty.getCanonicalType();
+
+ auto diag = [&](int Reason) {
+ if (Info)
+ Info->FFDiag(Loc, diag::note_constexpr_bit_cast_invalid_type)
+ << CheckingDest << (Reason == 4) << Reason;
+ return false;
+ };
+ auto note = [&](int Construct, QualType NoteTy, SourceLocation NoteLoc) {
+ if (Info)
+ Info->Note(NoteLoc, diag::note_constexpr_bit_cast_invalid_subtype)
+ << NoteTy << Construct << Ty;
+ return false;
+ };
+
+ if (Ty->isUnionType())
+ return diag(0);
+ if (Ty->isPointerType())
+ return diag(1);
+ if (Ty->isMemberPointerType())
+ return diag(2);
+ if (Ty.isVolatileQualified())
+ return diag(3);
+
+ if (RecordDecl *Record = Ty->getAsRecordDecl()) {
+ if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Record)) {
+ for (CXXBaseSpecifier &BS : CXXRD->bases())
+ if (!checkBitCastConstexprEligibilityType(Loc, BS.getType(), Info, Ctx,
+ CheckingDest))
+ return note(1, BS.getType(), BS.getBeginLoc());
+ }
+ for (FieldDecl *FD : Record->fields()) {
+ if (FD->getType()->isReferenceType())
+ return diag(4);
+ if (!checkBitCastConstexprEligibilityType(Loc, FD->getType(), Info, Ctx,
+ CheckingDest))
+ return note(0, FD->getType(), FD->getBeginLoc());
+ }
+ }
+
+ if (Ty->isArrayType() &&
+ !checkBitCastConstexprEligibilityType(Loc, Ctx.getBaseElementType(Ty),
+ Info, Ctx, CheckingDest))
+ return false;
+
+ return true;
+}
+
+static bool checkBitCastConstexprEligibility(EvalInfo *Info,
+ const ASTContext &Ctx,
+ const CastExpr *BCE) {
+ bool DestOK = checkBitCastConstexprEligibilityType(
+ BCE->getBeginLoc(), BCE->getType(), Info, Ctx, true);
+ bool SourceOK = DestOK && checkBitCastConstexprEligibilityType(
+ BCE->getBeginLoc(),
+ BCE->getSubExpr()->getType(), Info, Ctx, false);
+ return SourceOK;
+}
+
+static bool handleLValueToRValueBitCast(EvalInfo &Info, APValue &DestValue,
+ APValue &SourceValue,
+ const CastExpr *BCE) {
+ assert(CHAR_BIT == 8 && Info.Ctx.getTargetInfo().getCharWidth() == 8 &&
+ "no host or target supports non 8-bit chars");
+ assert(SourceValue.isLValue() &&
+ "LValueToRValueBitcast requires an lvalue operand!");
+
+ if (!checkBitCastConstexprEligibility(&Info, Info.Ctx, BCE))
+ return false;
+
+ LValue SourceLValue;
+ APValue SourceRValue;
+ SourceLValue.setFrom(Info.Ctx, SourceValue);
+ if (!handleLValueToRValueConversion(Info, BCE,
+ BCE->getSubExpr()->getType().withConst(),
+ SourceLValue, SourceRValue))
+ return false;
+
+ // Read out SourceValue into a char buffer.
+ Optional<BitCastBuffer> Buffer =
+ APValueToBufferConverter::convert(Info, SourceRValue, BCE);
+ if (!Buffer)
+ return false;
+
+ // Write out the buffer into a new APValue.
+ Optional<APValue> MaybeDestValue =
+ BufferToAPValueConverter::convert(Info, *Buffer, BCE);
+ if (!MaybeDestValue)
+ return false;
+
+ DestValue = std::move(*MaybeDestValue);
+ return true;
+}
+
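To illustrate the user-visible feature these converters add (a sketch, not taken from the patch): __builtin_bit_cast, the builtin underlying std::bit_cast, can now be folded in constant expressions, assuming -std=c++2a, an IEEE-754 'float', and a 32-bit 'unsigned'.

    constexpr float    one  = 1.0f;
    constexpr unsigned bits = __builtin_bit_cast(unsigned, one);
    static_assert(bits == 0x3f800000u);   // IEEE-754 single-precision pattern for 1.0

    constexpr unsigned two_bits = 0x40000000u;
    static_assert(__builtin_bit_cast(float, two_bits) == 2.0f);  // reverse direction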
template <class Derived>
class ExprEvaluatorBase
: public ConstStmtVisitor<Derived, bool> {
@@ -4771,6 +5972,7 @@ public:
{ return StmtVisitorTy::Visit(E->getReplacement()); }
bool VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E) {
TempVersionRAII RAII(*Info.CurrentCall);
+ SourceLocExprScopeGuard Guard(E, Info.CurrentCall->CurSourceLocExprScope);
return StmtVisitorTy::Visit(E->getExpr());
}
bool VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *E) {
@@ -4778,8 +5980,10 @@ public:
// The initializer may not have been parsed yet, or might be erroneous.
if (!E->getExpr())
return Error(E);
+ SourceLocExprScopeGuard Guard(E, Info.CurrentCall->CurSourceLocExprScope);
return StmtVisitorTy::Visit(E->getExpr());
}
+
// We cannot create any objects for which cleanups are required, so there is
// nothing to do here; all cleanups must come from unevaluated subexpressions.
bool VisitExprWithCleanups(const ExprWithCleanups *E)
@@ -4790,7 +5994,11 @@ public:
return static_cast<Derived*>(this)->VisitCastExpr(E);
}
bool VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *E) {
- CCEDiag(E, diag::note_constexpr_invalid_cast) << 1;
+ if (!Info.Ctx.getLangOpts().CPlusPlus2a)
+ CCEDiag(E, diag::note_constexpr_invalid_cast) << 1;
+ return static_cast<Derived*>(this)->VisitCastExpr(E);
+ }
+ bool VisitBuiltinBitCastExpr(const BuiltinBitCastExpr *E) {
return static_cast<Derived*>(this)->VisitCastExpr(E);
}
@@ -4884,25 +6092,26 @@ public:
// Extract function decl and 'this' pointer from the callee.
if (CalleeType->isSpecificBuiltinType(BuiltinType::BoundMember)) {
- const ValueDecl *Member = nullptr;
+ const CXXMethodDecl *Member = nullptr;
if (const MemberExpr *ME = dyn_cast<MemberExpr>(Callee)) {
// Explicit bound member calls, such as x.f() or p->g();
if (!EvaluateObjectArgument(Info, ME->getBase(), ThisVal))
return false;
- Member = ME->getMemberDecl();
+ Member = dyn_cast<CXXMethodDecl>(ME->getMemberDecl());
+ if (!Member)
+ return Error(Callee);
This = &ThisVal;
HasQualifier = ME->hasQualifier();
} else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(Callee)) {
// Indirect bound member calls ('.*' or '->*').
- Member = HandleMemberPointerAccess(Info, BE, ThisVal, false);
- if (!Member) return false;
+ Member = dyn_cast_or_null<CXXMethodDecl>(
+ HandleMemberPointerAccess(Info, BE, ThisVal, false));
+ if (!Member)
+ return Error(Callee);
This = &ThisVal;
} else
return Error(Callee);
-
- FD = dyn_cast<FunctionDecl>(Member);
- if (!FD)
- return Error(Callee);
+ FD = Member;
} else if (CalleeType->isFunctionPointerType()) {
LValue Call;
if (!EvaluatePointer(Callee, Call, Info))
@@ -4969,19 +6178,24 @@ public:
} else
FD = LambdaCallOp;
}
-
-
} else
return Error(E);
- if (This && !This->checkSubobject(Info, E, CSK_This))
- return false;
-
- // DR1358 allows virtual constexpr functions in some cases. Don't allow
- // calls to such functions in constant expressions.
- if (This && !HasQualifier &&
- isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isVirtual())
- return Error(E, diag::note_constexpr_virtual_call);
+ SmallVector<QualType, 4> CovariantAdjustmentPath;
+ if (This) {
+ auto *NamedMember = dyn_cast<CXXMethodDecl>(FD);
+ if (NamedMember && NamedMember->isVirtual() && !HasQualifier) {
+ // Perform virtual dispatch, if necessary.
+ FD = HandleVirtualDispatch(Info, E, *This, NamedMember,
+ CovariantAdjustmentPath);
+ if (!FD)
+ return false;
+ } else {
+ // Check that the 'this' pointer points to an object of the right type.
+ if (!checkNonVirtualMemberCallThisPointer(Info, E, *This))
+ return false;
+ }
+ }
const FunctionDecl *Definition = nullptr;
Stmt *Body = FD->getBody(Definition);
@@ -4991,6 +6205,11 @@ public:
Result, ResultSlot))
return false;
+ if (!CovariantAdjustmentPath.empty() &&
+ !HandleCovariantReturnAdjustment(Info, E, Result,
+ CovariantAdjustmentPath))
+ return false;
+
return true;
}
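The virtual-dispatch path above (HandleVirtualDispatch plus the covariant return adjustment) is what lets C++2a constexpr virtual calls be evaluated; roughly, and not taken from the patch (names are illustrative):

    struct Shape {
      constexpr virtual int sides() const { return 0; }
    };
    struct Triangle : Shape {
      constexpr int sides() const override { return 3; }
    };
    constexpr int count(const Shape &s) { return s.sides(); }  // dispatched on the dynamic type
    static_assert(count(Triangle{}) == 3);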
@@ -5016,6 +6235,8 @@ public:
/// A member expression where the object is a prvalue is itself a prvalue.
bool VisitMemberExpr(const MemberExpr *E) {
+ assert(!Info.Ctx.getLangOpts().CPlusPlus11 &&
+ "missing temporary materialization conversion");
assert(!E->isArrow() && "missing call to bound member function?");
APValue Val;
@@ -5030,7 +6251,10 @@ public:
assert(BaseTy->castAs<RecordType>()->getDecl()->getCanonicalDecl() ==
FD->getParent()->getCanonicalDecl() && "record / field mismatch");
- CompleteObject Obj(&Val, BaseTy, true);
+ // Note: there is no lvalue base here. But this case should only ever
+ // happen in C or in C++98, where we cannot be evaluating a constexpr
+ // constructor, which is the only case in which the base matters.
+ CompleteObject Obj(APValue::LValueBase(), &Val, BaseTy);
SubobjectDesignator Designator(BaseTy);
Designator.addDeclUnchecked(FD);
@@ -5069,6 +6293,14 @@ public:
return false;
return DerivedSuccess(RVal, E);
}
+ case CK_LValueToRValueBitCast: {
+ APValue DestValue, SourceValue;
+ if (!Evaluate(SourceValue, Info, E->getSubExpr()))
+ return false;
+ if (!handleLValueToRValueBitCast(Info, DestValue, SourceValue, E))
+ return false;
+ return DerivedSuccess(DestValue, E);
+ }
}
return Error(E);
@@ -5274,13 +6506,13 @@ public:
// - Literals
// * CompoundLiteralExpr in C (and in global scope in C++)
// * StringLiteral
-// * CXXTypeidExpr
// * PredefinedExpr
// * ObjCStringLiteralExpr
// * ObjCEncodeExpr
// * AddrLabelExpr
// * BlockExpr
// * CallExpr for a MakeStringConstant builtin
+// - typeid(T) expressions, as TypeInfoLValues
// - Locals and temporaries
// * MaterializeTemporaryExpr
// * Any Expr, with a CallIndex indicating the function in which the temporary
@@ -5338,6 +6570,11 @@ public:
if (!Visit(E->getSubExpr()))
return false;
return HandleBaseToDerivedCast(Info, E, Result);
+
+ case CK_Dynamic:
+ if (!Visit(E->getSubExpr()))
+ return false;
+ return HandleDynamicCast(Info, cast<ExplicitCastExpr>(E), Result);
}
}
};
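A small sketch (not from the patch; names are illustrative) of what the new CK_Dynamic handling permits under -std=c++2a: dynamic_cast is checked against the dynamic type tracked by the evaluator, for the glvalue form here and the pointer form handled later in PointerExprEvaluator.

    struct B { virtual void poly() {} };   // polymorphic; 'poly' is never called
    struct D : B {};
    constexpr bool downcasts() {
      D d;
      B &b = d;
      return &dynamic_cast<D &>(b) == &d;
    }
    static_assert(downcasts());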
@@ -5427,7 +6664,9 @@ bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) {
APValue *V;
if (!evaluateVarDeclInit(Info, E, VD, Frame, V, nullptr))
return false;
- if (V->isUninit()) {
+ if (!V->hasValue()) {
+ // FIXME: Is it possible for V to be indeterminate here? If so, we should
+ // adjust the diagnostic to say that.
if (!Info.checkingPotentialConstantExpression())
Info.FFDiag(E, diag::note_constexpr_use_uninit_reference);
return false;
@@ -5510,13 +6749,33 @@ LValueExprEvaluator::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
}
bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
- if (!E->isPotentiallyEvaluated())
- return Success(E);
+ TypeInfoLValue TypeInfo;
- Info.FFDiag(E, diag::note_constexpr_typeid_polymorphic)
- << E->getExprOperand()->getType()
- << E->getExprOperand()->getSourceRange();
- return false;
+ if (!E->isPotentiallyEvaluated()) {
+ if (E->isTypeOperand())
+ TypeInfo = TypeInfoLValue(E->getTypeOperand(Info.Ctx).getTypePtr());
+ else
+ TypeInfo = TypeInfoLValue(E->getExprOperand()->getType().getTypePtr());
+ } else {
+ if (!Info.Ctx.getLangOpts().CPlusPlus2a) {
+ Info.CCEDiag(E, diag::note_constexpr_typeid_polymorphic)
+ << E->getExprOperand()->getType()
+ << E->getExprOperand()->getSourceRange();
+ }
+
+ if (!Visit(E->getExprOperand()))
+ return false;
+
+ Optional<DynamicType> DynType =
+ ComputeDynamicType(Info, E, Result, AK_TypeId);
+ if (!DynType)
+ return false;
+
+ TypeInfo =
+ TypeInfoLValue(Info.Ctx.getRecordType(DynType->Type).getTypePtr());
+ }
+
+ return Success(APValue::LValueBase::getTypeInfo(TypeInfo, E->getType()));
}
bool LValueExprEvaluator::VisitCXXUuidofExpr(const CXXUuidofExpr *E) {
@@ -5634,6 +6893,10 @@ bool LValueExprEvaluator::VisitBinAssign(const BinaryOperator *E) {
if (!Evaluate(NewVal, this->Info, E->getRHS()))
return false;
+ if (Info.getLangOpts().CPlusPlus2a &&
+ !HandleUnionActiveMemberChange(Info, E->getLHS(), Result))
+ return false;
+
return handleAssignment(this->Info, E, Result, E->getLHS()->getType(),
NewVal);
}
@@ -5783,6 +7046,8 @@ public:
bool VisitObjCStringLiteral(const ObjCStringLiteral *E)
{ return Success(E); }
bool VisitObjCBoxedExpr(const ObjCBoxedExpr *E) {
+ if (E->isExpressibleAsConstantInitializer())
+ return Success(E);
if (Info.noteFailure())
EvaluateIgnoredValue(Info, E->getSubExpr());
return Error(E);
@@ -5832,6 +7097,14 @@ public:
return true;
}
+ bool VisitSourceLocExpr(const SourceLocExpr *E) {
+ assert(E->isStringType() && "SourceLocExpr isn't a pointer type?");
+ APValue LValResult = E->EvaluateInContext(
+ Info.Ctx, Info.CurrentCall->CurSourceLocExprScope.getDefaultExpr());
+ Result.setFrom(Info.Ctx, LValResult);
+ return true;
+ }
+
// FIXME: Missing: @protocol, @selector
};
} // end anonymous namespace
@@ -5877,7 +7150,6 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
switch (E->getCastKind()) {
default:
break;
-
case CK_BitCast:
case CK_CPointerToObjCPointerCast:
case CK_BlockPointerToObjCPointerCast:
@@ -5920,6 +7192,11 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
return true;
return HandleBaseToDerivedCast(Info, E, Result);
+ case CK_Dynamic:
+ if (!Visit(E->getSubExpr()))
+ return false;
+ return HandleDynamicCast(Info, cast<ExplicitCastExpr>(E), Result);
+
case CK_NullToPointer:
VisitIgnoredValue(E->getSubExpr());
return ZeroInitialization(E);
@@ -6092,9 +7369,11 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (const ValueDecl *VD =
OffsetResult.Base.dyn_cast<const ValueDecl*>()) {
BaseAlignment = Info.Ctx.getDeclAlign(VD);
+ } else if (const Expr *E = OffsetResult.Base.dyn_cast<const Expr *>()) {
+ BaseAlignment = GetAlignOfExpr(Info, E, UETT_AlignOf);
} else {
- BaseAlignment = GetAlignOfExpr(
- Info, OffsetResult.Base.get<const Expr *>(), UETT_AlignOf);
+ BaseAlignment = GetAlignOfType(
+ Info, OffsetResult.Base.getTypeInfoType(), UETT_AlignOf);
}
if (BaseAlignment < Align) {
@@ -6622,6 +7901,12 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
const RecordDecl *RD = E->getType()->castAs<RecordType>()->getDecl();
if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+ auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
+
+ EvalInfo::EvaluatingConstructorRAII EvalObj(
+ Info,
+ ObjectUnderConstruction{This.getLValueBase(), This.Designator.Entries},
+ CXXRD && CXXRD->getNumBases());
if (RD->isUnion()) {
const FieldDecl *Field = E->getInitializedFieldInUnion();
@@ -6648,15 +7933,14 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
return EvaluateInPlace(Result.getUnionValue(), Info, Subobject, InitExpr);
}
- auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
- if (Result.isUninit())
+ if (!Result.hasValue())
Result = APValue(APValue::UninitStruct(), CXXRD ? CXXRD->getNumBases() : 0,
std::distance(RD->field_begin(), RD->field_end()));
unsigned ElementNo = 0;
bool Success = true;
// Initialize base classes.
- if (CXXRD) {
+ if (CXXRD && CXXRD->getNumBases()) {
for (const auto &Base : CXXRD->bases()) {
assert(ElementNo < E->getNumInits() && "missing init for base class");
const Expr *Init = E->getInit(ElementNo);
@@ -6673,6 +7957,8 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
}
++ElementNo;
}
+
+ EvalObj.finishedConstructingBases();
}
// Initialize members.
@@ -6724,7 +8010,7 @@ bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
bool ZeroInit = E->requiresZeroInitialization();
if (CheckTrivialDefaultConstructor(Info, E->getExprLoc(), FD, ZeroInit)) {
// If we've already performed zero-initialization, we're already done.
- if (!Result.isUninit())
+ if (Result.hasValue())
return true;
// We can get here in two different ways:
@@ -7130,8 +8416,7 @@ namespace {
: ExprEvaluatorBaseTy(Info), This(This), Result(Result) {}
bool Success(const APValue &V, const Expr *E) {
- assert((V.isArray() || V.isLValue()) &&
- "expected array or string literal");
+ assert(V.isArray() && "expected array");
Result = V;
return true;
}
@@ -7162,6 +8447,10 @@ namespace {
bool VisitCXXConstructExpr(const CXXConstructExpr *E,
const LValue &Subobject,
APValue *Value, QualType Type);
+ bool VisitStringLiteral(const StringLiteral *E) {
+ expandStringLiteral(Info, E, Result);
+ return true;
+ }
};
} // end anonymous namespace
@@ -7194,14 +8483,8 @@ bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
// C++11 [dcl.init.string]p1: A char array [...] can be initialized by [...]
// an appropriately-typed string literal enclosed in braces.
- if (E->isStringLiteralInit()) {
- LValue LV;
- if (!EvaluateLValue(E->getInit(0), LV, Info))
- return false;
- APValue Val;
- LV.moveInto(Val);
- return Success(Val, E);
- }
+ if (E->isStringLiteralInit())
+ return Visit(E->getInit(0));
bool Success = true;
@@ -7227,7 +8510,7 @@ bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
// If the array was previously zero-initialized, preserve the
// zero-initialized values.
- if (!Filler.isUninit()) {
+ if (Filler.hasValue()) {
for (unsigned I = 0, E = Result.getArrayInitializedElts(); I != E; ++I)
Result.getArrayInitializedElt(I) = Filler;
if (Result.hasArrayFiller())
@@ -7296,7 +8579,7 @@ bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
const LValue &Subobject,
APValue *Value,
QualType Type) {
- bool HadZeroInit = !Value->isUninit();
+ bool HadZeroInit = Value->hasValue();
if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(Type)) {
unsigned N = CAT->getSize().getZExtValue();
@@ -7391,7 +8674,7 @@ public:
}
bool Success(const APValue &V, const Expr *E) {
- if (V.isLValue() || V.isAddrLabelDiff()) {
+ if (V.isLValue() || V.isAddrLabelDiff() || V.isIndeterminate()) {
Result = V;
return true;
}
@@ -7478,7 +8761,7 @@ public:
bool VisitCXXNoexceptExpr(const CXXNoexceptExpr *E);
bool VisitSizeOfPackExpr(const SizeOfPackExpr *E);
-
+ bool VisitSourceLocExpr(const SourceLocExpr *E);
// FIXME: Missing: array subscript of vector, member of vector
};
@@ -7490,53 +8773,27 @@ class FixedPointExprEvaluator
FixedPointExprEvaluator(EvalInfo &info, APValue &result)
: ExprEvaluatorBaseTy(info), Result(result) {}
- bool Success(const llvm::APSInt &SI, const Expr *E, APValue &Result) {
- assert(E->getType()->isFixedPointType() && "Invalid evaluation result.");
- assert(SI.isSigned() == E->getType()->isSignedFixedPointType() &&
- "Invalid evaluation result.");
- assert(SI.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
- "Invalid evaluation result.");
- Result = APValue(SI);
- return true;
- }
- bool Success(const llvm::APSInt &SI, const Expr *E) {
- return Success(SI, E, Result);
- }
-
- bool Success(const llvm::APInt &I, const Expr *E, APValue &Result) {
- assert(E->getType()->isFixedPointType() && "Invalid evaluation result.");
- assert(I.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
- "Invalid evaluation result.");
- Result = APValue(APSInt(I));
- Result.getInt().setIsUnsigned(E->getType()->isUnsignedFixedPointType());
- return true;
- }
bool Success(const llvm::APInt &I, const Expr *E) {
- return Success(I, E, Result);
+ return Success(
+ APFixedPoint(I, Info.Ctx.getFixedPointSemantics(E->getType())), E);
}
- bool Success(uint64_t Value, const Expr *E, APValue &Result) {
- assert(E->getType()->isFixedPointType() && "Invalid evaluation result.");
- Result = APValue(Info.Ctx.MakeIntValue(Value, E->getType()));
- return true;
- }
bool Success(uint64_t Value, const Expr *E) {
- return Success(Value, E, Result);
- }
-
- bool Success(CharUnits Size, const Expr *E) {
- return Success(Size.getQuantity(), E);
+ return Success(
+ APFixedPoint(Value, Info.Ctx.getFixedPointSemantics(E->getType())), E);
}
bool Success(const APValue &V, const Expr *E) {
- if (V.isLValue() || V.isAddrLabelDiff()) {
- Result = V;
- return true;
- }
- return Success(V.getInt(), E);
+ return Success(V.getFixedPoint(), E);
}
- bool ZeroInitialization(const Expr *E) { return Success(0, E); }
+ bool Success(const APFixedPoint &V, const Expr *E) {
+ assert(E->getType()->isFixedPointType() && "Invalid evaluation result.");
+ assert(V.getWidth() == Info.Ctx.getIntWidth(E->getType()) &&
+ "Invalid evaluation result.");
+ Result = APValue(V);
+ return true;
+ }
//===--------------------------------------------------------------------===//
// Visitor Methods
@@ -7546,7 +8803,9 @@ class FixedPointExprEvaluator
return Success(E->getValue(), E);
}
+ bool VisitCastExpr(const CastExpr *E);
bool VisitUnaryOperator(const UnaryOperator *E);
+ bool VisitBinaryOperator(const BinaryOperator *E);
};
} // end anonymous namespace
@@ -7578,6 +8837,42 @@ static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info) {
return true;
}
+bool IntExprEvaluator::VisitSourceLocExpr(const SourceLocExpr *E) {
+ APValue Evaluated = E->EvaluateInContext(
+ Info.Ctx, Info.CurrentCall->CurSourceLocExprScope.getDefaultExpr());
+ return Success(Evaluated, E);
+}
+
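As a rough illustration (not part of the patch; names are illustrative) of the SourceLocExpr support, including the CurSourceLocExprScope plumbing that resolves source-location builtins appearing in default arguments at the call site:

    constexpr unsigned reportLine(unsigned l = __builtin_LINE()) { return l; }
    static_assert(reportLine(7) == 7);       // explicit argument, nothing special
    constexpr unsigned here = reportLine();  // folds to the line number of this call
    // __builtin_FILE() and __builtin_FUNCTION() are handled analogously by the
    // pointer evaluator's VisitSourceLocExpr.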
+static bool EvaluateFixedPoint(const Expr *E, APFixedPoint &Result,
+ EvalInfo &Info) {
+ if (E->getType()->isFixedPointType()) {
+ APValue Val;
+ if (!FixedPointExprEvaluator(Info, Val).Visit(E))
+ return false;
+ if (!Val.isFixedPoint())
+ return false;
+
+ Result = Val.getFixedPoint();
+ return true;
+ }
+ return false;
+}
+
+static bool EvaluateFixedPointOrInteger(const Expr *E, APFixedPoint &Result,
+ EvalInfo &Info) {
+ if (E->getType()->isIntegerType()) {
+ auto FXSema = Info.Ctx.getFixedPointSemantics(E->getType());
+ APSInt Val;
+ if (!EvaluateInteger(E, Val, Info))
+ return false;
+ Result = APFixedPoint(Val, FXSema);
+ return true;
+ } else if (E->getType()->isFixedPointType()) {
+ return EvaluateFixedPoint(E, Result, Info);
+ }
+ return false;
+}
+
/// Check whether the given declaration can be directly converted to an integral
/// rvalue. If not, no diagnostic is produced; there are other things we can
/// try.
@@ -7783,19 +9078,41 @@ EvaluateBuiltinClassifyType(const CallExpr *E, const LangOptions &LangOpts) {
}
/// EvaluateBuiltinConstantPForLValue - Determine the result of
-/// __builtin_constant_p when applied to the given lvalue.
+/// __builtin_constant_p when applied to the given pointer.
///
-/// An lvalue is only "constant" if it is a pointer or reference to the first
-/// character of a string literal.
-template<typename LValue>
-static bool EvaluateBuiltinConstantPForLValue(const LValue &LV) {
- const Expr *E = LV.getLValueBase().template dyn_cast<const Expr*>();
- return E && isa<StringLiteral>(E) && LV.getLValueOffset().isZero();
+/// A pointer is only "constant" if it is null (or a pointer cast to integer)
+/// or it points to the first character of a string literal.
+static bool EvaluateBuiltinConstantPForLValue(const APValue &LV) {
+ APValue::LValueBase Base = LV.getLValueBase();
+ if (Base.isNull()) {
+ // A null base is acceptable.
+ return true;
+ } else if (const Expr *E = Base.dyn_cast<const Expr *>()) {
+ if (!isa<StringLiteral>(E))
+ return false;
+ return LV.getLValueOffset().isZero();
+ } else if (Base.is<TypeInfoLValue>()) {
+ // Surprisingly, GCC considers __builtin_constant_p(&typeid(int)) to
+ // evaluate to true.
+ return true;
+ } else {
+ // Any other base is not constant enough for GCC.
+ return false;
+ }
}
/// EvaluateBuiltinConstantP - Evaluate __builtin_constant_p as similarly to
/// GCC as we can manage.
-static bool EvaluateBuiltinConstantP(ASTContext &Ctx, const Expr *Arg) {
+static bool EvaluateBuiltinConstantP(EvalInfo &Info, const Expr *Arg) {
+ // This evaluation is not permitted to have side-effects, so evaluate it in
+ // a speculative evaluation context.
+ SpeculativeEvaluationRAII SpeculativeEval(Info);
+
+ // Constant-folding is always enabled for the operand of __builtin_constant_p
+ // (even when the enclosing evaluation context otherwise requires a strict
+ // language-specific constant expression).
+ FoldConstant Fold(Info, true);
+
QualType ArgType = Arg->getType();
// __builtin_constant_p always has one operand. The rules which gcc follows
@@ -7803,34 +9120,30 @@ static bool EvaluateBuiltinConstantP(ASTContext &Ctx, const Expr *Arg) {
//
// - If the operand is of integral, floating, complex or enumeration type,
// and can be folded to a known value of that type, it returns 1.
- // - If the operand and can be folded to a pointer to the first character
- // of a string literal (or such a pointer cast to an integral type), it
- // returns 1.
+ // - If the operand can be folded to a pointer to the first character
+ // of a string literal (or such a pointer cast to an integral type)
+ // or to a null pointer or an integer cast to a pointer, it returns 1.
//
// Otherwise, it returns 0.
//
// FIXME: GCC also intends to return 1 for literals of aggregate types, but
- // its support for this does not currently work.
- if (ArgType->isIntegralOrEnumerationType()) {
- Expr::EvalResult Result;
- if (!Arg->EvaluateAsRValue(Result, Ctx) || Result.HasSideEffects)
+ // its support for this did not work prior to GCC 9 and is not yet well
+ // understood.
+ if (ArgType->isIntegralOrEnumerationType() || ArgType->isFloatingType() ||
+ ArgType->isAnyComplexType() || ArgType->isPointerType() ||
+ ArgType->isNullPtrType()) {
+ APValue V;
+ if (!::EvaluateAsRValue(Info, Arg, V)) {
+ Fold.keepDiagnostics();
return false;
+ }
- APValue &V = Result.Val;
- if (V.getKind() == APValue::Int)
- return true;
+ // For a pointer (possibly cast to integer), there are special rules.
if (V.getKind() == APValue::LValue)
return EvaluateBuiltinConstantPForLValue(V);
- } else if (ArgType->isFloatingType() || ArgType->isAnyComplexType()) {
- return Arg->isEvaluatable(Ctx);
- } else if (ArgType->isPointerType() || Arg->isGLValue()) {
- LValue LV;
- Expr::EvalStatus Status;
- EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantFold);
- if ((Arg->isGLValue() ? EvaluateLValue(Arg, LV, Info)
- : EvaluatePointer(Arg, LV, Info)) &&
- !Status.HasSideEffects)
- return EvaluateBuiltinConstantPForLValue(LV);
+
+ // Otherwise, any constant value is good enough.
+ return V.hasValue();
}
// Anything else isn't considered to be sufficiently constant.
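Concretely (a sketch, not from the patch), the folding rules spelled out above give results like these in a constant context such as a static_assert, assuming -std=c++17 or later:

    static_assert(__builtin_constant_p(42));          // foldable integer
    static_assert(__builtin_constant_p(1.0 + 2.0));   // foldable floating-point value
    static_assert(__builtin_constant_p("abc"));       // pointer to the first character of a string literal
    static_assert(__builtin_constant_p((int *)0));    // null pointer
    // The address of an ordinary global is not "constant enough" for GCC
    // compatibility, so __builtin_constant_p(&some_global) folds to 0.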
@@ -7846,6 +9159,8 @@ static QualType getObjectType(APValue::LValueBase B) {
} else if (const Expr *E = B.get<const Expr*>()) {
if (isa<CompoundLiteralExpr>(E))
return E->getType();
+ } else if (B.is<TypeInfoLValue>()) {
+ return B.getTypeInfoType();
}
return QualType();
@@ -7940,13 +9255,13 @@ static bool isDesignatorAtObjectEnd(const ASTContext &Ctx, const LValue &LVal) {
if (I + 1 == E)
return true;
const auto *CAT = cast<ConstantArrayType>(Ctx.getAsArrayType(BaseType));
- uint64_t Index = Entry.ArrayIndex;
+ uint64_t Index = Entry.getAsArrayIndex();
if (Index + 1 != CAT->getSize())
return false;
BaseType = CAT->getElementType();
} else if (BaseType->isAnyComplexType()) {
const auto *CT = BaseType->castAs<ComplexType>();
- uint64_t Index = Entry.ArrayIndex;
+ uint64_t Index = Entry.getAsArrayIndex();
if (Index != 1)
return false;
BaseType = CT->getElementType();
@@ -8088,7 +9403,7 @@ static bool determineEndOffset(EvalInfo &Info, SourceLocation ExprLoc,
if (Designator.MostDerivedIsArrayElement &&
Designator.Entries.size() == Designator.MostDerivedPathLength) {
uint64_t ArraySize = Designator.getMostDerivedArraySize();
- uint64_t ArrayIndex = Designator.Entries.back().ArrayIndex;
+ uint64_t ArrayIndex = Designator.Entries.back().getAsArrayIndex();
ElemsRemaining = ArraySize <= ArrayIndex ? 0 : ArraySize - ArrayIndex;
} else {
ElemsRemaining = Designator.isOnePastTheEnd() ? 0 : 1;
@@ -8148,6 +9463,8 @@ static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
bool IntExprEvaluator::VisitConstantExpr(const ConstantExpr *E) {
llvm::SaveAndRestore<bool> InConstantContext(Info.InConstantContext, true);
+ if (E->getResultAPValueKind() != APValue::None)
+ return Success(E->getAPValueResult(), E);
return ExprEvaluatorBaseTy::VisitConstantExpr(E);
}
@@ -8164,6 +9481,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
default:
return ExprEvaluatorBaseTy::VisitCallExpr(E);
+ case Builtin::BI__builtin_dynamic_object_size:
case Builtin::BI__builtin_object_size: {
// The type was checked when we built the expression.
unsigned Type =
@@ -8239,20 +9557,22 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
}
case Builtin::BI__builtin_constant_p: {
- auto Arg = E->getArg(0);
- if (EvaluateBuiltinConstantP(Info.Ctx, Arg))
+ const Expr *Arg = E->getArg(0);
+ if (EvaluateBuiltinConstantP(Info, Arg))
return Success(true, E);
- auto ArgTy = Arg->IgnoreImplicit()->getType();
- if (!Info.InConstantContext && !Arg->HasSideEffects(Info.Ctx) &&
- !ArgTy->isAggregateType() && !ArgTy->isPointerType()) {
- // We can delay calculation of __builtin_constant_p until after
- // inlining. Note: This diagnostic won't be shown to the user.
- Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
- return false;
+ if (Info.InConstantContext || Arg->HasSideEffects(Info.Ctx)) {
+ // Outside a constant context, eagerly evaluate to false in the presence
+ // of side-effects in order to avoid -Wunsequenced false-positives in
+ // a branch on __builtin_constant_p(expr).
+ return Success(false, E);
}
- return Success(false, E);
+ Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
}
+ case Builtin::BI__builtin_is_constant_evaluated:
+ return Success(Info.InConstantContext, E);
+
case Builtin::BI__builtin_ctz:
case Builtin::BI__builtin_ctzl:
case Builtin::BI__builtin_ctzll:
@@ -8411,6 +9731,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case Builtin::BIstrncmp:
case Builtin::BIwcsncmp:
case Builtin::BImemcmp:
+ case Builtin::BIbcmp:
case Builtin::BIwmemcmp:
// A call to strlen is not a constant expression.
if (Info.getLangOpts().CPlusPlus11)
@@ -8425,6 +9746,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case Builtin::BI__builtin_strncmp:
case Builtin::BI__builtin_wcsncmp:
case Builtin::BI__builtin_memcmp:
+ case Builtin::BI__builtin_bcmp:
case Builtin::BI__builtin_wmemcmp: {
LValue String1, String2;
if (!EvaluatePointer(E->getArg(0), String1, Info) ||
@@ -8455,7 +9777,9 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
QualType CharTy2 = String2.Designator.getType(Info.Ctx);
bool IsRawByte = BuiltinOp == Builtin::BImemcmp ||
- BuiltinOp == Builtin::BI__builtin_memcmp;
+ BuiltinOp == Builtin::BIbcmp ||
+ BuiltinOp == Builtin::BI__builtin_memcmp ||
+ BuiltinOp == Builtin::BI__builtin_bcmp;
assert(IsRawByte ||
(Info.Ctx.hasSameUnqualifiedType(
@@ -8523,10 +9847,12 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return Success(0, E);
}
- bool StopAtNull = (BuiltinOp != Builtin::BImemcmp &&
- BuiltinOp != Builtin::BIwmemcmp &&
- BuiltinOp != Builtin::BI__builtin_memcmp &&
- BuiltinOp != Builtin::BI__builtin_wmemcmp);
+ bool StopAtNull =
+ (BuiltinOp != Builtin::BImemcmp && BuiltinOp != Builtin::BIbcmp &&
+ BuiltinOp != Builtin::BIwmemcmp &&
+ BuiltinOp != Builtin::BI__builtin_memcmp &&
+ BuiltinOp != Builtin::BI__builtin_bcmp &&
+ BuiltinOp != Builtin::BI__builtin_wmemcmp);
bool IsWide = BuiltinOp == Builtin::BIwcscmp ||
BuiltinOp == Builtin::BIwcsncmp ||
BuiltinOp == Builtin::BIwmemcmp ||
@@ -8654,10 +9980,8 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (IsSigned && !AllSigned)
++MaxBits;
- LHS = APSInt(IsSigned ? LHS.sextOrSelf(MaxBits) : LHS.zextOrSelf(MaxBits),
- !IsSigned);
- RHS = APSInt(IsSigned ? RHS.sextOrSelf(MaxBits) : RHS.zextOrSelf(MaxBits),
- !IsSigned);
+ LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
+ RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
Result = APSInt(MaxBits, !IsSigned);
}
@@ -9123,6 +10447,22 @@ EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E,
return Success(CCR::Equal, E);
}
+ if (LHSTy->isFixedPointType() || RHSTy->isFixedPointType()) {
+ APFixedPoint LHSFX(Info.Ctx.getFixedPointSemantics(LHSTy));
+ APFixedPoint RHSFX(Info.Ctx.getFixedPointSemantics(RHSTy));
+
+ bool LHSOK = EvaluateFixedPointOrInteger(E->getLHS(), LHSFX, Info);
+ if (!LHSOK && !Info.noteFailure())
+ return false;
+ if (!EvaluateFixedPointOrInteger(E->getRHS(), RHSFX, Info) || !LHSOK)
+ return false;
+ if (LHSFX < RHSFX)
+ return Success(CCR::Less, E);
+ if (LHSFX > RHSFX)
+ return Success(CCR::Greater, E);
+ return Success(CCR::Equal, E);
+ }
+
if (LHSTy->isAnyComplexType() || RHSTy->isAnyComplexType()) {
ComplexValue LHS, RHS;
bool LHSOK;
@@ -9739,6 +11079,7 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_AddressSpaceConversion:
case CK_IntToOCLSampler:
case CK_FixedPointCast:
+ case CK_IntegralToFixedPoint:
llvm_unreachable("invalid cast kind for integral value");
case CK_BitCast:
@@ -9755,6 +11096,7 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_LValueToRValue:
case CK_AtomicToNonAtomic:
case CK_NoOp:
+ case CK_LValueToRValueBitCast:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
case CK_MemberPointerToBoolean:
@@ -9773,12 +11115,25 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
return Success(IntResult, E);
}
+ case CK_FixedPointToIntegral: {
+ APFixedPoint Src(Info.Ctx.getFixedPointSemantics(SrcType));
+ if (!EvaluateFixedPoint(SubExpr, Src, Info))
+ return false;
+ bool Overflowed;
+ llvm::APSInt Result = Src.convertToInt(
+ Info.Ctx.getIntWidth(DestType),
+ DestType->isSignedIntegerOrEnumerationType(), &Overflowed);
+ if (Overflowed && !HandleOverflow(Info, E, Result, DestType))
+ return false;
+ return Success(Result, E);
+ }
+
case CK_FixedPointToBoolean: {
// Unsigned padding does not affect this.
APValue Val;
if (!Evaluate(Val, Info, SubExpr))
return false;
- return Success(Val.getInt().getBoolValue(), E);
+ return Success(Val.getFixedPoint().getBoolValue(), E);
}
case CK_IntegralCast: {
@@ -9821,13 +11176,12 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
return true;
}
- uint64_t V;
- if (LV.isNullPointer())
- V = Info.Ctx.getTargetNullPointerValue(SrcType);
- else
- V = LV.getLValueOffset().getQuantity();
+ APSInt AsInt;
+ APValue V;
+ LV.moveInto(V);
+ if (!V.toIntegralConstant(AsInt, SrcType, Info.Ctx))
+ llvm_unreachable("Can't cast this!");
- APSInt AsInt = Info.Ctx.MakeIntValue(V, SrcType);
return Success(HandleIntToIntCast(Info, E, DestType, SrcType, AsInt), E);
}
@@ -9898,16 +11252,13 @@ bool FixedPointExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
return Visit(E->getSubExpr());
case UO_Minus: {
if (!Visit(E->getSubExpr())) return false;
- if (!Result.isInt()) return Error(E);
- const APSInt &Value = Result.getInt();
- if (Value.isSigned() && Value.isMinSignedValue() && E->canOverflow()) {
- SmallString<64> S;
- FixedPointValueToString(S, Value,
- Info.Ctx.getTypeInfo(E->getType()).Width);
- Info.CCEDiag(E, diag::note_constexpr_overflow) << S << E->getType();
- if (Info.noteUndefinedBehavior()) return false;
- }
- return Success(-Value, E);
+ if (!Result.isFixedPoint())
+ return Error(E);
+ bool Overflowed;
+ APFixedPoint Negated = Result.getFixedPoint().negate(&Overflowed);
+ if (Overflowed && !HandleOverflow(Info, E, Negated, E->getType()))
+ return false;
+ return Success(Negated, E);
}
case UO_LNot: {
bool bres;
@@ -9918,6 +11269,75 @@ bool FixedPointExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
}
}
+bool FixedPointExprEvaluator::VisitCastExpr(const CastExpr *E) {
+ const Expr *SubExpr = E->getSubExpr();
+ QualType DestType = E->getType();
+ assert(DestType->isFixedPointType() &&
+ "Expected destination type to be a fixed point type");
+ auto DestFXSema = Info.Ctx.getFixedPointSemantics(DestType);
+
+ switch (E->getCastKind()) {
+ case CK_FixedPointCast: {
+ APFixedPoint Src(Info.Ctx.getFixedPointSemantics(SubExpr->getType()));
+ if (!EvaluateFixedPoint(SubExpr, Src, Info))
+ return false;
+ bool Overflowed;
+ APFixedPoint Result = Src.convert(DestFXSema, &Overflowed);
+ if (Overflowed && !HandleOverflow(Info, E, Result, DestType))
+ return false;
+ return Success(Result, E);
+ }
+ case CK_IntegralToFixedPoint: {
+ APSInt Src;
+ if (!EvaluateInteger(SubExpr, Src, Info))
+ return false;
+
+ bool Overflowed;
+ APFixedPoint IntResult = APFixedPoint::getFromIntValue(
+ Src, Info.Ctx.getFixedPointSemantics(DestType), &Overflowed);
+
+ if (Overflowed && !HandleOverflow(Info, E, IntResult, DestType))
+ return false;
+
+ return Success(IntResult, E);
+ }
+ case CK_NoOp:
+ case CK_LValueToRValue:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+ default:
+ return Error(E);
+ }
+}
+
+bool FixedPointExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ const Expr *LHS = E->getLHS();
+ const Expr *RHS = E->getRHS();
+ FixedPointSemantics ResultFXSema =
+ Info.Ctx.getFixedPointSemantics(E->getType());
+
+ APFixedPoint LHSFX(Info.Ctx.getFixedPointSemantics(LHS->getType()));
+ if (!EvaluateFixedPointOrInteger(LHS, LHSFX, Info))
+ return false;
+ APFixedPoint RHSFX(Info.Ctx.getFixedPointSemantics(RHS->getType()));
+ if (!EvaluateFixedPointOrInteger(RHS, RHSFX, Info))
+ return false;
+
+ switch (E->getOpcode()) {
+ case BO_Add: {
+ bool AddOverflow, ConversionOverflow;
+ APFixedPoint Result = LHSFX.add(RHSFX, &AddOverflow)
+ .convert(ResultFXSema, &ConversionOverflow);
+ if ((AddOverflow || ConversionOverflow) &&
+ !HandleOverflow(Info, E, Result, E->getType()))
+ return false;
+ return Success(Result, E);
+ }
+ default:
+ return false;
+ }
+ llvm_unreachable("Should've exited before this");
+}
+
//===----------------------------------------------------------------------===//
// Float Evaluation
//===----------------------------------------------------------------------===//
@@ -10282,11 +11702,14 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_IntToOCLSampler:
case CK_FixedPointCast:
case CK_FixedPointToBoolean:
+ case CK_FixedPointToIntegral:
+ case CK_IntegralToFixedPoint:
llvm_unreachable("invalid cast kind for complex value");
case CK_LValueToRValue:
case CK_AtomicToNonAtomic:
case CK_NoOp:
+ case CK_LValueToRValueBitCast:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
case CK_Dependent:
@@ -10929,6 +12352,23 @@ static bool EvaluateAsInt(const Expr *E, Expr::EvalResult &ExprResult,
return true;
}
+static bool EvaluateAsFixedPoint(const Expr *E, Expr::EvalResult &ExprResult,
+ const ASTContext &Ctx,
+ Expr::SideEffectsKind AllowSideEffects,
+ EvalInfo &Info) {
+ if (!E->getType()->isFixedPointType())
+ return false;
+
+ if (!::EvaluateAsRValue(E, ExprResult, Ctx, Info))
+ return false;
+
+ if (!ExprResult.Val.isFixedPoint() ||
+ hasUnacceptableSideEffect(ExprResult, AllowSideEffects))
+ return false;
+
+ return true;
+}
+
/// EvaluateAsRValue - Return true if this is a constant which we can fold using
/// any crazy technique (that has nothing to do with language standards) that
/// we want to. If this function returns true, it returns the folded constant
@@ -10936,31 +12376,54 @@ static bool EvaluateAsInt(const Expr *E, Expr::EvalResult &ExprResult,
/// will be applied to the result.
bool Expr::EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx,
bool InConstantContext) const {
+ assert(!isValueDependent() &&
+ "Expression evaluator can't be called on a dependent expression.");
EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects);
Info.InConstantContext = InConstantContext;
return ::EvaluateAsRValue(this, Result, Ctx, Info);
}
-bool Expr::EvaluateAsBooleanCondition(bool &Result,
- const ASTContext &Ctx) const {
+bool Expr::EvaluateAsBooleanCondition(bool &Result, const ASTContext &Ctx,
+ bool InConstantContext) const {
+ assert(!isValueDependent() &&
+ "Expression evaluator can't be called on a dependent expression.");
EvalResult Scratch;
- return EvaluateAsRValue(Scratch, Ctx) &&
+ return EvaluateAsRValue(Scratch, Ctx, InConstantContext) &&
HandleConversionToBool(Scratch.Val, Result);
}
bool Expr::EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx,
- SideEffectsKind AllowSideEffects) const {
+ SideEffectsKind AllowSideEffects,
+ bool InConstantContext) const {
+ assert(!isValueDependent() &&
+ "Expression evaluator can't be called on a dependent expression.");
EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects);
+ Info.InConstantContext = InConstantContext;
return ::EvaluateAsInt(this, Result, Ctx, AllowSideEffects, Info);
}
+bool Expr::EvaluateAsFixedPoint(EvalResult &Result, const ASTContext &Ctx,
+ SideEffectsKind AllowSideEffects,
+ bool InConstantContext) const {
+ assert(!isValueDependent() &&
+ "Expression evaluator can't be called on a dependent expression.");
+ EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects);
+ Info.InConstantContext = InConstantContext;
+ return ::EvaluateAsFixedPoint(this, Result, Ctx, AllowSideEffects, Info);
+}
+
bool Expr::EvaluateAsFloat(APFloat &Result, const ASTContext &Ctx,
- SideEffectsKind AllowSideEffects) const {
+ SideEffectsKind AllowSideEffects,
+ bool InConstantContext) const {
+ assert(!isValueDependent() &&
+ "Expression evaluator can't be called on a dependent expression.");
+
if (!getType()->isRealFloatingType())
return false;
EvalResult ExprResult;
- if (!EvaluateAsRValue(ExprResult, Ctx) || !ExprResult.Val.isFloat() ||
+ if (!EvaluateAsRValue(ExprResult, Ctx, InConstantContext) ||
+ !ExprResult.Val.isFloat() ||
hasUnacceptableSideEffect(ExprResult, AllowSideEffects))
return false;
@@ -10968,9 +12431,13 @@ bool Expr::EvaluateAsFloat(APFloat &Result, const ASTContext &Ctx,
return true;
}
-bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx) const {
- EvalInfo Info(Ctx, Result, EvalInfo::EM_ConstantFold);
+bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx,
+ bool InConstantContext) const {
+ assert(!isValueDependent() &&
+ "Expression evaluator can't be called on a dependent expression.");
+ EvalInfo Info(Ctx, Result, EvalInfo::EM_ConstantFold);
+ Info.InConstantContext = InConstantContext;
LValue LV;
if (!EvaluateLValue(this, LV, Info) || Result.HasSideEffects ||
!CheckLValueConstantExpression(Info, getExprLoc(),
@@ -10984,8 +12451,13 @@ bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx) const {
bool Expr::EvaluateAsConstantExpr(EvalResult &Result, ConstExprUsage Usage,
const ASTContext &Ctx) const {
+ assert(!isValueDependent() &&
+ "Expression evaluator can't be called on a dependent expression.");
+
EvalInfo::EvaluationMode EM = EvalInfo::EM_ConstantExpression;
EvalInfo Info(Ctx, Result, EM);
+ Info.InConstantContext = true;
+
if (!::Evaluate(Result.Val, Info, this))
return false;
@@ -10996,6 +12468,9 @@ bool Expr::EvaluateAsConstantExpr(EvalResult &Result, ConstExprUsage Usage,
bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
const VarDecl *VD,
SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
+ assert(!isValueDependent() &&
+ "Expression evaluator can't be called on a dependent expression.");
+
// FIXME: Evaluating initializers for large array and record types can cause
// performance problems. Only do so in C++11 for now.
if (isRValue() && (getType()->isArrayType() || getType()->isRecordType()) &&
@@ -11038,6 +12513,9 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
/// isEvaluatable - Call EvaluateAsRValue to see if this expression can be
/// constant folded, but discard the result.
bool Expr::isEvaluatable(const ASTContext &Ctx, SideEffectsKind SEK) const {
+ assert(!isValueDependent() &&
+ "Expression evaluator can't be called on a dependent expression.");
+
EvalResult Result;
return EvaluateAsRValue(Result, Ctx, /* in constant context */ true) &&
!hasUnacceptableSideEffect(Result, SEK);
@@ -11045,6 +12523,9 @@ bool Expr::isEvaluatable(const ASTContext &Ctx, SideEffectsKind SEK) const {
APSInt Expr::EvaluateKnownConstInt(const ASTContext &Ctx,
SmallVectorImpl<PartialDiagnosticAt> *Diag) const {
+ assert(!isValueDependent() &&
+ "Expression evaluator can't be called on a dependent expression.");
+
EvalResult EVResult;
EVResult.Diag = Diag;
EvalInfo Info(Ctx, EVResult, EvalInfo::EM_IgnoreSideEffects);
@@ -11060,6 +12541,9 @@ APSInt Expr::EvaluateKnownConstInt(const ASTContext &Ctx,
APSInt Expr::EvaluateKnownConstIntCheckOverflow(
const ASTContext &Ctx, SmallVectorImpl<PartialDiagnosticAt> *Diag) const {
+ assert(!isValueDependent() &&
+ "Expression evaluator can't be called on a dependent expression.");
+
EvalResult EVResult;
EVResult.Diag = Diag;
EvalInfo Info(Ctx, EVResult, EvalInfo::EM_EvaluateForOverflow);
@@ -11074,6 +12558,9 @@ APSInt Expr::EvaluateKnownConstIntCheckOverflow(
}
void Expr::EvaluateForOverflow(const ASTContext &Ctx) const {
+ assert(!isValueDependent() &&
+ "Expression evaluator can't be called on a dependent expression.");
+
bool IsConst;
EvalResult EVResult;
if (!FastEvaluateAsRValue(this, EVResult, Ctx, IsConst)) {
@@ -11244,7 +12731,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::SizeOfPackExprClass:
case Expr::GNUNullExprClass:
- // GCC considers the GNU __null value to be an integral constant expression.
+ case Expr::SourceLocExprClass:
return NoDiag();
case Expr::SubstNonTypeTemplateParmExprClass:
@@ -11525,6 +13012,11 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::ChooseExprClass: {
return CheckICE(cast<ChooseExpr>(E)->getChosenSubExpr(), Ctx);
}
+ case Expr::BuiltinBitCastExprClass: {
+ if (!checkBitCastConstexprEligibility(nullptr, Ctx, cast<CastExpr>(E)))
+ return ICEDiag(IK_NotICE, E->getBeginLoc());
+ return CheckICE(cast<CastExpr>(E)->getSubExpr(), Ctx);
+ }
}
llvm_unreachable("Invalid StmtClass!");
@@ -11555,6 +13047,9 @@ static bool EvaluateCPlusPlus11IntegralConstantExpr(const ASTContext &Ctx,
bool Expr::isIntegerConstantExpr(const ASTContext &Ctx,
SourceLocation *Loc) const {
+ assert(!isValueDependent() &&
+ "Expression evaluator can't be called on a dependent expression.");
+
if (Ctx.getLangOpts().CPlusPlus11)
return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, nullptr, Loc);
@@ -11568,6 +13063,9 @@ bool Expr::isIntegerConstantExpr(const ASTContext &Ctx,
bool Expr::isIntegerConstantExpr(llvm::APSInt &Value, const ASTContext &Ctx,
SourceLocation *Loc, bool isEvaluated) const {
+ assert(!isValueDependent() &&
+ "Expression evaluator can't be called on a dependent expression.");
+
if (Ctx.getLangOpts().CPlusPlus11)
return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, &Value, Loc);
@@ -11591,11 +13089,17 @@ bool Expr::isIntegerConstantExpr(llvm::APSInt &Value, const ASTContext &Ctx,
}
bool Expr::isCXX98IntegralConstantExpr(const ASTContext &Ctx) const {
+ assert(!isValueDependent() &&
+ "Expression evaluator can't be called on a dependent expression.");
+
return CheckICE(this, Ctx).Kind == IK_ICE;
}
bool Expr::isCXX11ConstantExpr(const ASTContext &Ctx, APValue *Result,
SourceLocation *Loc) const {
+ assert(!isValueDependent() &&
+ "Expression evaluator can't be called on a dependent expression.");
+
// We support this checking in C++98 mode in order to diagnose compatibility
// issues.
assert(Ctx.getLangOpts().CPlusPlus);
@@ -11624,8 +13128,12 @@ bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx,
const FunctionDecl *Callee,
ArrayRef<const Expr*> Args,
const Expr *This) const {
+ assert(!isValueDependent() &&
+ "Expression evaluator can't be called on a dependent expression.");
+
Expr::EvalStatus Status;
EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantExpressionUnevaluated);
+ Info.InConstantContext = true;
LValue ThisVal;
const LValue *ThisPtr = nullptr;
@@ -11704,16 +13212,20 @@ bool Expr::isPotentialConstantExprUnevaluated(Expr *E,
const FunctionDecl *FD,
SmallVectorImpl<
PartialDiagnosticAt> &Diags) {
+ assert(!E->isValueDependent() &&
+ "Expression evaluator can't be called on a dependent expression.");
+
Expr::EvalStatus Status;
Status.Diag = &Diags;
EvalInfo Info(FD->getASTContext(), Status,
EvalInfo::EM_PotentialConstantExpressionUnevaluated);
+ Info.InConstantContext = true;
// Fabricate a call stack frame to give the arguments a plausible cover story.
ArrayRef<const Expr*> Args;
ArgVector ArgValues(0);
- bool Success = EvaluateArgs(Args, ArgValues, Info);
+ bool Success = EvaluateArgs(Args, ArgValues, Info, FD);
(void)Success;
assert(Success &&
"Failed to set up arguments for potential constant evaluation");
diff --git a/lib/AST/ExprObjC.cpp b/lib/AST/ExprObjC.cpp
index e1a23f598589..53d0e873f8c9 100644
--- a/lib/AST/ExprObjC.cpp
+++ b/lib/AST/ExprObjC.cpp
@@ -1,9 +1,8 @@
//===- ExprObjC.cpp - (ObjC) Expression AST Node Implementation -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -293,6 +292,32 @@ void ObjCMessageExpr::getSelectorLocs(
SelLocs.push_back(getSelectorLoc(i));
}
+
+QualType ObjCMessageExpr::getCallReturnType(ASTContext &Ctx) const {
+ if (const ObjCMethodDecl *MD = getMethodDecl()) {
+ QualType QT = MD->getReturnType();
+ if (QT == Ctx.getObjCInstanceType()) {
+ // instancetype corresponds to expression types.
+ return getType();
+ }
+ return QT;
+ }
+
+ // Expression type might be different from an expected call return type,
+ // as expression type would never be a reference even if call returns a
+ // reference. Reconstruct the original expression type.
+ QualType QT = getType();
+ switch (getValueKind()) {
+ case VK_LValue:
+ return Ctx.getLValueReferenceType(QT);
+ case VK_XValue:
+ return Ctx.getRValueReferenceType(QT);
+ case VK_RValue:
+ return QT;
+ }
+ llvm_unreachable("Unsupported ExprValueKind");
+}
+
SourceRange ObjCMessageExpr::getReceiverRange() const {
switch (getReceiverKind()) {
case Instance:
@@ -352,6 +377,11 @@ Stmt::child_range ObjCMessageExpr::children() {
reinterpret_cast<Stmt **>(getArgs() + getNumArgs()));
}
+Stmt::const_child_range ObjCMessageExpr::children() const {
+ auto Children = const_cast<ObjCMessageExpr *>(this)->children();
+ return const_child_range(Children.begin(), Children.end());
+}
+
StringRef ObjCBridgedCastExpr::getBridgeKindName() const {
switch (getBridgeKind()) {
case OBC_Bridge:
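
The new ObjCMessageExpr::getCallReturnType above recovers the declared return type of a message send, reintroducing the reference qualification that the expression's own type drops. A small illustrative helper, assuming an AST consumer that already has the message expression and ASTContext in hand; the helper name is hypothetical:

    #include "clang/AST/ASTContext.h"
    #include "clang/AST/ExprObjC.h"
    #include "llvm/Support/raw_ostream.h"

    // Prefer the reconstructed call return type over getType(), which never
    // carries reference-ness even when the called method returns a reference.
    static void printMessageReturnType(const clang::ObjCMessageExpr *ME,
                                       clang::ASTContext &Ctx) {
      clang::QualType RetTy = ME->getCallReturnType(Ctx);
      llvm::errs() << RetTy.getAsString() << "\n";
    }
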
diff --git a/lib/AST/ExternalASTMerger.cpp b/lib/AST/ExternalASTMerger.cpp
index 12e6bfc041a4..61e657da7c91 100644
--- a/lib/AST/ExternalASTMerger.cpp
+++ b/lib/AST/ExternalASTMerger.cpp
@@ -1,9 +1,8 @@
//===- ExternalASTMerger.cpp - Merging External AST Interface ---*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -57,7 +56,12 @@ LookupSameContext(Source<TranslationUnitDecl *> SourceTU, const DeclContext *DC,
}
auto *ND = cast<NamedDecl>(DC);
DeclarationName Name = ND->getDeclName();
- Source<DeclarationName> SourceName = ReverseImporter.Import(Name);
+ auto SourceNameOrErr = ReverseImporter.Import(Name);
+ if (!SourceNameOrErr) {
+ llvm::consumeError(SourceNameOrErr.takeError());
+ return nullptr;
+ }
+ Source<DeclarationName> SourceName = *SourceNameOrErr;
DeclContext::lookup_result SearchResult =
SourceParentDC.get()->lookup(SourceName.get());
size_t SearchResultSize = SearchResult.size();
@@ -111,7 +115,7 @@ public:
/// Whenever a DeclContext is imported, ensure that ExternalASTSource's origin
/// map is kept up to date. Also set the appropriate flags.
- Decl *Imported(Decl *From, Decl *To) override {
+ void Imported(Decl *From, Decl *To) override {
if (auto *ToDC = dyn_cast<DeclContext>(To)) {
const bool LoggingEnabled = Parent.LoggingEnabled();
if (LoggingEnabled)
@@ -154,7 +158,6 @@ public:
ToContainer->getPrimaryContext()->setMustBuildLookupTable();
assert(Parent.CanComplete(ToContainer));
}
- return To;
}
ASTImporter &GetReverse() { return Reverse; }
};
@@ -230,7 +233,7 @@ void ExternalASTMerger::CompleteType(TagDecl *Tag) {
if (!SourceTag->getDefinition())
return false;
Forward.MapImported(SourceTag, Tag);
- if (llvm::Error Err = Forward.ImportDefinition_New(SourceTag))
+ if (llvm::Error Err = Forward.ImportDefinition(SourceTag))
llvm::consumeError(std::move(Err));
Tag->setCompleteDefinition(SourceTag->isCompleteDefinition());
return true;
@@ -250,7 +253,7 @@ void ExternalASTMerger::CompleteType(ObjCInterfaceDecl *Interface) {
if (!SourceInterface->getDefinition())
return false;
Forward.MapImported(SourceInterface, Interface);
- if (llvm::Error Err = Forward.ImportDefinition_New(SourceInterface))
+ if (llvm::Error Err = Forward.ImportDefinition(SourceInterface))
llvm::consumeError(std::move(Err));
return true;
});
@@ -356,9 +359,13 @@ void ExternalASTMerger::RemoveSources(llvm::ArrayRef<ImporterSource> Sources) {
template <typename DeclTy>
static bool importSpecializations(DeclTy *D, ASTImporter *Importer) {
- for (auto *Spec : D->specializations())
- if (!Importer->Import(Spec))
+ for (auto *Spec : D->specializations()) {
+ auto ImportedSpecOrError = Importer->Import(Spec);
+ if (!ImportedSpecOrError) {
+ llvm::consumeError(ImportedSpecOrError.takeError());
return true;
+ }
+ }
return false;
}
@@ -385,15 +392,21 @@ bool ExternalASTMerger::FindExternalVisibleDeclsByName(const DeclContext *DC,
Candidates.push_back(C);
};
- ForEachMatchingDC(DC, [&](ASTImporter &Forward, ASTImporter &Reverse,
- Source<const DeclContext *> SourceDC) -> bool {
- DeclarationName FromName = Reverse.Import(Name);
- DeclContextLookupResult Result = SourceDC.get()->lookup(FromName);
- for (NamedDecl *FromD : Result) {
- FilterFoundDecl(std::make_pair(FromD, &Forward));
- }
- return false;
- });
+ ForEachMatchingDC(DC,
+ [&](ASTImporter &Forward, ASTImporter &Reverse,
+ Source<const DeclContext *> SourceDC) -> bool {
+ auto FromNameOrErr = Reverse.Import(Name);
+ if (!FromNameOrErr) {
+ llvm::consumeError(FromNameOrErr.takeError());
+ return false;
+ }
+ DeclContextLookupResult Result =
+ SourceDC.get()->lookup(*FromNameOrErr);
+ for (NamedDecl *FromD : Result) {
+ FilterFoundDecl(std::make_pair(FromD, &Forward));
+ }
+ return false;
+ });
if (Candidates.empty())
return false;
@@ -402,7 +415,10 @@ bool ExternalASTMerger::FindExternalVisibleDeclsByName(const DeclContext *DC,
for (const Candidate &C : Candidates) {
Decl *LookupRes = C.first.get();
ASTImporter *Importer = C.second;
- NamedDecl *ND = cast_or_null<NamedDecl>(Importer->Import(LookupRes));
+ auto NDOrErr = Importer->Import(LookupRes);
+ assert(NDOrErr);
+ (void)static_cast<bool>(NDOrErr);
+ NamedDecl *ND = cast_or_null<NamedDecl>(*NDOrErr);
assert(ND);
// If we don't import specialization, they are not available via lookup
// because the lookup result is imported TemplateDecl and it does not
@@ -424,9 +440,12 @@ void ExternalASTMerger::FindExternalLexicalDecls(
Source<const DeclContext *> SourceDC) -> bool {
for (const Decl *SourceDecl : SourceDC.get()->decls()) {
if (IsKindWeWant(SourceDecl->getKind())) {
- Decl *ImportedDecl = Forward.Import(const_cast<Decl *>(SourceDecl));
- assert(!ImportedDecl || IsSameDC(ImportedDecl->getDeclContext(), DC));
- (void)ImportedDecl;
+ auto ImportedDeclOrErr = Forward.Import(SourceDecl);
+ if (ImportedDeclOrErr)
+ assert(!(*ImportedDeclOrErr) ||
+ IsSameDC((*ImportedDeclOrErr)->getDeclContext(), DC));
+ else
+ llvm::consumeError(ImportedDeclOrErr.takeError());
}
}
return false;
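
The ExternalASTMerger changes above move every ASTImporter::Import call onto the llvm::Expected-returning interface, where errors must be explicitly consumed or propagated. A minimal sketch of the handling pattern the patch applies throughout; importOrNull is a hypothetical wrapper, not an API of either class:

    #include "clang/AST/ASTImporter.h"
    #include "llvm/Support/Error.h"

    static clang::Decl *importOrNull(clang::ASTImporter &Importer,
                                     clang::Decl *FromD) {
      llvm::Expected<clang::Decl *> ToOrErr = Importer.Import(FromD);
      if (!ToOrErr) {
        // Consume (or propagate) the error; dropping an llvm::Error silently
        // trips its unchecked-error assertion in asserts builds.
        llvm::consumeError(ToOrErr.takeError());
        return nullptr;
      }
      return *ToOrErr;
    }
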
diff --git a/lib/AST/ExternalASTSource.cpp b/lib/AST/ExternalASTSource.cpp
index 40829c2e249d..730102757440 100644
--- a/lib/AST/ExternalASTSource.cpp
+++ b/lib/AST/ExternalASTSource.cpp
@@ -1,9 +1,8 @@
//===- ExternalASTSource.cpp - Abstract External AST Interface ------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/AST/FormatString.cpp b/lib/AST/FormatString.cpp
index 04bd48f14a2a..578d5bc56733 100644
--- a/lib/AST/FormatString.cpp
+++ b/lib/AST/FormatString.cpp
@@ -1,9 +1,8 @@
// FormatString.cpp - Common stuff for handling printf/scanf formats -*- C++ -*-
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -224,6 +223,9 @@ clang::analyze_format_string::ParseLengthModifier(FormatSpecifier &FS,
if (I != E && *I == 'h') {
++I;
lmKind = LengthModifier::AsChar;
+ } else if (I != E && *I == 'l' && LO.OpenCL) {
+ ++I;
+ lmKind = LengthModifier::AsShortLong;
} else {
lmKind = LengthModifier::AsShort;
}
@@ -488,7 +490,8 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
}
ArgType ArgType::makeVectorType(ASTContext &C, unsigned NumElts) const {
- if (K != SpecificTy) // Won't be a valid vector element type.
+ // Check for valid vector element types.
+ if (T.isNull())
return ArgType::Invalid();
QualType Vec = C.getExtVectorType(T, NumElts);
@@ -573,6 +576,8 @@ analyze_format_string::LengthModifier::toString() const {
return "hh";
case AsShort:
return "h";
+ case AsShortLong:
+ return "hl";
case AsLong: // or AsWideChar
return "l";
case AsLongLong:
@@ -708,13 +713,18 @@ void OptionalAmount::toString(raw_ostream &os) const {
}
}
-bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target) const {
+bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target,
+ const LangOptions &LO) const {
switch (LM.getKind()) {
case LengthModifier::None:
return true;
// Handle most integer flags
case LengthModifier::AsShort:
+ // Length modifier only applies to FP vectors.
+ if (LO.OpenCL && CS.isDoubleArg())
+ return !VectorNumElts.isInvalid();
+
if (Target.getTriple().isOSMSVCRT()) {
switch (CS.getKind()) {
case ConversionSpecifier::cArg:
@@ -753,8 +763,18 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target) const {
return false;
}
+ case LengthModifier::AsShortLong:
+ return LO.OpenCL && !VectorNumElts.isInvalid();
+
// Handle 'l' flag
case LengthModifier::AsLong: // or AsWideChar
+ if (CS.isDoubleArg()) {
+ // Invalid for OpenCL FP scalars.
+ if (LO.OpenCL && VectorNumElts.isInvalid())
+ return false;
+ return true;
+ }
+
switch (CS.getKind()) {
case ConversionSpecifier::dArg:
case ConversionSpecifier::DArg:
@@ -765,14 +785,6 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target) const {
case ConversionSpecifier::UArg:
case ConversionSpecifier::xArg:
case ConversionSpecifier::XArg:
- case ConversionSpecifier::aArg:
- case ConversionSpecifier::AArg:
- case ConversionSpecifier::fArg:
- case ConversionSpecifier::FArg:
- case ConversionSpecifier::eArg:
- case ConversionSpecifier::EArg:
- case ConversionSpecifier::gArg:
- case ConversionSpecifier::GArg:
case ConversionSpecifier::nArg:
case ConversionSpecifier::cArg:
case ConversionSpecifier::sArg:
@@ -879,6 +891,7 @@ bool FormatSpecifier::hasStandardLengthModifier() const {
case LengthModifier::AsInt3264:
case LengthModifier::AsInt64:
case LengthModifier::AsWide:
+ case LengthModifier::AsShortLong: // ???
return false;
}
llvm_unreachable("Invalid LengthModifier Kind!");
diff --git a/lib/AST/InheritViz.cpp b/lib/AST/InheritViz.cpp
index 0b82da133fa7..4b3d5bee5631 100644
--- a/lib/AST/InheritViz.cpp
+++ b/lib/AST/InheritViz.cpp
@@ -1,9 +1,8 @@
//===- InheritViz.cpp - Graphviz visualization for inheritance --*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/AST/ItaniumCXXABI.cpp b/lib/AST/ItaniumCXXABI.cpp
index 64580edf004b..727a905d08a1 100644
--- a/lib/AST/ItaniumCXXABI.cpp
+++ b/lib/AST/ItaniumCXXABI.cpp
@@ -1,9 +1,8 @@
//===------- ItaniumCXXABI.cpp - AST support for the Itanium C++ ABI ------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/AST/ItaniumMangle.cpp b/lib/AST/ItaniumMangle.cpp
index 98c843db31d6..6c813f09a4b3 100644
--- a/lib/AST/ItaniumMangle.cpp
+++ b/lib/AST/ItaniumMangle.cpp
@@ -1,9 +1,8 @@
//===--- ItaniumMangle.cpp - Itanium C++ Name Mangling ----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -61,7 +60,8 @@ static const DeclContext *getEffectiveDeclContext(const Decl *D) {
}
const DeclContext *DC = D->getDeclContext();
- if (isa<CapturedDecl>(DC) || isa<OMPDeclareReductionDecl>(DC)) {
+ if (isa<CapturedDecl>(DC) || isa<OMPDeclareReductionDecl>(DC) ||
+ isa<OMPDeclareMapperDecl>(DC)) {
return getEffectiveDeclContext(cast<Decl>(DC));
}
@@ -486,6 +486,7 @@ private:
const AbiTagList *AdditionalAbiTags);
void mangleBlockForPrefix(const BlockDecl *Block);
void mangleUnqualifiedBlock(const BlockDecl *Block);
+ void mangleTemplateParamDecl(const NamedDecl *Decl);
void mangleLambda(const CXXRecordDecl *Lambda);
void mangleNestedName(const NamedDecl *ND, const DeclContext *DC,
const AbiTagList *AdditionalAbiTags,
@@ -537,6 +538,7 @@ private:
unsigned knownArity);
void mangleCastExpression(const Expr *E, StringRef CastEncoding);
void mangleInitListElements(const InitListExpr *InitList);
+ void mangleDeclRefExpr(const NamedDecl *D);
void mangleExpression(const Expr *E, unsigned Arity = UnknownArity);
void mangleCXXCtorType(CXXCtorType T, const CXXRecordDecl *InheritedFrom);
void mangleCXXDtorType(CXXDtorType T);
@@ -1372,7 +1374,8 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
// <unnamed-type-name> ::= <closure-type-name>
//
// <closure-type-name> ::= Ul <lambda-sig> E [ <nonnegative number> ] _
- // <lambda-sig> ::= <parameter-type>+ # Parameter types or 'v' for 'void'.
+ // <lambda-sig> ::= <template-param-decl>* <parameter-type>+
+ // # Parameter types or 'v' for 'void'.
if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(TD)) {
if (Record->isLambda() && Record->getLambdaManglingNumber()) {
assert(!AdditionalAbiTags &&
@@ -1503,7 +1506,7 @@ void CXXNameMangler::mangleNestedName(const NamedDecl *ND,
Out << 'N';
if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(ND)) {
- Qualifiers MethodQuals = Method->getTypeQualifiers();
+ Qualifiers MethodQuals = Method->getMethodQualifiers();
// We do not consider restrict a distinguishing attribute for overloading
// purposes so we must not mangle it.
MethodQuals.removeRestrict();
@@ -1678,6 +1681,24 @@ void CXXNameMangler::mangleUnqualifiedBlock(const BlockDecl *Block) {
Out << '_';
}
+// <template-param-decl>
+// ::= Ty # template type parameter
+// ::= Tn <type> # template non-type parameter
+// ::= Tt <template-param-decl>* E # template template parameter
+void CXXNameMangler::mangleTemplateParamDecl(const NamedDecl *Decl) {
+ if (isa<TemplateTypeParmDecl>(Decl)) {
+ Out << "Ty";
+ } else if (auto *Tn = dyn_cast<NonTypeTemplateParmDecl>(Decl)) {
+ Out << "Tn";
+ mangleType(Tn->getType());
+ } else if (auto *Tt = dyn_cast<TemplateTemplateParmDecl>(Decl)) {
+ Out << "Tt";
+ for (auto *Param : *Tt->getTemplateParameters())
+ mangleTemplateParamDecl(Param);
+ Out << "E";
+ }
+}
+
void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
// If the context of a closure type is an initializer for a class member
// (static or nonstatic), it is encoded in a qualified name with a final
@@ -1705,6 +1726,8 @@ void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
}
Out << "Ul";
+ for (auto *D : Lambda->getLambdaExplicitTemplateParameters())
+ mangleTemplateParamDecl(D);
const FunctionProtoType *Proto = Lambda->getLambdaTypeInfo()->getType()->
getAs<FunctionProtoType>();
mangleBareFunctionType(Proto, /*MangleReturnType=*/false,
@@ -1869,6 +1892,7 @@ void CXXNameMangler::mangleType(TemplateName TN) {
break;
case TemplateName::OverloadedTemplate:
+ case TemplateName::AssumedTemplate:
llvm_unreachable("can't mangle an overloaded template name as a <type>");
case TemplateName::DependentTemplate: {
@@ -1941,6 +1965,7 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
case Type::ObjCTypeParam:
case Type::Atomic:
case Type::Pipe:
+ case Type::MacroQualified:
llvm_unreachable("type is illegal as a nested name specifier");
case Type::SubstTemplateTypeParmPack:
@@ -2007,6 +2032,7 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
}
case TemplateName::OverloadedTemplate:
+ case TemplateName::AssumedTemplate:
case TemplateName::DependentTemplate:
llvm_unreachable("invalid base for a template specialization type");
@@ -2581,17 +2607,22 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
case BuiltinType::Double:
Out << 'd';
break;
- case BuiltinType::LongDouble:
- Out << (getASTContext().getTargetInfo().useFloat128ManglingForLongDouble()
- ? 'g'
- : 'e');
+ case BuiltinType::LongDouble: {
+ const TargetInfo *TI = getASTContext().getLangOpts().OpenMP &&
+ getASTContext().getLangOpts().OpenMPIsDevice
+ ? getASTContext().getAuxTargetInfo()
+ : &getASTContext().getTargetInfo();
+ Out << TI->getLongDoubleMangling();
break;
- case BuiltinType::Float128:
- if (getASTContext().getTargetInfo().useFloat128ManglingForLongDouble())
- Out << "U10__float128"; // Match the GCC mangling
- else
- Out << 'g';
+ }
+ case BuiltinType::Float128: {
+ const TargetInfo *TI = getASTContext().getLangOpts().OpenMP &&
+ getASTContext().getLangOpts().OpenMPIsDevice
+ ? getASTContext().getAuxTargetInfo()
+ : &getASTContext().getTargetInfo();
+ Out << TI->getFloat128Mangling();
break;
+ }
case BuiltinType::NullPtr:
Out << "Dn";
break;
@@ -2735,7 +2766,7 @@ void CXXNameMangler::mangleType(const FunctionProtoType *T) {
// Mangle CV-qualifiers, if present. These are 'this' qualifiers,
// e.g. "const" in "int (A::*)() const".
- mangleQualifiers(T->getTypeQuals());
+ mangleQualifiers(T->getMethodQuals());
// Mangle instantiation-dependent exception-specification, if present,
// per cxx-abi-dev proposal on 2016-10-11.
@@ -2833,7 +2864,10 @@ void CXXNameMangler::mangleBareFunctionType(const FunctionProtoType *Proto,
if (auto *Attr = FD->getParamDecl(I)->getAttr<PassObjectSizeAttr>()) {
// Attr can only take 1 character, so we can hardcode the length below.
assert(Attr->getType() <= 9 && Attr->getType() >= 0);
- Out << "U17pass_object_size" << Attr->getType();
+ if (Attr->isDynamic())
+ Out << "U25pass_dynamic_object_size" << Attr->getType();
+ else
+ Out << "U17pass_object_size" << Attr->getType();
}
}
}
@@ -3471,6 +3505,32 @@ void CXXNameMangler::mangleInitListElements(const InitListExpr *InitList) {
mangleExpression(InitList->getInit(i));
}
+void CXXNameMangler::mangleDeclRefExpr(const NamedDecl *D) {
+ switch (D->getKind()) {
+ default:
+ // <expr-primary> ::= L <mangled-name> E # external name
+ Out << 'L';
+ mangle(D);
+ Out << 'E';
+ break;
+
+ case Decl::ParmVar:
+ mangleFunctionParam(cast<ParmVarDecl>(D));
+ break;
+
+ case Decl::EnumConstant: {
+ const EnumConstantDecl *ED = cast<EnumConstantDecl>(D);
+ mangleIntegerLiteral(ED->getType(), ED->getInitVal());
+ break;
+ }
+
+ case Decl::NonTypeTemplateParm:
+ const NonTypeTemplateParmDecl *PD = cast<NonTypeTemplateParmDecl>(D);
+ mangleTemplateParameter(PD->getIndex());
+ break;
+ }
+}
+
void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
// <expression> ::= <unary operator-name> <expression>
// ::= <binary operator-name> <expression> <expression>
@@ -3561,7 +3621,9 @@ recurse:
case Expr::AsTypeExprClass:
case Expr::PseudoObjectExprClass:
case Expr::AtomicExprClass:
+ case Expr::SourceLocExprClass:
case Expr::FixedPointLiteralClass:
+ case Expr::BuiltinBitCastExprClass:
{
if (!NullOut) {
// As bad as this diagnostic is, it's better than crashing.
@@ -3725,7 +3787,7 @@ recurse:
if (TypeSourceInfo *ScopeInfo = PDE->getScopeTypeInfo()) {
if (Qualifier) {
mangleUnresolvedPrefix(Qualifier,
- /*Recursive=*/true);
+ /*recursive=*/true);
mangleUnresolvedTypeOrSimpleId(ScopeInfo->getType());
Out << 'E';
} else {
@@ -3896,7 +3958,7 @@ recurse:
Diags.Report(DiagID);
return;
}
- case UETT_OpenMPRequiredSimdAlign:
+ case UETT_OpenMPRequiredSimdAlign: {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
@@ -3904,6 +3966,7 @@ recurse:
Diags.Report(DiagID);
return;
}
+ }
if (SAE->isArgumentType()) {
Out << 't';
mangleType(SAE->getArgumentType());
@@ -4060,37 +4123,9 @@ recurse:
mangleExpression(cast<ParenExpr>(E)->getSubExpr(), Arity);
break;
- case Expr::DeclRefExprClass: {
- const NamedDecl *D = cast<DeclRefExpr>(E)->getDecl();
-
- switch (D->getKind()) {
- default:
- // <expr-primary> ::= L <mangled-name> E # external name
- Out << 'L';
- mangle(D);
- Out << 'E';
- break;
-
- case Decl::ParmVar:
- mangleFunctionParam(cast<ParmVarDecl>(D));
- break;
-
- case Decl::EnumConstant: {
- const EnumConstantDecl *ED = cast<EnumConstantDecl>(D);
- mangleIntegerLiteral(ED->getType(), ED->getInitVal());
- break;
- }
-
- case Decl::NonTypeTemplateParm: {
- const NonTypeTemplateParmDecl *PD = cast<NonTypeTemplateParmDecl>(D);
- mangleTemplateParameter(PD->getIndex());
- break;
- }
-
- }
-
+ case Expr::DeclRefExprClass:
+ mangleDeclRefExpr(cast<DeclRefExpr>(E)->getDecl());
break;
- }
case Expr::SubstNonTypeTemplateParmPackExprClass:
// FIXME: not clear how to mangle this!
@@ -4104,7 +4139,7 @@ recurse:
// FIXME: not clear how to mangle this!
const FunctionParmPackExpr *FPPE = cast<FunctionParmPackExpr>(E);
Out << "v110_SUBSTPACK";
- mangleFunctionParam(FPPE->getParameterPack());
+ mangleDeclRefExpr(FPPE->getParameterPack());
break;
}
@@ -4454,7 +4489,7 @@ void CXXNameMangler::mangleTemplateArg(TemplateArgument A) {
// It's possible to end up with a DeclRefExpr here in certain
// dependent cases, in which case we should mangle as a
// declaration.
- const Expr *E = A.getAsExpr()->IgnoreParens();
+ const Expr *E = A.getAsExpr()->IgnoreParenImpCasts();
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
const ValueDecl *D = DRE->getDecl();
if (isa<VarDecl>(D) || isa<FunctionDecl>(D)) {
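
The mangler changes above add the <template-param-decl> production and emit it from mangleLambda for lambdas that carry an explicit template parameter list. An illustrative C++20 snippet that exercises the new path (the variable name is arbitrary):

    // A generic lambda with an explicit template parameter list; per the
    // <lambda-sig> production above, its closure type's mangling now carries
    // a "Ty" entry for the type parameter ahead of the parameter types.
    auto identity = []<typename T>(T value) { return value; };
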
diff --git a/lib/AST/JSONNodeDumper.cpp b/lib/AST/JSONNodeDumper.cpp
new file mode 100644
index 000000000000..04b933b0fb30
--- /dev/null
+++ b/lib/AST/JSONNodeDumper.cpp
@@ -0,0 +1,1569 @@
+#include "clang/AST/JSONNodeDumper.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/StringSwitch.h"
+
+using namespace clang;
+
+void JSONNodeDumper::addPreviousDeclaration(const Decl *D) {
+ switch (D->getKind()) {
+#define DECL(DERIVED, BASE) \
+ case Decl::DERIVED: \
+ return writePreviousDeclImpl(cast<DERIVED##Decl>(D));
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+#undef ABSTRACT_DECL
+#undef DECL
+ }
+ llvm_unreachable("Decl that isn't part of DeclNodes.inc!");
+}
+
+void JSONNodeDumper::Visit(const Attr *A) {
+ const char *AttrName = nullptr;
+ switch (A->getKind()) {
+#define ATTR(X) \
+ case attr::X: \
+ AttrName = #X"Attr"; \
+ break;
+#include "clang/Basic/AttrList.inc"
+#undef ATTR
+ }
+ JOS.attribute("id", createPointerRepresentation(A));
+ JOS.attribute("kind", AttrName);
+ JOS.attributeObject("range", [A, this] { writeSourceRange(A->getRange()); });
+ attributeOnlyIfTrue("inherited", A->isInherited());
+ attributeOnlyIfTrue("implicit", A->isImplicit());
+
+ // FIXME: it would be useful for us to output the spelling kind as well as
+ // the actual spelling. This would allow us to distinguish between the
+ // various attribute syntaxes, but we don't currently track that information
+ // within the AST.
+ //JOS.attribute("spelling", A->getSpelling());
+
+ InnerAttrVisitor::Visit(A);
+}
+
+void JSONNodeDumper::Visit(const Stmt *S) {
+ if (!S)
+ return;
+
+ JOS.attribute("id", createPointerRepresentation(S));
+ JOS.attribute("kind", S->getStmtClassName());
+ JOS.attributeObject("range",
+ [S, this] { writeSourceRange(S->getSourceRange()); });
+
+ if (const auto *E = dyn_cast<Expr>(S)) {
+ JOS.attribute("type", createQualType(E->getType()));
+ const char *Category = nullptr;
+ switch (E->getValueKind()) {
+ case VK_LValue: Category = "lvalue"; break;
+ case VK_XValue: Category = "xvalue"; break;
+ case VK_RValue: Category = "rvalue"; break;
+ }
+ JOS.attribute("valueCategory", Category);
+ }
+ InnerStmtVisitor::Visit(S);
+}
+
+void JSONNodeDumper::Visit(const Type *T) {
+ JOS.attribute("id", createPointerRepresentation(T));
+ JOS.attribute("kind", (llvm::Twine(T->getTypeClassName()) + "Type").str());
+ JOS.attribute("type", createQualType(QualType(T, 0), /*Desugar*/ false));
+ attributeOnlyIfTrue("isDependent", T->isDependentType());
+ attributeOnlyIfTrue("isInstantiationDependent",
+ T->isInstantiationDependentType());
+ attributeOnlyIfTrue("isVariablyModified", T->isVariablyModifiedType());
+ attributeOnlyIfTrue("containsUnexpandedPack",
+ T->containsUnexpandedParameterPack());
+ attributeOnlyIfTrue("isImported", T->isFromAST());
+ InnerTypeVisitor::Visit(T);
+}
+
+void JSONNodeDumper::Visit(QualType T) {
+ JOS.attribute("id", createPointerRepresentation(T.getAsOpaquePtr()));
+ JOS.attribute("kind", "QualType");
+ JOS.attribute("type", createQualType(T));
+ JOS.attribute("qualifiers", T.split().Quals.getAsString());
+}
+
+void JSONNodeDumper::Visit(const Decl *D) {
+ JOS.attribute("id", createPointerRepresentation(D));
+
+ if (!D)
+ return;
+
+ JOS.attribute("kind", (llvm::Twine(D->getDeclKindName()) + "Decl").str());
+ JOS.attributeObject("loc",
+ [D, this] { writeSourceLocation(D->getLocation()); });
+ JOS.attributeObject("range",
+ [D, this] { writeSourceRange(D->getSourceRange()); });
+ attributeOnlyIfTrue("isImplicit", D->isImplicit());
+ attributeOnlyIfTrue("isInvalid", D->isInvalidDecl());
+
+ if (D->isUsed())
+ JOS.attribute("isUsed", true);
+ else if (D->isThisDeclarationReferenced())
+ JOS.attribute("isReferenced", true);
+
+ if (const auto *ND = dyn_cast<NamedDecl>(D))
+ attributeOnlyIfTrue("isHidden", ND->isHidden());
+
+ if (D->getLexicalDeclContext() != D->getDeclContext())
+ JOS.attribute("parentDeclContext",
+ createPointerRepresentation(D->getDeclContext()));
+
+ addPreviousDeclaration(D);
+ InnerDeclVisitor::Visit(D);
+}
+
+void JSONNodeDumper::Visit(const comments::Comment *C,
+ const comments::FullComment *FC) {
+ if (!C)
+ return;
+
+ JOS.attribute("id", createPointerRepresentation(C));
+ JOS.attribute("kind", C->getCommentKindName());
+ JOS.attributeObject("loc",
+ [C, this] { writeSourceLocation(C->getLocation()); });
+ JOS.attributeObject("range",
+ [C, this] { writeSourceRange(C->getSourceRange()); });
+
+ InnerCommentVisitor::visit(C, FC);
+}
+
+void JSONNodeDumper::Visit(const TemplateArgument &TA, SourceRange R,
+ const Decl *From, StringRef Label) {
+ JOS.attribute("kind", "TemplateArgument");
+ if (R.isValid())
+ JOS.attributeObject("range", [R, this] { writeSourceRange(R); });
+
+ if (From)
+ JOS.attribute(Label.empty() ? "fromDecl" : Label, createBareDeclRef(From));
+
+ InnerTemplateArgVisitor::Visit(TA);
+}
+
+void JSONNodeDumper::Visit(const CXXCtorInitializer *Init) {
+ JOS.attribute("kind", "CXXCtorInitializer");
+ if (Init->isAnyMemberInitializer())
+ JOS.attribute("anyInit", createBareDeclRef(Init->getAnyMember()));
+ else if (Init->isBaseInitializer())
+ JOS.attribute("baseInit",
+ createQualType(QualType(Init->getBaseClass(), 0)));
+ else if (Init->isDelegatingInitializer())
+ JOS.attribute("delegatingInit",
+ createQualType(Init->getTypeSourceInfo()->getType()));
+ else
+ llvm_unreachable("Unknown initializer type");
+}
+
+void JSONNodeDumper::Visit(const OMPClause *C) {}
+
+void JSONNodeDumper::Visit(const BlockDecl::Capture &C) {
+ JOS.attribute("kind", "Capture");
+ attributeOnlyIfTrue("byref", C.isByRef());
+ attributeOnlyIfTrue("nested", C.isNested());
+ if (C.getVariable())
+ JOS.attribute("var", createBareDeclRef(C.getVariable()));
+}
+
+void JSONNodeDumper::Visit(const GenericSelectionExpr::ConstAssociation &A) {
+ JOS.attribute("associationKind", A.getTypeSourceInfo() ? "case" : "default");
+ attributeOnlyIfTrue("selected", A.isSelected());
+}
+
+void JSONNodeDumper::writeBareSourceLocation(SourceLocation Loc,
+ bool IsSpelling) {
+ PresumedLoc Presumed = SM.getPresumedLoc(Loc);
+ unsigned ActualLine = IsSpelling ? SM.getSpellingLineNumber(Loc)
+ : SM.getExpansionLineNumber(Loc);
+ if (Presumed.isValid()) {
+ if (LastLocFilename != Presumed.getFilename()) {
+ JOS.attribute("file", Presumed.getFilename());
+ JOS.attribute("line", ActualLine);
+ } else if (LastLocLine != ActualLine)
+ JOS.attribute("line", ActualLine);
+
+ unsigned PresumedLine = Presumed.getLine();
+ if (ActualLine != PresumedLine && LastLocPresumedLine != PresumedLine)
+ JOS.attribute("presumedLine", PresumedLine);
+
+ JOS.attribute("col", Presumed.getColumn());
+ JOS.attribute("tokLen",
+ Lexer::MeasureTokenLength(Loc, SM, Ctx.getLangOpts()));
+ LastLocFilename = Presumed.getFilename();
+ LastLocPresumedLine = PresumedLine;
+ LastLocLine = ActualLine;
+ }
+}
+
+void JSONNodeDumper::writeSourceLocation(SourceLocation Loc) {
+ SourceLocation Spelling = SM.getSpellingLoc(Loc);
+ SourceLocation Expansion = SM.getExpansionLoc(Loc);
+
+ if (Expansion != Spelling) {
+ // If the expansion and the spelling are different, output subobjects
+ // describing both locations.
+ JOS.attributeObject("spellingLoc", [Spelling, this] {
+ writeBareSourceLocation(Spelling, /*IsSpelling*/ true);
+ });
+ JOS.attributeObject("expansionLoc", [Expansion, Loc, this] {
+ writeBareSourceLocation(Expansion, /*IsSpelling*/ false);
+ // If there is a macro expansion, add extra information if the interesting
+ // bit is the macro arg expansion.
+ if (SM.isMacroArgExpansion(Loc))
+ JOS.attribute("isMacroArgExpansion", true);
+ });
+ } else
+ writeBareSourceLocation(Spelling, /*IsSpelling*/ true);
+}
+
+void JSONNodeDumper::writeSourceRange(SourceRange R) {
+ JOS.attributeObject("begin",
+ [R, this] { writeSourceLocation(R.getBegin()); });
+ JOS.attributeObject("end", [R, this] { writeSourceLocation(R.getEnd()); });
+}
+
+std::string JSONNodeDumper::createPointerRepresentation(const void *Ptr) {
+ // Because JSON stores integer values as signed 64-bit integers, trying to
+ // represent them as such makes for very ugly pointer values in the resulting
+ // output. Instead, we convert the value to hex and treat it as a string.
+ return "0x" + llvm::utohexstr(reinterpret_cast<uint64_t>(Ptr), true);
+}
+
+llvm::json::Object JSONNodeDumper::createQualType(QualType QT, bool Desugar) {
+ SplitQualType SQT = QT.split();
+ llvm::json::Object Ret{{"qualType", QualType::getAsString(SQT, PrintPolicy)}};
+
+ if (Desugar && !QT.isNull()) {
+ SplitQualType DSQT = QT.getSplitDesugaredType();
+ if (DSQT != SQT)
+ Ret["desugaredQualType"] = QualType::getAsString(DSQT, PrintPolicy);
+ }
+ return Ret;
+}
+
+void JSONNodeDumper::writeBareDeclRef(const Decl *D) {
+ JOS.attribute("id", createPointerRepresentation(D));
+ if (!D)
+ return;
+
+ JOS.attribute("kind", (llvm::Twine(D->getDeclKindName()) + "Decl").str());
+ if (const auto *ND = dyn_cast<NamedDecl>(D))
+ JOS.attribute("name", ND->getDeclName().getAsString());
+ if (const auto *VD = dyn_cast<ValueDecl>(D))
+ JOS.attribute("type", createQualType(VD->getType()));
+}
+
+llvm::json::Object JSONNodeDumper::createBareDeclRef(const Decl *D) {
+ llvm::json::Object Ret{{"id", createPointerRepresentation(D)}};
+ if (!D)
+ return Ret;
+
+ Ret["kind"] = (llvm::Twine(D->getDeclKindName()) + "Decl").str();
+ if (const auto *ND = dyn_cast<NamedDecl>(D))
+ Ret["name"] = ND->getDeclName().getAsString();
+ if (const auto *VD = dyn_cast<ValueDecl>(D))
+ Ret["type"] = createQualType(VD->getType());
+ return Ret;
+}
+
+llvm::json::Array JSONNodeDumper::createCastPath(const CastExpr *C) {
+ llvm::json::Array Ret;
+ if (C->path_empty())
+ return Ret;
+
+ for (auto I = C->path_begin(), E = C->path_end(); I != E; ++I) {
+ const CXXBaseSpecifier *Base = *I;
+ const auto *RD =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ llvm::json::Object Val{{"name", RD->getName()}};
+ if (Base->isVirtual())
+ Val["isVirtual"] = true;
+ Ret.push_back(std::move(Val));
+ }
+ return Ret;
+}
+
+#define FIELD2(Name, Flag) if (RD->Flag()) Ret[Name] = true
+#define FIELD1(Flag) FIELD2(#Flag, Flag)
+
+static llvm::json::Object
+createDefaultConstructorDefinitionData(const CXXRecordDecl *RD) {
+ llvm::json::Object Ret;
+
+ FIELD2("exists", hasDefaultConstructor);
+ FIELD2("trivial", hasTrivialDefaultConstructor);
+ FIELD2("nonTrivial", hasNonTrivialDefaultConstructor);
+ FIELD2("userProvided", hasUserProvidedDefaultConstructor);
+ FIELD2("isConstexpr", hasConstexprDefaultConstructor);
+ FIELD2("needsImplicit", needsImplicitDefaultConstructor);
+ FIELD2("defaultedIsConstexpr", defaultedDefaultConstructorIsConstexpr);
+
+ return Ret;
+}
+
+static llvm::json::Object
+createCopyConstructorDefinitionData(const CXXRecordDecl *RD) {
+ llvm::json::Object Ret;
+
+ FIELD2("simple", hasSimpleCopyConstructor);
+ FIELD2("trivial", hasTrivialCopyConstructor);
+ FIELD2("nonTrivial", hasNonTrivialCopyConstructor);
+ FIELD2("userDeclared", hasUserDeclaredCopyConstructor);
+ FIELD2("hasConstParam", hasCopyConstructorWithConstParam);
+ FIELD2("implicitHasConstParam", implicitCopyConstructorHasConstParam);
+ FIELD2("needsImplicit", needsImplicitCopyConstructor);
+ FIELD2("needsOverloadResolution", needsOverloadResolutionForCopyConstructor);
+ if (!RD->needsOverloadResolutionForCopyConstructor())
+ FIELD2("defaultedIsDeleted", defaultedCopyConstructorIsDeleted);
+
+ return Ret;
+}
+
+static llvm::json::Object
+createMoveConstructorDefinitionData(const CXXRecordDecl *RD) {
+ llvm::json::Object Ret;
+
+ FIELD2("exists", hasMoveConstructor);
+ FIELD2("simple", hasSimpleMoveConstructor);
+ FIELD2("trivial", hasTrivialMoveConstructor);
+ FIELD2("nonTrivial", hasNonTrivialMoveConstructor);
+ FIELD2("userDeclared", hasUserDeclaredMoveConstructor);
+ FIELD2("needsImplicit", needsImplicitMoveConstructor);
+ FIELD2("needsOverloadResolution", needsOverloadResolutionForMoveConstructor);
+ if (!RD->needsOverloadResolutionForMoveConstructor())
+ FIELD2("defaultedIsDeleted", defaultedMoveConstructorIsDeleted);
+
+ return Ret;
+}
+
+static llvm::json::Object
+createCopyAssignmentDefinitionData(const CXXRecordDecl *RD) {
+ llvm::json::Object Ret;
+
+ FIELD2("trivial", hasTrivialCopyAssignment);
+ FIELD2("nonTrivial", hasNonTrivialCopyAssignment);
+ FIELD2("hasConstParam", hasCopyAssignmentWithConstParam);
+ FIELD2("implicitHasConstParam", implicitCopyAssignmentHasConstParam);
+ FIELD2("userDeclared", hasUserDeclaredCopyAssignment);
+ FIELD2("needsImplicit", needsImplicitCopyAssignment);
+ FIELD2("needsOverloadResolution", needsOverloadResolutionForCopyAssignment);
+
+ return Ret;
+}
+
+static llvm::json::Object
+createMoveAssignmentDefinitionData(const CXXRecordDecl *RD) {
+ llvm::json::Object Ret;
+
+ FIELD2("exists", hasMoveAssignment);
+ FIELD2("simple", hasSimpleMoveAssignment);
+ FIELD2("trivial", hasTrivialMoveAssignment);
+ FIELD2("nonTrivial", hasNonTrivialMoveAssignment);
+ FIELD2("userDeclared", hasUserDeclaredMoveAssignment);
+ FIELD2("needsImplicit", needsImplicitMoveAssignment);
+ FIELD2("needsOverloadResolution", needsOverloadResolutionForMoveAssignment);
+
+ return Ret;
+}
+
+static llvm::json::Object
+createDestructorDefinitionData(const CXXRecordDecl *RD) {
+ llvm::json::Object Ret;
+
+ FIELD2("simple", hasSimpleDestructor);
+ FIELD2("irrelevant", hasIrrelevantDestructor);
+ FIELD2("trivial", hasTrivialDestructor);
+ FIELD2("nonTrivial", hasNonTrivialDestructor);
+ FIELD2("userDeclared", hasUserDeclaredDestructor);
+ FIELD2("needsImplicit", needsImplicitDestructor);
+ FIELD2("needsOverloadResolution", needsOverloadResolutionForDestructor);
+ if (!RD->needsOverloadResolutionForDestructor())
+ FIELD2("defaultedIsDeleted", defaultedDestructorIsDeleted);
+
+ return Ret;
+}
+
+llvm::json::Object
+JSONNodeDumper::createCXXRecordDefinitionData(const CXXRecordDecl *RD) {
+ llvm::json::Object Ret;
+
+ // This data is common to all C++ classes.
+ FIELD1(isGenericLambda);
+ FIELD1(isLambda);
+ FIELD1(isEmpty);
+ FIELD1(isAggregate);
+ FIELD1(isStandardLayout);
+ FIELD1(isTriviallyCopyable);
+ FIELD1(isPOD);
+ FIELD1(isTrivial);
+ FIELD1(isPolymorphic);
+ FIELD1(isAbstract);
+ FIELD1(isLiteral);
+ FIELD1(canPassInRegisters);
+ FIELD1(hasUserDeclaredConstructor);
+ FIELD1(hasConstexprNonCopyMoveConstructor);
+ FIELD1(hasMutableFields);
+ FIELD1(hasVariantMembers);
+ FIELD2("canConstDefaultInit", allowConstDefaultInit);
+
+ Ret["defaultCtor"] = createDefaultConstructorDefinitionData(RD);
+ Ret["copyCtor"] = createCopyConstructorDefinitionData(RD);
+ Ret["moveCtor"] = createMoveConstructorDefinitionData(RD);
+ Ret["copyAssign"] = createCopyAssignmentDefinitionData(RD);
+ Ret["moveAssign"] = createMoveAssignmentDefinitionData(RD);
+ Ret["dtor"] = createDestructorDefinitionData(RD);
+
+ return Ret;
+}
+
+#undef FIELD1
+#undef FIELD2
+
+std::string JSONNodeDumper::createAccessSpecifier(AccessSpecifier AS) {
+ switch (AS) {
+ case AS_none: return "none";
+ case AS_private: return "private";
+ case AS_protected: return "protected";
+ case AS_public: return "public";
+ }
+ llvm_unreachable("Unknown access specifier");
+}
+
+llvm::json::Object
+JSONNodeDumper::createCXXBaseSpecifier(const CXXBaseSpecifier &BS) {
+ llvm::json::Object Ret;
+
+ Ret["type"] = createQualType(BS.getType());
+ Ret["access"] = createAccessSpecifier(BS.getAccessSpecifier());
+ Ret["writtenAccess"] =
+ createAccessSpecifier(BS.getAccessSpecifierAsWritten());
+ if (BS.isVirtual())
+ Ret["isVirtual"] = true;
+ if (BS.isPackExpansion())
+ Ret["isPackExpansion"] = true;
+
+ return Ret;
+}
+
+void JSONNodeDumper::VisitTypedefType(const TypedefType *TT) {
+ JOS.attribute("decl", createBareDeclRef(TT->getDecl()));
+}
+
+void JSONNodeDumper::VisitFunctionType(const FunctionType *T) {
+ FunctionType::ExtInfo E = T->getExtInfo();
+ attributeOnlyIfTrue("noreturn", E.getNoReturn());
+ attributeOnlyIfTrue("producesResult", E.getProducesResult());
+ if (E.getHasRegParm())
+ JOS.attribute("regParm", E.getRegParm());
+ JOS.attribute("cc", FunctionType::getNameForCallConv(E.getCC()));
+}
+
+void JSONNodeDumper::VisitFunctionProtoType(const FunctionProtoType *T) {
+ FunctionProtoType::ExtProtoInfo E = T->getExtProtoInfo();
+ attributeOnlyIfTrue("trailingReturn", E.HasTrailingReturn);
+ attributeOnlyIfTrue("const", T->isConst());
+ attributeOnlyIfTrue("volatile", T->isVolatile());
+ attributeOnlyIfTrue("restrict", T->isRestrict());
+ attributeOnlyIfTrue("variadic", E.Variadic);
+ switch (E.RefQualifier) {
+ case RQ_LValue: JOS.attribute("refQualifier", "&"); break;
+ case RQ_RValue: JOS.attribute("refQualifier", "&&"); break;
+ case RQ_None: break;
+ }
+ switch (E.ExceptionSpec.Type) {
+ case EST_DynamicNone:
+ case EST_Dynamic: {
+ JOS.attribute("exceptionSpec", "throw");
+ llvm::json::Array Types;
+ for (QualType QT : E.ExceptionSpec.Exceptions)
+ Types.push_back(createQualType(QT));
+ JOS.attribute("exceptionTypes", std::move(Types));
+ } break;
+ case EST_MSAny:
+ JOS.attribute("exceptionSpec", "throw");
+ JOS.attribute("throwsAny", true);
+ break;
+ case EST_BasicNoexcept:
+ JOS.attribute("exceptionSpec", "noexcept");
+ break;
+ case EST_NoexceptTrue:
+ case EST_NoexceptFalse:
+ JOS.attribute("exceptionSpec", "noexcept");
+ JOS.attribute("conditionEvaluatesTo",
+ E.ExceptionSpec.Type == EST_NoexceptTrue);
+ //JOS.attributeWithCall("exceptionSpecExpr",
+ // [this, E]() { Visit(E.ExceptionSpec.NoexceptExpr); });
+ break;
+ case EST_NoThrow:
+ JOS.attribute("exceptionSpec", "nothrow");
+ break;
+ // FIXME: I cannot find a way to trigger these cases while dumping the AST. I
+ // suspect you can only run into them when executing an AST dump from within
+ // the debugger, which is not a use case we worry about for the JSON dumping
+ // feature.
+ case EST_DependentNoexcept:
+ case EST_Unevaluated:
+ case EST_Uninstantiated:
+ case EST_Unparsed:
+ case EST_None: break;
+ }
+ VisitFunctionType(T);
+}
+
+void JSONNodeDumper::VisitRValueReferenceType(const ReferenceType *RT) {
+ attributeOnlyIfTrue("spelledAsLValue", RT->isSpelledAsLValue());
+}
+
+void JSONNodeDumper::VisitArrayType(const ArrayType *AT) {
+ switch (AT->getSizeModifier()) {
+ case ArrayType::Star:
+ JOS.attribute("sizeModifier", "*");
+ break;
+ case ArrayType::Static:
+ JOS.attribute("sizeModifier", "static");
+ break;
+ case ArrayType::Normal:
+ break;
+ }
+
+ std::string Str = AT->getIndexTypeQualifiers().getAsString();
+ if (!Str.empty())
+ JOS.attribute("indexTypeQualifiers", Str);
+}
+
+void JSONNodeDumper::VisitConstantArrayType(const ConstantArrayType *CAT) {
+ // FIXME: this should use ZExt instead of SExt, but JSON doesn't allow a
+ // narrowing conversion to int64_t so it cannot be expressed.
+ JOS.attribute("size", CAT->getSize().getSExtValue());
+ VisitArrayType(CAT);
+}
+
+void JSONNodeDumper::VisitDependentSizedExtVectorType(
+ const DependentSizedExtVectorType *VT) {
+ JOS.attributeObject(
+ "attrLoc", [VT, this] { writeSourceLocation(VT->getAttributeLoc()); });
+}
+
+void JSONNodeDumper::VisitVectorType(const VectorType *VT) {
+ JOS.attribute("numElements", VT->getNumElements());
+ switch (VT->getVectorKind()) {
+ case VectorType::GenericVector:
+ break;
+ case VectorType::AltiVecVector:
+ JOS.attribute("vectorKind", "altivec");
+ break;
+ case VectorType::AltiVecPixel:
+ JOS.attribute("vectorKind", "altivec pixel");
+ break;
+ case VectorType::AltiVecBool:
+ JOS.attribute("vectorKind", "altivec bool");
+ break;
+ case VectorType::NeonVector:
+ JOS.attribute("vectorKind", "neon");
+ break;
+ case VectorType::NeonPolyVector:
+ JOS.attribute("vectorKind", "neon poly");
+ break;
+ }
+}
+
+void JSONNodeDumper::VisitUnresolvedUsingType(const UnresolvedUsingType *UUT) {
+ JOS.attribute("decl", createBareDeclRef(UUT->getDecl()));
+}
+
+void JSONNodeDumper::VisitUnaryTransformType(const UnaryTransformType *UTT) {
+ switch (UTT->getUTTKind()) {
+ case UnaryTransformType::EnumUnderlyingType:
+ JOS.attribute("transformKind", "underlying_type");
+ break;
+ }
+}
+
+void JSONNodeDumper::VisitTagType(const TagType *TT) {
+ JOS.attribute("decl", createBareDeclRef(TT->getDecl()));
+}
+
+void JSONNodeDumper::VisitTemplateTypeParmType(
+ const TemplateTypeParmType *TTPT) {
+ JOS.attribute("depth", TTPT->getDepth());
+ JOS.attribute("index", TTPT->getIndex());
+ attributeOnlyIfTrue("isPack", TTPT->isParameterPack());
+ JOS.attribute("decl", createBareDeclRef(TTPT->getDecl()));
+}
+
+void JSONNodeDumper::VisitAutoType(const AutoType *AT) {
+ JOS.attribute("undeduced", !AT->isDeduced());
+ switch (AT->getKeyword()) {
+ case AutoTypeKeyword::Auto:
+ JOS.attribute("typeKeyword", "auto");
+ break;
+ case AutoTypeKeyword::DecltypeAuto:
+ JOS.attribute("typeKeyword", "decltype(auto)");
+ break;
+ case AutoTypeKeyword::GNUAutoType:
+ JOS.attribute("typeKeyword", "__auto_type");
+ break;
+ }
+}
+
+void JSONNodeDumper::VisitTemplateSpecializationType(
+ const TemplateSpecializationType *TST) {
+ attributeOnlyIfTrue("isAlias", TST->isTypeAlias());
+
+ std::string Str;
+ llvm::raw_string_ostream OS(Str);
+ TST->getTemplateName().print(OS, PrintPolicy);
+ JOS.attribute("templateName", OS.str());
+}
+
+void JSONNodeDumper::VisitInjectedClassNameType(
+ const InjectedClassNameType *ICNT) {
+ JOS.attribute("decl", createBareDeclRef(ICNT->getDecl()));
+}
+
+void JSONNodeDumper::VisitObjCInterfaceType(const ObjCInterfaceType *OIT) {
+ JOS.attribute("decl", createBareDeclRef(OIT->getDecl()));
+}
+
+void JSONNodeDumper::VisitPackExpansionType(const PackExpansionType *PET) {
+ if (llvm::Optional<unsigned> N = PET->getNumExpansions())
+ JOS.attribute("numExpansions", *N);
+}
+
+void JSONNodeDumper::VisitElaboratedType(const ElaboratedType *ET) {
+ if (const NestedNameSpecifier *NNS = ET->getQualifier()) {
+ std::string Str;
+ llvm::raw_string_ostream OS(Str);
+ NNS->print(OS, PrintPolicy, /*ResolveTemplateArgs*/ true);
+ JOS.attribute("qualifier", OS.str());
+ }
+ if (const TagDecl *TD = ET->getOwnedTagDecl())
+ JOS.attribute("ownedTagDecl", createBareDeclRef(TD));
+}
+
+void JSONNodeDumper::VisitMacroQualifiedType(const MacroQualifiedType *MQT) {
+ JOS.attribute("macroName", MQT->getMacroIdentifier()->getName());
+}
+
+void JSONNodeDumper::VisitMemberPointerType(const MemberPointerType *MPT) {
+ attributeOnlyIfTrue("isData", MPT->isMemberDataPointer());
+ attributeOnlyIfTrue("isFunction", MPT->isMemberFunctionPointer());
+}
+
+void JSONNodeDumper::VisitNamedDecl(const NamedDecl *ND) {
+ if (ND && ND->getDeclName())
+ JOS.attribute("name", ND->getNameAsString());
+}
+
+void JSONNodeDumper::VisitTypedefDecl(const TypedefDecl *TD) {
+ VisitNamedDecl(TD);
+ JOS.attribute("type", createQualType(TD->getUnderlyingType()));
+}
+
+void JSONNodeDumper::VisitTypeAliasDecl(const TypeAliasDecl *TAD) {
+ VisitNamedDecl(TAD);
+ JOS.attribute("type", createQualType(TAD->getUnderlyingType()));
+}
+
+void JSONNodeDumper::VisitNamespaceDecl(const NamespaceDecl *ND) {
+ VisitNamedDecl(ND);
+ attributeOnlyIfTrue("isInline", ND->isInline());
+ if (!ND->isOriginalNamespace())
+ JOS.attribute("originalNamespace",
+ createBareDeclRef(ND->getOriginalNamespace()));
+}
+
+void JSONNodeDumper::VisitUsingDirectiveDecl(const UsingDirectiveDecl *UDD) {
+ JOS.attribute("nominatedNamespace",
+ createBareDeclRef(UDD->getNominatedNamespace()));
+}
+
+void JSONNodeDumper::VisitNamespaceAliasDecl(const NamespaceAliasDecl *NAD) {
+ VisitNamedDecl(NAD);
+ JOS.attribute("aliasedNamespace",
+ createBareDeclRef(NAD->getAliasedNamespace()));
+}
+
+void JSONNodeDumper::VisitUsingDecl(const UsingDecl *UD) {
+ std::string Name;
+ if (const NestedNameSpecifier *NNS = UD->getQualifier()) {
+ llvm::raw_string_ostream SOS(Name);
+ NNS->print(SOS, UD->getASTContext().getPrintingPolicy());
+ }
+ Name += UD->getNameAsString();
+ JOS.attribute("name", Name);
+}
+
+void JSONNodeDumper::VisitUsingShadowDecl(const UsingShadowDecl *USD) {
+ JOS.attribute("target", createBareDeclRef(USD->getTargetDecl()));
+}
+
+void JSONNodeDumper::VisitVarDecl(const VarDecl *VD) {
+ VisitNamedDecl(VD);
+ JOS.attribute("type", createQualType(VD->getType()));
+
+ StorageClass SC = VD->getStorageClass();
+ if (SC != SC_None)
+ JOS.attribute("storageClass", VarDecl::getStorageClassSpecifierString(SC));
+ switch (VD->getTLSKind()) {
+ case VarDecl::TLS_Dynamic: JOS.attribute("tls", "dynamic"); break;
+ case VarDecl::TLS_Static: JOS.attribute("tls", "static"); break;
+ case VarDecl::TLS_None: break;
+ }
+ attributeOnlyIfTrue("nrvo", VD->isNRVOVariable());
+ attributeOnlyIfTrue("inline", VD->isInline());
+ attributeOnlyIfTrue("constexpr", VD->isConstexpr());
+ attributeOnlyIfTrue("modulePrivate", VD->isModulePrivate());
+ if (VD->hasInit()) {
+ switch (VD->getInitStyle()) {
+ case VarDecl::CInit: JOS.attribute("init", "c"); break;
+ case VarDecl::CallInit: JOS.attribute("init", "call"); break;
+ case VarDecl::ListInit: JOS.attribute("init", "list"); break;
+ }
+ }
+ attributeOnlyIfTrue("isParameterPack", VD->isParameterPack());
+}
+
+void JSONNodeDumper::VisitFieldDecl(const FieldDecl *FD) {
+ VisitNamedDecl(FD);
+ JOS.attribute("type", createQualType(FD->getType()));
+ attributeOnlyIfTrue("mutable", FD->isMutable());
+ attributeOnlyIfTrue("modulePrivate", FD->isModulePrivate());
+ attributeOnlyIfTrue("isBitfield", FD->isBitField());
+ attributeOnlyIfTrue("hasInClassInitializer", FD->hasInClassInitializer());
+}
+
+void JSONNodeDumper::VisitFunctionDecl(const FunctionDecl *FD) {
+ VisitNamedDecl(FD);
+ JOS.attribute("type", createQualType(FD->getType()));
+ StorageClass SC = FD->getStorageClass();
+ if (SC != SC_None)
+ JOS.attribute("storageClass", VarDecl::getStorageClassSpecifierString(SC));
+ attributeOnlyIfTrue("inline", FD->isInlineSpecified());
+ attributeOnlyIfTrue("virtual", FD->isVirtualAsWritten());
+ attributeOnlyIfTrue("pure", FD->isPure());
+ attributeOnlyIfTrue("explicitlyDeleted", FD->isDeletedAsWritten());
+ attributeOnlyIfTrue("constexpr", FD->isConstexpr());
+ attributeOnlyIfTrue("variadic", FD->isVariadic());
+
+ if (FD->isDefaulted())
+ JOS.attribute("explicitlyDefaulted",
+ FD->isDeleted() ? "deleted" : "default");
+}
+
+void JSONNodeDumper::VisitEnumDecl(const EnumDecl *ED) {
+ VisitNamedDecl(ED);
+ if (ED->isFixed())
+ JOS.attribute("fixedUnderlyingType", createQualType(ED->getIntegerType()));
+ if (ED->isScoped())
+ JOS.attribute("scopedEnumTag",
+ ED->isScopedUsingClassTag() ? "class" : "struct");
+}
+void JSONNodeDumper::VisitEnumConstantDecl(const EnumConstantDecl *ECD) {
+ VisitNamedDecl(ECD);
+ JOS.attribute("type", createQualType(ECD->getType()));
+}
+
+void JSONNodeDumper::VisitRecordDecl(const RecordDecl *RD) {
+ VisitNamedDecl(RD);
+ JOS.attribute("tagUsed", RD->getKindName());
+ attributeOnlyIfTrue("completeDefinition", RD->isCompleteDefinition());
+}
+void JSONNodeDumper::VisitCXXRecordDecl(const CXXRecordDecl *RD) {
+ VisitRecordDecl(RD);
+
+ // All other information requires a complete definition.
+ if (!RD->isCompleteDefinition())
+ return;
+
+ JOS.attribute("definitionData", createCXXRecordDefinitionData(RD));
+ if (RD->getNumBases()) {
+ JOS.attributeArray("bases", [this, RD] {
+ for (const auto &Spec : RD->bases())
+ JOS.value(createCXXBaseSpecifier(Spec));
+ });
+ }
+}
+
+void JSONNodeDumper::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) {
+ VisitNamedDecl(D);
+ JOS.attribute("tagUsed", D->wasDeclaredWithTypename() ? "typename" : "class");
+ JOS.attribute("depth", D->getDepth());
+ JOS.attribute("index", D->getIndex());
+ attributeOnlyIfTrue("isParameterPack", D->isParameterPack());
+
+ if (D->hasDefaultArgument())
+ JOS.attributeObject("defaultArg", [=] {
+ Visit(D->getDefaultArgument(), SourceRange(),
+ D->getDefaultArgStorage().getInheritedFrom(),
+ D->defaultArgumentWasInherited() ? "inherited from" : "previous");
+ });
+}
+
+void JSONNodeDumper::VisitNonTypeTemplateParmDecl(
+ const NonTypeTemplateParmDecl *D) {
+ VisitNamedDecl(D);
+ JOS.attribute("type", createQualType(D->getType()));
+ JOS.attribute("depth", D->getDepth());
+ JOS.attribute("index", D->getIndex());
+ attributeOnlyIfTrue("isParameterPack", D->isParameterPack());
+
+ if (D->hasDefaultArgument())
+ JOS.attributeObject("defaultArg", [=] {
+ Visit(D->getDefaultArgument(), SourceRange(),
+ D->getDefaultArgStorage().getInheritedFrom(),
+ D->defaultArgumentWasInherited() ? "inherited from" : "previous");
+ });
+}
+
+void JSONNodeDumper::VisitTemplateTemplateParmDecl(
+ const TemplateTemplateParmDecl *D) {
+ VisitNamedDecl(D);
+ JOS.attribute("depth", D->getDepth());
+ JOS.attribute("index", D->getIndex());
+ attributeOnlyIfTrue("isParameterPack", D->isParameterPack());
+
+ if (D->hasDefaultArgument())
+ JOS.attributeObject("defaultArg", [=] {
+ Visit(D->getDefaultArgument().getArgument(),
+ D->getDefaultArgStorage().getInheritedFrom()->getSourceRange(),
+ D->getDefaultArgStorage().getInheritedFrom(),
+ D->defaultArgumentWasInherited() ? "inherited from" : "previous");
+ });
+}
+
+void JSONNodeDumper::VisitLinkageSpecDecl(const LinkageSpecDecl *LSD) {
+ StringRef Lang;
+ switch (LSD->getLanguage()) {
+ case LinkageSpecDecl::lang_c: Lang = "C"; break;
+ case LinkageSpecDecl::lang_cxx: Lang = "C++"; break;
+ }
+ JOS.attribute("language", Lang);
+ attributeOnlyIfTrue("hasBraces", LSD->hasBraces());
+}
+
+void JSONNodeDumper::VisitAccessSpecDecl(const AccessSpecDecl *ASD) {
+ JOS.attribute("access", createAccessSpecifier(ASD->getAccess()));
+}
+
+void JSONNodeDumper::VisitFriendDecl(const FriendDecl *FD) {
+ if (const TypeSourceInfo *T = FD->getFriendType())
+ JOS.attribute("type", createQualType(T->getType()));
+}
+
+void JSONNodeDumper::VisitObjCIvarDecl(const ObjCIvarDecl *D) {
+ VisitNamedDecl(D);
+ JOS.attribute("type", createQualType(D->getType()));
+ attributeOnlyIfTrue("synthesized", D->getSynthesize());
+ switch (D->getAccessControl()) {
+ case ObjCIvarDecl::None: JOS.attribute("access", "none"); break;
+ case ObjCIvarDecl::Private: JOS.attribute("access", "private"); break;
+ case ObjCIvarDecl::Protected: JOS.attribute("access", "protected"); break;
+ case ObjCIvarDecl::Public: JOS.attribute("access", "public"); break;
+ case ObjCIvarDecl::Package: JOS.attribute("access", "package"); break;
+ }
+}
+
+void JSONNodeDumper::VisitObjCMethodDecl(const ObjCMethodDecl *D) {
+ VisitNamedDecl(D);
+ JOS.attribute("returnType", createQualType(D->getReturnType()));
+ JOS.attribute("instance", D->isInstanceMethod());
+ attributeOnlyIfTrue("variadic", D->isVariadic());
+}
+
+void JSONNodeDumper::VisitObjCTypeParamDecl(const ObjCTypeParamDecl *D) {
+ VisitNamedDecl(D);
+ JOS.attribute("type", createQualType(D->getUnderlyingType()));
+ attributeOnlyIfTrue("bounded", D->hasExplicitBound());
+ switch (D->getVariance()) {
+ case ObjCTypeParamVariance::Invariant:
+ break;
+ case ObjCTypeParamVariance::Covariant:
+ JOS.attribute("variance", "covariant");
+ break;
+ case ObjCTypeParamVariance::Contravariant:
+ JOS.attribute("variance", "contravariant");
+ break;
+ }
+}
+
+void JSONNodeDumper::VisitObjCCategoryDecl(const ObjCCategoryDecl *D) {
+ VisitNamedDecl(D);
+ JOS.attribute("interface", createBareDeclRef(D->getClassInterface()));
+ JOS.attribute("implementation", createBareDeclRef(D->getImplementation()));
+
+ llvm::json::Array Protocols;
+ for (const auto* P : D->protocols())
+ Protocols.push_back(createBareDeclRef(P));
+ if (!Protocols.empty())
+ JOS.attribute("protocols", std::move(Protocols));
+}
+
+void JSONNodeDumper::VisitObjCCategoryImplDecl(const ObjCCategoryImplDecl *D) {
+ VisitNamedDecl(D);
+ JOS.attribute("interface", createBareDeclRef(D->getClassInterface()));
+ JOS.attribute("categoryDecl", createBareDeclRef(D->getCategoryDecl()));
+}
+
+void JSONNodeDumper::VisitObjCProtocolDecl(const ObjCProtocolDecl *D) {
+ VisitNamedDecl(D);
+
+ llvm::json::Array Protocols;
+ for (const auto *P : D->protocols())
+ Protocols.push_back(createBareDeclRef(P));
+ if (!Protocols.empty())
+ JOS.attribute("protocols", std::move(Protocols));
+}
+
+void JSONNodeDumper::VisitObjCInterfaceDecl(const ObjCInterfaceDecl *D) {
+ VisitNamedDecl(D);
+ JOS.attribute("super", createBareDeclRef(D->getSuperClass()));
+ JOS.attribute("implementation", createBareDeclRef(D->getImplementation()));
+
+ llvm::json::Array Protocols;
+ for (const auto* P : D->protocols())
+ Protocols.push_back(createBareDeclRef(P));
+ if (!Protocols.empty())
+ JOS.attribute("protocols", std::move(Protocols));
+}
+
+void JSONNodeDumper::VisitObjCImplementationDecl(
+ const ObjCImplementationDecl *D) {
+ VisitNamedDecl(D);
+ JOS.attribute("super", createBareDeclRef(D->getSuperClass()));
+ JOS.attribute("interface", createBareDeclRef(D->getClassInterface()));
+}
+
+void JSONNodeDumper::VisitObjCCompatibleAliasDecl(
+ const ObjCCompatibleAliasDecl *D) {
+ VisitNamedDecl(D);
+ JOS.attribute("interface", createBareDeclRef(D->getClassInterface()));
+}
+
+void JSONNodeDumper::VisitObjCPropertyDecl(const ObjCPropertyDecl *D) {
+ VisitNamedDecl(D);
+ JOS.attribute("type", createQualType(D->getType()));
+
+ switch (D->getPropertyImplementation()) {
+ case ObjCPropertyDecl::None: break;
+ case ObjCPropertyDecl::Required: JOS.attribute("control", "required"); break;
+ case ObjCPropertyDecl::Optional: JOS.attribute("control", "optional"); break;
+ }
+
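+  // Emit getter/setter references and one entry per property attribute that
+  // is set.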
+ ObjCPropertyDecl::PropertyAttributeKind Attrs = D->getPropertyAttributes();
+ if (Attrs != ObjCPropertyDecl::OBJC_PR_noattr) {
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_getter)
+ JOS.attribute("getter", createBareDeclRef(D->getGetterMethodDecl()));
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_setter)
+ JOS.attribute("setter", createBareDeclRef(D->getSetterMethodDecl()));
+ attributeOnlyIfTrue("readonly", Attrs & ObjCPropertyDecl::OBJC_PR_readonly);
+ attributeOnlyIfTrue("assign", Attrs & ObjCPropertyDecl::OBJC_PR_assign);
+ attributeOnlyIfTrue("readwrite",
+ Attrs & ObjCPropertyDecl::OBJC_PR_readwrite);
+ attributeOnlyIfTrue("retain", Attrs & ObjCPropertyDecl::OBJC_PR_retain);
+ attributeOnlyIfTrue("copy", Attrs & ObjCPropertyDecl::OBJC_PR_copy);
+ attributeOnlyIfTrue("nonatomic",
+ Attrs & ObjCPropertyDecl::OBJC_PR_nonatomic);
+ attributeOnlyIfTrue("atomic", Attrs & ObjCPropertyDecl::OBJC_PR_atomic);
+ attributeOnlyIfTrue("weak", Attrs & ObjCPropertyDecl::OBJC_PR_weak);
+ attributeOnlyIfTrue("strong", Attrs & ObjCPropertyDecl::OBJC_PR_strong);
+ attributeOnlyIfTrue("unsafe_unretained",
+ Attrs & ObjCPropertyDecl::OBJC_PR_unsafe_unretained);
+ attributeOnlyIfTrue("class", Attrs & ObjCPropertyDecl::OBJC_PR_class);
+ attributeOnlyIfTrue("nullability",
+ Attrs & ObjCPropertyDecl::OBJC_PR_nullability);
+ attributeOnlyIfTrue("null_resettable",
+ Attrs & ObjCPropertyDecl::OBJC_PR_null_resettable);
+ }
+}
+
+void JSONNodeDumper::VisitObjCPropertyImplDecl(const ObjCPropertyImplDecl *D) {
+ VisitNamedDecl(D->getPropertyDecl());
+ JOS.attribute("implKind", D->getPropertyImplementation() ==
+ ObjCPropertyImplDecl::Synthesize
+ ? "synthesize"
+ : "dynamic");
+ JOS.attribute("propertyDecl", createBareDeclRef(D->getPropertyDecl()));
+ JOS.attribute("ivarDecl", createBareDeclRef(D->getPropertyIvarDecl()));
+}
+
+void JSONNodeDumper::VisitBlockDecl(const BlockDecl *D) {
+ attributeOnlyIfTrue("variadic", D->isVariadic());
+ attributeOnlyIfTrue("capturesThis", D->capturesCXXThis());
+}
+
+void JSONNodeDumper::VisitObjCEncodeExpr(const ObjCEncodeExpr *OEE) {
+ JOS.attribute("encodedType", createQualType(OEE->getEncodedType()));
+}
+
+void JSONNodeDumper::VisitObjCMessageExpr(const ObjCMessageExpr *OME) {
+ std::string Str;
+ llvm::raw_string_ostream OS(Str);
+
+ OME->getSelector().print(OS);
+ JOS.attribute("selector", OS.str());
+
+ switch (OME->getReceiverKind()) {
+ case ObjCMessageExpr::Instance:
+ JOS.attribute("receiverKind", "instance");
+ break;
+ case ObjCMessageExpr::Class:
+ JOS.attribute("receiverKind", "class");
+ JOS.attribute("classType", createQualType(OME->getClassReceiver()));
+ break;
+ case ObjCMessageExpr::SuperInstance:
+ JOS.attribute("receiverKind", "super (instance)");
+ JOS.attribute("superType", createQualType(OME->getSuperType()));
+ break;
+ case ObjCMessageExpr::SuperClass:
+ JOS.attribute("receiverKind", "super (class)");
+ JOS.attribute("superType", createQualType(OME->getSuperType()));
+ break;
+ }
+
+ QualType CallReturnTy = OME->getCallReturnType(Ctx);
+ if (OME->getType() != CallReturnTy)
+ JOS.attribute("callReturnType", createQualType(CallReturnTy));
+}
+
+void JSONNodeDumper::VisitObjCBoxedExpr(const ObjCBoxedExpr *OBE) {
+ if (const ObjCMethodDecl *MD = OBE->getBoxingMethod()) {
+ std::string Str;
+ llvm::raw_string_ostream OS(Str);
+
+ MD->getSelector().print(OS);
+ JOS.attribute("selector", OS.str());
+ }
+}
+
+void JSONNodeDumper::VisitObjCSelectorExpr(const ObjCSelectorExpr *OSE) {
+ std::string Str;
+ llvm::raw_string_ostream OS(Str);
+
+ OSE->getSelector().print(OS);
+ JOS.attribute("selector", OS.str());
+}
+
+void JSONNodeDumper::VisitObjCProtocolExpr(const ObjCProtocolExpr *OPE) {
+ JOS.attribute("protocol", createBareDeclRef(OPE->getProtocol()));
+}
+
+void JSONNodeDumper::VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *OPRE) {
+ if (OPRE->isImplicitProperty()) {
+ JOS.attribute("propertyKind", "implicit");
+ if (const ObjCMethodDecl *MD = OPRE->getImplicitPropertyGetter())
+ JOS.attribute("getter", createBareDeclRef(MD));
+ if (const ObjCMethodDecl *MD = OPRE->getImplicitPropertySetter())
+ JOS.attribute("setter", createBareDeclRef(MD));
+ } else {
+ JOS.attribute("propertyKind", "explicit");
+ JOS.attribute("property", createBareDeclRef(OPRE->getExplicitProperty()));
+ }
+
+ attributeOnlyIfTrue("isSuperReceiver", OPRE->isSuperReceiver());
+ attributeOnlyIfTrue("isMessagingGetter", OPRE->isMessagingGetter());
+ attributeOnlyIfTrue("isMessagingSetter", OPRE->isMessagingSetter());
+}
+
+void JSONNodeDumper::VisitObjCSubscriptRefExpr(
+ const ObjCSubscriptRefExpr *OSRE) {
+ JOS.attribute("subscriptKind",
+ OSRE->isArraySubscriptRefExpr() ? "array" : "dictionary");
+
+ if (const ObjCMethodDecl *MD = OSRE->getAtIndexMethodDecl())
+ JOS.attribute("getter", createBareDeclRef(MD));
+ if (const ObjCMethodDecl *MD = OSRE->setAtIndexMethodDecl())
+ JOS.attribute("setter", createBareDeclRef(MD));
+}
+
+void JSONNodeDumper::VisitObjCIvarRefExpr(const ObjCIvarRefExpr *OIRE) {
+ JOS.attribute("decl", createBareDeclRef(OIRE->getDecl()));
+ attributeOnlyIfTrue("isFreeIvar", OIRE->isFreeIvar());
+ JOS.attribute("isArrow", OIRE->isArrow());
+}
+
+void JSONNodeDumper::VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *OBLE) {
+ JOS.attribute("value", OBLE->getValue() ? "__objc_yes" : "__objc_no");
+}
+
+void JSONNodeDumper::VisitDeclRefExpr(const DeclRefExpr *DRE) {
+ JOS.attribute("referencedDecl", createBareDeclRef(DRE->getDecl()));
+ if (DRE->getDecl() != DRE->getFoundDecl())
+ JOS.attribute("foundReferencedDecl",
+ createBareDeclRef(DRE->getFoundDecl()));
+ switch (DRE->isNonOdrUse()) {
+ case NOUR_None: break;
+ case NOUR_Unevaluated: JOS.attribute("nonOdrUseReason", "unevaluated"); break;
+ case NOUR_Constant: JOS.attribute("nonOdrUseReason", "constant"); break;
+ case NOUR_Discarded: JOS.attribute("nonOdrUseReason", "discarded"); break;
+ }
+}
+
+void JSONNodeDumper::VisitPredefinedExpr(const PredefinedExpr *PE) {
+ JOS.attribute("name", PredefinedExpr::getIdentKindName(PE->getIdentKind()));
+}
+
+void JSONNodeDumper::VisitUnaryOperator(const UnaryOperator *UO) {
+ JOS.attribute("isPostfix", UO->isPostfix());
+ JOS.attribute("opcode", UnaryOperator::getOpcodeStr(UO->getOpcode()));
+ if (!UO->canOverflow())
+ JOS.attribute("canOverflow", false);
+}
+
+void JSONNodeDumper::VisitBinaryOperator(const BinaryOperator *BO) {
+ JOS.attribute("opcode", BinaryOperator::getOpcodeStr(BO->getOpcode()));
+}
+
+void JSONNodeDumper::VisitCompoundAssignOperator(
+ const CompoundAssignOperator *CAO) {
+ VisitBinaryOperator(CAO);
+ JOS.attribute("computeLHSType", createQualType(CAO->getComputationLHSType()));
+ JOS.attribute("computeResultType",
+ createQualType(CAO->getComputationResultType()));
+}
+
+void JSONNodeDumper::VisitMemberExpr(const MemberExpr *ME) {
+  // Note: we always write the isArrow Boolean field (rather than only when
+  // true) because the information it conveys is critical to understanding the
+  // AST node.
+ ValueDecl *VD = ME->getMemberDecl();
+ JOS.attribute("name", VD && VD->getDeclName() ? VD->getNameAsString() : "");
+ JOS.attribute("isArrow", ME->isArrow());
+ JOS.attribute("referencedMemberDecl", createPointerRepresentation(VD));
+ switch (ME->isNonOdrUse()) {
+ case NOUR_None: break;
+ case NOUR_Unevaluated: JOS.attribute("nonOdrUseReason", "unevaluated"); break;
+ case NOUR_Constant: JOS.attribute("nonOdrUseReason", "constant"); break;
+ case NOUR_Discarded: JOS.attribute("nonOdrUseReason", "discarded"); break;
+ }
+}
+
+void JSONNodeDumper::VisitCXXNewExpr(const CXXNewExpr *NE) {
+ attributeOnlyIfTrue("isGlobal", NE->isGlobalNew());
+ attributeOnlyIfTrue("isArray", NE->isArray());
+ attributeOnlyIfTrue("isPlacement", NE->getNumPlacementArgs() != 0);
+ switch (NE->getInitializationStyle()) {
+ case CXXNewExpr::NoInit: break;
+ case CXXNewExpr::CallInit: JOS.attribute("initStyle", "call"); break;
+ case CXXNewExpr::ListInit: JOS.attribute("initStyle", "list"); break;
+ }
+ if (const FunctionDecl *FD = NE->getOperatorNew())
+ JOS.attribute("operatorNewDecl", createBareDeclRef(FD));
+ if (const FunctionDecl *FD = NE->getOperatorDelete())
+ JOS.attribute("operatorDeleteDecl", createBareDeclRef(FD));
+}
+void JSONNodeDumper::VisitCXXDeleteExpr(const CXXDeleteExpr *DE) {
+ attributeOnlyIfTrue("isGlobal", DE->isGlobalDelete());
+ attributeOnlyIfTrue("isArray", DE->isArrayForm());
+ attributeOnlyIfTrue("isArrayAsWritten", DE->isArrayFormAsWritten());
+ if (const FunctionDecl *FD = DE->getOperatorDelete())
+ JOS.attribute("operatorDeleteDecl", createBareDeclRef(FD));
+}
+
+void JSONNodeDumper::VisitCXXThisExpr(const CXXThisExpr *TE) {
+ attributeOnlyIfTrue("implicit", TE->isImplicit());
+}
+
+void JSONNodeDumper::VisitCastExpr(const CastExpr *CE) {
+ JOS.attribute("castKind", CE->getCastKindName());
+ llvm::json::Array Path = createCastPath(CE);
+ if (!Path.empty())
+ JOS.attribute("path", std::move(Path));
+ // FIXME: This may not be useful information as it can be obtusely gleaned
+ // from the inner[] array.
+ if (const NamedDecl *ND = CE->getConversionFunction())
+ JOS.attribute("conversionFunc", createBareDeclRef(ND));
+}
+
+void JSONNodeDumper::VisitImplicitCastExpr(const ImplicitCastExpr *ICE) {
+ VisitCastExpr(ICE);
+ attributeOnlyIfTrue("isPartOfExplicitCast", ICE->isPartOfExplicitCast());
+}
+
+void JSONNodeDumper::VisitCallExpr(const CallExpr *CE) {
+ attributeOnlyIfTrue("adl", CE->usesADL());
+}
+
+void JSONNodeDumper::VisitUnaryExprOrTypeTraitExpr(
+ const UnaryExprOrTypeTraitExpr *TTE) {
+ switch (TTE->getKind()) {
+ case UETT_SizeOf: JOS.attribute("name", "sizeof"); break;
+ case UETT_AlignOf: JOS.attribute("name", "alignof"); break;
+ case UETT_VecStep: JOS.attribute("name", "vec_step"); break;
+ case UETT_PreferredAlignOf: JOS.attribute("name", "__alignof"); break;
+ case UETT_OpenMPRequiredSimdAlign:
+ JOS.attribute("name", "__builtin_omp_required_simd_align"); break;
+ }
+ if (TTE->isArgumentType())
+ JOS.attribute("argType", createQualType(TTE->getArgumentType()));
+}
+
+void JSONNodeDumper::VisitSizeOfPackExpr(const SizeOfPackExpr *SOPE) {
+ VisitNamedDecl(SOPE->getPack());
+}
+
+void JSONNodeDumper::VisitUnresolvedLookupExpr(
+ const UnresolvedLookupExpr *ULE) {
+ JOS.attribute("usesADL", ULE->requiresADL());
+ JOS.attribute("name", ULE->getName().getAsString());
+
+ JOS.attributeArray("lookups", [this, ULE] {
+ for (const NamedDecl *D : ULE->decls())
+ JOS.value(createBareDeclRef(D));
+ });
+}
+
+void JSONNodeDumper::VisitAddrLabelExpr(const AddrLabelExpr *ALE) {
+ JOS.attribute("name", ALE->getLabel()->getName());
+ JOS.attribute("labelDeclId", createPointerRepresentation(ALE->getLabel()));
+}
+
+void JSONNodeDumper::VisitCXXTypeidExpr(const CXXTypeidExpr *CTE) {
+ if (CTE->isTypeOperand()) {
+ QualType Adjusted = CTE->getTypeOperand(Ctx);
+ QualType Unadjusted = CTE->getTypeOperandSourceInfo()->getType();
+ JOS.attribute("typeArg", createQualType(Unadjusted));
+ if (Adjusted != Unadjusted)
+ JOS.attribute("adjustedTypeArg", createQualType(Adjusted));
+ }
+}
+
+void JSONNodeDumper::VisitConstantExpr(const ConstantExpr *CE) {
+ if (CE->getResultAPValueKind() != APValue::None) {
+ std::string Str;
+ llvm::raw_string_ostream OS(Str);
+ CE->getAPValueResult().printPretty(OS, Ctx, CE->getType());
+ JOS.attribute("value", OS.str());
+ }
+}
+
+void JSONNodeDumper::VisitInitListExpr(const InitListExpr *ILE) {
+ if (const FieldDecl *FD = ILE->getInitializedFieldInUnion())
+ JOS.attribute("field", createBareDeclRef(FD));
+}
+
+void JSONNodeDumper::VisitGenericSelectionExpr(
+ const GenericSelectionExpr *GSE) {
+ attributeOnlyIfTrue("resultDependent", GSE->isResultDependent());
+}
+
+void JSONNodeDumper::VisitCXXUnresolvedConstructExpr(
+ const CXXUnresolvedConstructExpr *UCE) {
+ if (UCE->getType() != UCE->getTypeAsWritten())
+ JOS.attribute("typeAsWritten", createQualType(UCE->getTypeAsWritten()));
+ attributeOnlyIfTrue("list", UCE->isListInitialization());
+}
+
+void JSONNodeDumper::VisitCXXConstructExpr(const CXXConstructExpr *CE) {
+ CXXConstructorDecl *Ctor = CE->getConstructor();
+ JOS.attribute("ctorType", createQualType(Ctor->getType()));
+ attributeOnlyIfTrue("elidable", CE->isElidable());
+ attributeOnlyIfTrue("list", CE->isListInitialization());
+ attributeOnlyIfTrue("initializer_list", CE->isStdInitListInitialization());
+ attributeOnlyIfTrue("zeroing", CE->requiresZeroInitialization());
+ attributeOnlyIfTrue("hadMultipleCandidates", CE->hadMultipleCandidates());
+
+ switch (CE->getConstructionKind()) {
+ case CXXConstructExpr::CK_Complete:
+ JOS.attribute("constructionKind", "complete");
+ break;
+ case CXXConstructExpr::CK_Delegating:
+ JOS.attribute("constructionKind", "delegating");
+ break;
+ case CXXConstructExpr::CK_NonVirtualBase:
+ JOS.attribute("constructionKind", "non-virtual base");
+ break;
+ case CXXConstructExpr::CK_VirtualBase:
+ JOS.attribute("constructionKind", "virtual base");
+ break;
+ }
+}
+
+void JSONNodeDumper::VisitExprWithCleanups(const ExprWithCleanups *EWC) {
+ attributeOnlyIfTrue("cleanupsHaveSideEffects",
+ EWC->cleanupsHaveSideEffects());
+ if (EWC->getNumObjects()) {
+ JOS.attributeArray("cleanups", [this, EWC] {
+ for (const ExprWithCleanups::CleanupObject &CO : EWC->getObjects())
+ JOS.value(createBareDeclRef(CO));
+ });
+ }
+}
+
+void JSONNodeDumper::VisitCXXBindTemporaryExpr(
+ const CXXBindTemporaryExpr *BTE) {
+ const CXXTemporary *Temp = BTE->getTemporary();
+ JOS.attribute("temp", createPointerRepresentation(Temp));
+ if (const CXXDestructorDecl *Dtor = Temp->getDestructor())
+ JOS.attribute("dtor", createBareDeclRef(Dtor));
+}
+
+void JSONNodeDumper::VisitMaterializeTemporaryExpr(
+ const MaterializeTemporaryExpr *MTE) {
+ if (const ValueDecl *VD = MTE->getExtendingDecl())
+ JOS.attribute("extendingDecl", createBareDeclRef(VD));
+
+ switch (MTE->getStorageDuration()) {
+ case SD_Automatic:
+ JOS.attribute("storageDuration", "automatic");
+ break;
+ case SD_Dynamic:
+ JOS.attribute("storageDuration", "dynamic");
+ break;
+ case SD_FullExpression:
+ JOS.attribute("storageDuration", "full expression");
+ break;
+ case SD_Static:
+ JOS.attribute("storageDuration", "static");
+ break;
+ case SD_Thread:
+ JOS.attribute("storageDuration", "thread");
+ break;
+ }
+
+ attributeOnlyIfTrue("boundToLValueRef", MTE->isBoundToLvalueReference());
+}
+
+void JSONNodeDumper::VisitCXXDependentScopeMemberExpr(
+ const CXXDependentScopeMemberExpr *DSME) {
+ JOS.attribute("isArrow", DSME->isArrow());
+ JOS.attribute("member", DSME->getMember().getAsString());
+ attributeOnlyIfTrue("hasTemplateKeyword", DSME->hasTemplateKeyword());
+ attributeOnlyIfTrue("hasExplicitTemplateArgs",
+ DSME->hasExplicitTemplateArgs());
+
+ if (DSME->getNumTemplateArgs()) {
+ JOS.attributeArray("explicitTemplateArgs", [DSME, this] {
+ for (const TemplateArgumentLoc &TAL : DSME->template_arguments())
+ JOS.object(
+ [&TAL, this] { Visit(TAL.getArgument(), TAL.getSourceRange()); });
+ });
+ }
+}
+
+void JSONNodeDumper::VisitIntegerLiteral(const IntegerLiteral *IL) {
+ JOS.attribute("value",
+ IL->getValue().toString(
+ /*Radix=*/10, IL->getType()->isSignedIntegerType()));
+}
+void JSONNodeDumper::VisitCharacterLiteral(const CharacterLiteral *CL) {
+ // FIXME: This should probably print the character literal as a string,
+ // rather than as a numerical value. It would be nice if the behavior matched
+ // what we do to print a string literal; right now, it is impossible to tell
+ // the difference between 'a' and L'a' in C from the JSON output.
+ JOS.attribute("value", CL->getValue());
+}
+void JSONNodeDumper::VisitFixedPointLiteral(const FixedPointLiteral *FPL) {
+ JOS.attribute("value", FPL->getValueAsString(/*Radix=*/10));
+}
+void JSONNodeDumper::VisitFloatingLiteral(const FloatingLiteral *FL) {
+ llvm::SmallVector<char, 16> Buffer;
+ FL->getValue().toString(Buffer);
+ JOS.attribute("value", Buffer);
+}
+void JSONNodeDumper::VisitStringLiteral(const StringLiteral *SL) {
+ std::string Buffer;
+ llvm::raw_string_ostream SS(Buffer);
+ SL->outputString(SS);
+ JOS.attribute("value", SS.str());
+}
+void JSONNodeDumper::VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *BLE) {
+ JOS.attribute("value", BLE->getValue());
+}
+
+void JSONNodeDumper::VisitIfStmt(const IfStmt *IS) {
+ attributeOnlyIfTrue("hasInit", IS->hasInitStorage());
+ attributeOnlyIfTrue("hasVar", IS->hasVarStorage());
+ attributeOnlyIfTrue("hasElse", IS->hasElseStorage());
+ attributeOnlyIfTrue("isConstexpr", IS->isConstexpr());
+}
+
+void JSONNodeDumper::VisitSwitchStmt(const SwitchStmt *SS) {
+ attributeOnlyIfTrue("hasInit", SS->hasInitStorage());
+ attributeOnlyIfTrue("hasVar", SS->hasVarStorage());
+}
+void JSONNodeDumper::VisitCaseStmt(const CaseStmt *CS) {
+ attributeOnlyIfTrue("isGNURange", CS->caseStmtIsGNURange());
+}
+
+void JSONNodeDumper::VisitLabelStmt(const LabelStmt *LS) {
+ JOS.attribute("name", LS->getName());
+ JOS.attribute("declId", createPointerRepresentation(LS->getDecl()));
+}
+void JSONNodeDumper::VisitGotoStmt(const GotoStmt *GS) {
+ JOS.attribute("targetLabelDeclId",
+ createPointerRepresentation(GS->getLabel()));
+}
+
+void JSONNodeDumper::VisitWhileStmt(const WhileStmt *WS) {
+ attributeOnlyIfTrue("hasVar", WS->hasVarStorage());
+}
+
+void JSONNodeDumper::VisitObjCAtCatchStmt(const ObjCAtCatchStmt* OACS) {
+  // FIXME: it would be nice if the ASTNodeTraverser handled the catch
+  // parameter the same way for C++ and ObjC. As it stands, C++ gets a null
+  // child node and ObjC gets no child node.
+ attributeOnlyIfTrue("isCatchAll", OACS->getCatchParamDecl() == nullptr);
+}
+
+void JSONNodeDumper::VisitNullTemplateArgument(const TemplateArgument &TA) {
+ JOS.attribute("isNull", true);
+}
+void JSONNodeDumper::VisitTypeTemplateArgument(const TemplateArgument &TA) {
+ JOS.attribute("type", createQualType(TA.getAsType()));
+}
+void JSONNodeDumper::VisitDeclarationTemplateArgument(
+ const TemplateArgument &TA) {
+ JOS.attribute("decl", createBareDeclRef(TA.getAsDecl()));
+}
+void JSONNodeDumper::VisitNullPtrTemplateArgument(const TemplateArgument &TA) {
+ JOS.attribute("isNullptr", true);
+}
+void JSONNodeDumper::VisitIntegralTemplateArgument(const TemplateArgument &TA) {
+ JOS.attribute("value", TA.getAsIntegral().getSExtValue());
+}
+void JSONNodeDumper::VisitTemplateTemplateArgument(const TemplateArgument &TA) {
+ // FIXME: cannot just call dump() on the argument, as that doesn't specify
+ // the output format.
+}
+void JSONNodeDumper::VisitTemplateExpansionTemplateArgument(
+ const TemplateArgument &TA) {
+ // FIXME: cannot just call dump() on the argument, as that doesn't specify
+ // the output format.
+}
+void JSONNodeDumper::VisitExpressionTemplateArgument(
+ const TemplateArgument &TA) {
+ JOS.attribute("isExpr", true);
+}
+void JSONNodeDumper::VisitPackTemplateArgument(const TemplateArgument &TA) {
+ JOS.attribute("isPack", true);
+}
+
+StringRef JSONNodeDumper::getCommentCommandName(unsigned CommandID) const {
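+  // Prefer the attached comment command traits when available; otherwise fall
+  // back to the builtin command table, then to a placeholder.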
+ if (Traits)
+ return Traits->getCommandInfo(CommandID)->Name;
+ if (const comments::CommandInfo *Info =
+ comments::CommandTraits::getBuiltinCommandInfo(CommandID))
+ return Info->Name;
+ return "<invalid>";
+}
+
+void JSONNodeDumper::visitTextComment(const comments::TextComment *C,
+ const comments::FullComment *) {
+ JOS.attribute("text", C->getText());
+}
+
+void JSONNodeDumper::visitInlineCommandComment(
+ const comments::InlineCommandComment *C, const comments::FullComment *) {
+ JOS.attribute("name", getCommentCommandName(C->getCommandID()));
+
+ switch (C->getRenderKind()) {
+ case comments::InlineCommandComment::RenderNormal:
+ JOS.attribute("renderKind", "normal");
+ break;
+ case comments::InlineCommandComment::RenderBold:
+ JOS.attribute("renderKind", "bold");
+ break;
+ case comments::InlineCommandComment::RenderEmphasized:
+ JOS.attribute("renderKind", "emphasized");
+ break;
+ case comments::InlineCommandComment::RenderMonospaced:
+ JOS.attribute("renderKind", "monospaced");
+ break;
+ }
+
+ llvm::json::Array Args;
+ for (unsigned I = 0, E = C->getNumArgs(); I < E; ++I)
+ Args.push_back(C->getArgText(I));
+
+ if (!Args.empty())
+ JOS.attribute("args", std::move(Args));
+}
+
+void JSONNodeDumper::visitHTMLStartTagComment(
+ const comments::HTMLStartTagComment *C, const comments::FullComment *) {
+ JOS.attribute("name", C->getTagName());
+ attributeOnlyIfTrue("selfClosing", C->isSelfClosing());
+ attributeOnlyIfTrue("malformed", C->isMalformed());
+
+ llvm::json::Array Attrs;
+ for (unsigned I = 0, E = C->getNumAttrs(); I < E; ++I)
+ Attrs.push_back(
+ {{"name", C->getAttr(I).Name}, {"value", C->getAttr(I).Value}});
+
+ if (!Attrs.empty())
+ JOS.attribute("attrs", std::move(Attrs));
+}
+
+void JSONNodeDumper::visitHTMLEndTagComment(
+ const comments::HTMLEndTagComment *C, const comments::FullComment *) {
+ JOS.attribute("name", C->getTagName());
+}
+
+void JSONNodeDumper::visitBlockCommandComment(
+ const comments::BlockCommandComment *C, const comments::FullComment *) {
+ JOS.attribute("name", getCommentCommandName(C->getCommandID()));
+
+ llvm::json::Array Args;
+ for (unsigned I = 0, E = C->getNumArgs(); I < E; ++I)
+ Args.push_back(C->getArgText(I));
+
+ if (!Args.empty())
+ JOS.attribute("args", std::move(Args));
+}
+
+void JSONNodeDumper::visitParamCommandComment(
+ const comments::ParamCommandComment *C, const comments::FullComment *FC) {
+ switch (C->getDirection()) {
+ case comments::ParamCommandComment::In:
+ JOS.attribute("direction", "in");
+ break;
+ case comments::ParamCommandComment::Out:
+ JOS.attribute("direction", "out");
+ break;
+ case comments::ParamCommandComment::InOut:
+ JOS.attribute("direction", "in,out");
+ break;
+ }
+ attributeOnlyIfTrue("explicit", C->isDirectionExplicit());
+
+ if (C->hasParamName())
+ JOS.attribute("param", C->isParamIndexValid() ? C->getParamName(FC)
+ : C->getParamNameAsWritten());
+
+ if (C->isParamIndexValid() && !C->isVarArgParam())
+ JOS.attribute("paramIdx", C->getParamIndex());
+}
+
+void JSONNodeDumper::visitTParamCommandComment(
+ const comments::TParamCommandComment *C, const comments::FullComment *FC) {
+ if (C->hasParamName())
+ JOS.attribute("param", C->isPositionValid() ? C->getParamName(FC)
+ : C->getParamNameAsWritten());
+ if (C->isPositionValid()) {
+ llvm::json::Array Positions;
+ for (unsigned I = 0, E = C->getDepth(); I < E; ++I)
+ Positions.push_back(C->getIndex(I));
+
+ if (!Positions.empty())
+ JOS.attribute("positions", std::move(Positions));
+ }
+}
+
+void JSONNodeDumper::visitVerbatimBlockComment(
+ const comments::VerbatimBlockComment *C, const comments::FullComment *) {
+ JOS.attribute("name", getCommentCommandName(C->getCommandID()));
+ JOS.attribute("closeName", C->getCloseName());
+}
+
+void JSONNodeDumper::visitVerbatimBlockLineComment(
+ const comments::VerbatimBlockLineComment *C,
+ const comments::FullComment *) {
+ JOS.attribute("text", C->getText());
+}
+
+void JSONNodeDumper::visitVerbatimLineComment(
+ const comments::VerbatimLineComment *C, const comments::FullComment *) {
+ JOS.attribute("text", C->getText());
+}
diff --git a/lib/AST/Linkage.h b/lib/AST/Linkage.h
index 8ad748bcc4a2..4e913540de86 100644
--- a/lib/AST/Linkage.h
+++ b/lib/AST/Linkage.h
@@ -1,9 +1,8 @@
//===----- Linkage.h - Linkage calculation-related utilities ----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/AST/Mangle.cpp b/lib/AST/Mangle.cpp
index bb29bffc1b8f..625282368a4d 100644
--- a/lib/AST/Mangle.cpp
+++ b/lib/AST/Mangle.cpp
@@ -1,9 +1,8 @@
//===--- Mangle.cpp - Mangle C++ Names --------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -18,10 +17,13 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Mangle.h"
+#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -281,3 +283,205 @@ void MangleContext::mangleObjCMethodName(const ObjCMethodDecl *MD,
mangleObjCMethodNameWithoutSize(MD, OS);
Out << OS.str().size() << OS.str();
}
+
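+// Implementation detail of ASTNameGenerator: runs the frontend mangler and
+// then applies the target DataLayout's global prefix (backend mangling).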
+class ASTNameGenerator::Implementation {
+ std::unique_ptr<MangleContext> MC;
+ llvm::DataLayout DL;
+
+public:
+ explicit Implementation(ASTContext &Ctx)
+ : MC(Ctx.createMangleContext()), DL(Ctx.getTargetInfo().getDataLayout()) {
+ }
+
+ bool writeName(const Decl *D, raw_ostream &OS) {
+ // First apply frontend mangling.
+ SmallString<128> FrontendBuf;
+ llvm::raw_svector_ostream FrontendBufOS(FrontendBuf);
+ if (auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isDependentContext())
+ return true;
+ if (writeFuncOrVarName(FD, FrontendBufOS))
+ return true;
+ } else if (auto *VD = dyn_cast<VarDecl>(D)) {
+ if (writeFuncOrVarName(VD, FrontendBufOS))
+ return true;
+ } else if (auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ MC->mangleObjCMethodNameWithoutSize(MD, OS);
+ return false;
+ } else if (auto *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
+ writeObjCClassName(ID, FrontendBufOS);
+ } else {
+ return true;
+ }
+
+ // Now apply backend mangling.
+ llvm::Mangler::getNameWithPrefix(OS, FrontendBufOS.str(), DL);
+ return false;
+ }
+
+ std::string getName(const Decl *D) {
+ std::string Name;
+ {
+ llvm::raw_string_ostream OS(Name);
+ writeName(D, OS);
+ }
+ return Name;
+ }
+
+ enum ObjCKind {
+ ObjCClass,
+ ObjCMetaclass,
+ };
+
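+  // GNU-family ObjC runtimes use the "_OBJC_[META]CLASS_" prefixes; other
+  // runtimes use the "OBJC_[META]CLASS_$_" style.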
+ static StringRef getClassSymbolPrefix(ObjCKind Kind,
+ const ASTContext &Context) {
+ if (Context.getLangOpts().ObjCRuntime.isGNUFamily())
+ return Kind == ObjCMetaclass ? "_OBJC_METACLASS_" : "_OBJC_CLASS_";
+ return Kind == ObjCMetaclass ? "OBJC_METACLASS_$_" : "OBJC_CLASS_$_";
+ }
+
+ std::vector<std::string> getAllManglings(const ObjCContainerDecl *OCD) {
+ StringRef ClassName;
+ if (const auto *OID = dyn_cast<ObjCInterfaceDecl>(OCD))
+ ClassName = OID->getObjCRuntimeNameAsString();
+ else if (const auto *OID = dyn_cast<ObjCImplementationDecl>(OCD))
+ ClassName = OID->getObjCRuntimeNameAsString();
+
+ if (ClassName.empty())
+ return {};
+
+ auto Mangle = [&](ObjCKind Kind, StringRef ClassName) -> std::string {
+ SmallString<40> Mangled;
+ auto Prefix = getClassSymbolPrefix(Kind, OCD->getASTContext());
+ llvm::Mangler::getNameWithPrefix(Mangled, Prefix + ClassName, DL);
+ return Mangled.str();
+ };
+
+ return {
+ Mangle(ObjCClass, ClassName),
+ Mangle(ObjCMetaclass, ClassName),
+ };
+ }
+
+ std::vector<std::string> getAllManglings(const Decl *D) {
+ if (const auto *OCD = dyn_cast<ObjCContainerDecl>(D))
+ return getAllManglings(OCD);
+
+ if (!(isa<CXXRecordDecl>(D) || isa<CXXMethodDecl>(D)))
+ return {};
+
+ const NamedDecl *ND = cast<NamedDecl>(D);
+
+ ASTContext &Ctx = ND->getASTContext();
+ std::unique_ptr<MangleContext> M(Ctx.createMangleContext());
+
+ std::vector<std::string> Manglings;
+
+ auto hasDefaultCXXMethodCC = [](ASTContext &C, const CXXMethodDecl *MD) {
+ auto DefaultCC = C.getDefaultCallingConvention(/*IsVariadic=*/false,
+ /*IsCXXMethod=*/true);
+ auto CC = MD->getType()->getAs<FunctionProtoType>()->getCallConv();
+ return CC == DefaultCC;
+ };
+
+ if (const auto *CD = dyn_cast_or_null<CXXConstructorDecl>(ND)) {
+ Manglings.emplace_back(getMangledStructor(CD, Ctor_Base));
+
+ if (Ctx.getTargetInfo().getCXXABI().isItaniumFamily())
+ if (!CD->getParent()->isAbstract())
+ Manglings.emplace_back(getMangledStructor(CD, Ctor_Complete));
+
+ if (Ctx.getTargetInfo().getCXXABI().isMicrosoft())
+ if (CD->hasAttr<DLLExportAttr>() && CD->isDefaultConstructor())
+ if (!(hasDefaultCXXMethodCC(Ctx, CD) && CD->getNumParams() == 0))
+ Manglings.emplace_back(getMangledStructor(CD, Ctor_DefaultClosure));
+ } else if (const auto *DD = dyn_cast_or_null<CXXDestructorDecl>(ND)) {
+ Manglings.emplace_back(getMangledStructor(DD, Dtor_Base));
+ if (Ctx.getTargetInfo().getCXXABI().isItaniumFamily()) {
+ Manglings.emplace_back(getMangledStructor(DD, Dtor_Complete));
+ if (DD->isVirtual())
+ Manglings.emplace_back(getMangledStructor(DD, Dtor_Deleting));
+ }
+ } else if (const auto *MD = dyn_cast_or_null<CXXMethodDecl>(ND)) {
+ Manglings.emplace_back(getName(ND));
+ if (MD->isVirtual())
+ if (const auto *TIV = Ctx.getVTableContext()->getThunkInfo(MD))
+ for (const auto &T : *TIV)
+ Manglings.emplace_back(getMangledThunk(MD, T));
+ }
+
+ return Manglings;
+ }
+
+private:
+ bool writeFuncOrVarName(const NamedDecl *D, raw_ostream &OS) {
+ if (MC->shouldMangleDeclName(D)) {
+ if (const auto *CtorD = dyn_cast<CXXConstructorDecl>(D))
+ MC->mangleCXXCtor(CtorD, Ctor_Complete, OS);
+ else if (const auto *DtorD = dyn_cast<CXXDestructorDecl>(D))
+ MC->mangleCXXDtor(DtorD, Dtor_Complete, OS);
+ else
+ MC->mangleName(D, OS);
+ return false;
+ } else {
+ IdentifierInfo *II = D->getIdentifier();
+ if (!II)
+ return true;
+ OS << II->getName();
+ return false;
+ }
+ }
+
+ void writeObjCClassName(const ObjCInterfaceDecl *D, raw_ostream &OS) {
+ OS << getClassSymbolPrefix(ObjCClass, D->getASTContext());
+ OS << D->getObjCRuntimeNameAsString();
+ }
+
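+  // Mangles the requested constructor/destructor variant with the frontend
+  // mangler, then applies the backend DataLayout prefix.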
+ std::string getMangledStructor(const NamedDecl *ND, unsigned StructorType) {
+ std::string FrontendBuf;
+ llvm::raw_string_ostream FOS(FrontendBuf);
+
+ if (const auto *CD = dyn_cast_or_null<CXXConstructorDecl>(ND))
+ MC->mangleCXXCtor(CD, static_cast<CXXCtorType>(StructorType), FOS);
+ else if (const auto *DD = dyn_cast_or_null<CXXDestructorDecl>(ND))
+ MC->mangleCXXDtor(DD, static_cast<CXXDtorType>(StructorType), FOS);
+
+ std::string BackendBuf;
+ llvm::raw_string_ostream BOS(BackendBuf);
+
+ llvm::Mangler::getNameWithPrefix(BOS, FOS.str(), DL);
+
+ return BOS.str();
+ }
+
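+  // Mangles a virtual method thunk with the frontend mangler, then applies
+  // the backend DataLayout prefix.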
+ std::string getMangledThunk(const CXXMethodDecl *MD, const ThunkInfo &T) {
+ std::string FrontendBuf;
+ llvm::raw_string_ostream FOS(FrontendBuf);
+
+ MC->mangleThunk(MD, T, FOS);
+
+ std::string BackendBuf;
+ llvm::raw_string_ostream BOS(BackendBuf);
+
+ llvm::Mangler::getNameWithPrefix(BOS, FOS.str(), DL);
+
+ return BOS.str();
+ }
+};
+
+ASTNameGenerator::ASTNameGenerator(ASTContext &Ctx)
+ : Impl(llvm::make_unique<Implementation>(Ctx)) {}
+
+ASTNameGenerator::~ASTNameGenerator() {}
+
+bool ASTNameGenerator::writeName(const Decl *D, raw_ostream &OS) {
+ return Impl->writeName(D, OS);
+}
+
+std::string ASTNameGenerator::getName(const Decl *D) {
+ return Impl->getName(D);
+}
+
+std::vector<std::string> ASTNameGenerator::getAllManglings(const Decl *D) {
+ return Impl->getAllManglings(D);
+}
diff --git a/lib/AST/MicrosoftCXXABI.cpp b/lib/AST/MicrosoftCXXABI.cpp
index 3b417c135285..4dc4156df9ca 100644
--- a/lib/AST/MicrosoftCXXABI.cpp
+++ b/lib/AST/MicrosoftCXXABI.cpp
@@ -1,9 +1,8 @@
//===------- MicrosoftCXXABI.cpp - AST support for the Microsoft C++ ABI --===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/AST/MicrosoftMangle.cpp b/lib/AST/MicrosoftMangle.cpp
index 92e9679e49aa..5e9358e24fc9 100644
--- a/lib/AST/MicrosoftMangle.cpp
+++ b/lib/AST/MicrosoftMangle.cpp
@@ -1,9 +1,8 @@
//===--- MicrosoftMangle.cpp - Microsoft Visual C++ Name Mangling ---------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -32,6 +31,7 @@
#include "llvm/Support/xxhash.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/StringSaver.h"
using namespace clang;
@@ -97,7 +97,8 @@ static const DeclContext *getEffectiveDeclContext(const Decl *D) {
}
const DeclContext *DC = D->getDeclContext();
- if (isa<CapturedDecl>(DC) || isa<OMPDeclareReductionDecl>(DC)) {
+ if (isa<CapturedDecl>(DC) || isa<OMPDeclareReductionDecl>(DC) ||
+ isa<OMPDeclareMapperDecl>(DC)) {
return getEffectiveDeclContext(cast<Decl>(DC));
}
@@ -265,9 +266,15 @@ class MicrosoftCXXNameMangler {
BackRefVec NameBackReferences;
typedef llvm::DenseMap<const void *, unsigned> ArgBackRefMap;
- ArgBackRefMap TypeBackReferences;
+ ArgBackRefMap FunArgBackReferences;
+ ArgBackRefMap TemplateArgBackReferences;
+
+ typedef llvm::DenseMap<const void *, StringRef> TemplateArgStringMap;
+ TemplateArgStringMap TemplateArgStrings;
+ llvm::StringSaver TemplateArgStringStorage;
+ llvm::BumpPtrAllocator TemplateArgStringStorageAlloc;
- typedef std::set<int> PassObjectSizeArgsSet;
+ typedef std::set<std::pair<int, bool>> PassObjectSizeArgsSet;
PassObjectSizeArgsSet PassObjectSizeArgs;
ASTContext &getASTContext() const { return Context.getASTContext(); }
@@ -281,18 +288,21 @@ public:
MicrosoftCXXNameMangler(MicrosoftMangleContextImpl &C, raw_ostream &Out_)
: Context(C), Out(Out_), Structor(nullptr), StructorType(-1),
+ TemplateArgStringStorage(TemplateArgStringStorageAlloc),
PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(0) ==
64) {}
MicrosoftCXXNameMangler(MicrosoftMangleContextImpl &C, raw_ostream &Out_,
const CXXConstructorDecl *D, CXXCtorType Type)
: Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
+ TemplateArgStringStorage(TemplateArgStringStorageAlloc),
PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(0) ==
64) {}
MicrosoftCXXNameMangler(MicrosoftMangleContextImpl &C, raw_ostream &Out_,
const CXXDestructorDecl *D, CXXDtorType Type)
: Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
+ TemplateArgStringStorage(TemplateArgStringStorageAlloc),
PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(0) ==
64) {}
@@ -343,7 +353,7 @@ private:
const TemplateArgumentList &TemplateArgs);
void mangleObjCMethodName(const ObjCMethodDecl *MD);
- void mangleArgumentType(QualType T, SourceRange Range);
+ void mangleFunctionArgumentType(QualType T, SourceRange Range);
void manglePassObjectSizeArg(const PassObjectSizeAttr *POSA);
bool isArtificialTagType(QualType T) const;
@@ -793,7 +803,7 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
// the X<Y> part is aliased. However, if you need to mangle
// void foo(A::X<A::Y>, A::X<B::Y>),
// the A::X<> part is not aliased.
- // That said, from the mangler's perspective we have a structure like this:
+ // That is, from the mangler's perspective we have a structure like this:
// namespace[s] -> type[ -> template-parameters]
// but from the Clang perspective we have
// type [ -> template-parameters]
@@ -803,12 +813,40 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
// the mangled type name as a key to check the mangling of different types
// for aliasing.
- llvm::SmallString<64> TemplateMangling;
- llvm::raw_svector_ostream Stream(TemplateMangling);
- MicrosoftCXXNameMangler Extra(Context, Stream);
- Extra.mangleTemplateInstantiationName(TD, *TemplateArgs);
-
- mangleSourceName(TemplateMangling);
+ // It's important to key cache reads off ND, not TD -- the same TD can
+ // be used with different TemplateArgs, but ND uniquely identifies
+ // TD / TemplateArg pairs.
+ ArgBackRefMap::iterator Found = TemplateArgBackReferences.find(ND);
+ if (Found == TemplateArgBackReferences.end()) {
+
+ TemplateArgStringMap::iterator Found = TemplateArgStrings.find(ND);
+ if (Found == TemplateArgStrings.end()) {
+ // Mangle full template name into temporary buffer.
+ llvm::SmallString<64> TemplateMangling;
+ llvm::raw_svector_ostream Stream(TemplateMangling);
+ MicrosoftCXXNameMangler Extra(Context, Stream);
+ Extra.mangleTemplateInstantiationName(TD, *TemplateArgs);
+
+ // Use the string backref vector to possibly get a back reference.
+ mangleSourceName(TemplateMangling);
+
+      // Memoize the back reference for this type if one exists; otherwise
+      // memoize the mangling itself.
+ BackRefVec::iterator StringFound =
+ llvm::find(NameBackReferences, TemplateMangling);
+ if (StringFound != NameBackReferences.end()) {
+ TemplateArgBackReferences[ND] =
+ StringFound - NameBackReferences.begin();
+ } else {
+ TemplateArgStrings[ND] =
+ TemplateArgStringStorage.save(TemplateMangling.str());
+ }
+ } else {
+ Out << Found->second; // Outputs a StringRef.
+ }
+ } else {
+ Out << Found->second; // Outputs a back reference (an int).
+ }
return;
}
@@ -1242,15 +1280,8 @@ void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO,
case OO_Array_Delete: Out << "?_V"; break;
// <operator-name> ::= ?__L # co_await
case OO_Coawait: Out << "?__L"; break;
-
- case OO_Spaceship: {
- // FIXME: Once MS picks a mangling, use it.
- DiagnosticsEngine &Diags = Context.getDiags();
- unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
- "cannot mangle this three-way comparison operator yet");
- Diags.Report(Loc, DiagID);
- break;
- }
+ // <operator-name> ::= ?__M # <=>
+ case OO_Spaceship: Out << "?__M"; break;
case OO_Conditional: {
DiagnosticsEngine &Diags = Context.getDiags();
@@ -1268,8 +1299,7 @@ void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO,
void MicrosoftCXXNameMangler::mangleSourceName(StringRef Name) {
// <source name> ::= <identifier> @
- BackRefVec::iterator Found =
- std::find(NameBackReferences.begin(), NameBackReferences.end(), Name);
+ BackRefVec::iterator Found = llvm::find(NameBackReferences, Name);
if (Found == NameBackReferences.end()) {
if (NameBackReferences.size() < 10)
NameBackReferences.push_back(Name);
@@ -1290,11 +1320,13 @@ void MicrosoftCXXNameMangler::mangleTemplateInstantiationName(
// Always start with the unqualified name.
// Templates have their own context for back references.
- ArgBackRefMap OuterArgsContext;
+ ArgBackRefMap OuterFunArgsContext;
+ ArgBackRefMap OuterTemplateArgsContext;
BackRefVec OuterTemplateContext;
PassObjectSizeArgsSet OuterPassObjectSizeArgs;
NameBackReferences.swap(OuterTemplateContext);
- TypeBackReferences.swap(OuterArgsContext);
+ FunArgBackReferences.swap(OuterFunArgsContext);
+ TemplateArgBackReferences.swap(OuterTemplateArgsContext);
PassObjectSizeArgs.swap(OuterPassObjectSizeArgs);
mangleUnscopedTemplateName(TD);
@@ -1302,7 +1334,8 @@ void MicrosoftCXXNameMangler::mangleTemplateInstantiationName(
// Restore the previous back reference contexts.
NameBackReferences.swap(OuterTemplateContext);
- TypeBackReferences.swap(OuterArgsContext);
+ FunArgBackReferences.swap(OuterFunArgsContext);
+ TemplateArgBackReferences.swap(OuterTemplateArgsContext);
PassObjectSizeArgs.swap(OuterPassObjectSizeArgs);
}
@@ -1707,8 +1740,8 @@ void MicrosoftCXXNameMangler::manglePointerCVQualifiers(Qualifiers Quals) {
}
}
-void MicrosoftCXXNameMangler::mangleArgumentType(QualType T,
- SourceRange Range) {
+void MicrosoftCXXNameMangler::mangleFunctionArgumentType(QualType T,
+ SourceRange Range) {
// MSVC will backreference two canonically equivalent types that have slightly
// different manglings when mangled alone.
@@ -1738,9 +1771,9 @@ void MicrosoftCXXNameMangler::mangleArgumentType(QualType T,
TypePtr = T.getCanonicalType().getAsOpaquePtr();
}
- ArgBackRefMap::iterator Found = TypeBackReferences.find(TypePtr);
+ ArgBackRefMap::iterator Found = FunArgBackReferences.find(TypePtr);
- if (Found == TypeBackReferences.end()) {
+ if (Found == FunArgBackReferences.end()) {
size_t OutSizeBefore = Out.tell();
mangleType(T, Range, QMM_Drop);
@@ -1749,9 +1782,9 @@ void MicrosoftCXXNameMangler::mangleArgumentType(QualType T,
// Only types longer than 1 character are considered
  // and only 10 back reference slots are available:
bool LongerThanOneChar = (Out.tell() - OutSizeBefore > 1);
- if (LongerThanOneChar && TypeBackReferences.size() < 10) {
- size_t Size = TypeBackReferences.size();
- TypeBackReferences[TypePtr] = Size;
+ if (LongerThanOneChar && FunArgBackReferences.size() < 10) {
+ size_t Size = FunArgBackReferences.size();
+ FunArgBackReferences[TypePtr] = Size;
}
} else {
Out << Found->second;
@@ -1761,18 +1794,20 @@ void MicrosoftCXXNameMangler::mangleArgumentType(QualType T,
void MicrosoftCXXNameMangler::manglePassObjectSizeArg(
const PassObjectSizeAttr *POSA) {
int Type = POSA->getType();
+ bool Dynamic = POSA->isDynamic();
- auto Iter = PassObjectSizeArgs.insert(Type).first;
+ auto Iter = PassObjectSizeArgs.insert({Type, Dynamic}).first;
auto *TypePtr = (const void *)&*Iter;
- ArgBackRefMap::iterator Found = TypeBackReferences.find(TypePtr);
+ ArgBackRefMap::iterator Found = FunArgBackReferences.find(TypePtr);
- if (Found == TypeBackReferences.end()) {
- mangleArtificialTagType(TTK_Enum, "__pass_object_size" + llvm::utostr(Type),
- {"__clang"});
+ if (Found == FunArgBackReferences.end()) {
+ std::string Name =
+ Dynamic ? "__pass_dynamic_object_size" : "__pass_object_size";
+ mangleArtificialTagType(TTK_Enum, Name + llvm::utostr(Type), {"__clang"});
- if (TypeBackReferences.size() < 10) {
- size_t Size = TypeBackReferences.size();
- TypeBackReferences[TypePtr] = Size;
+ if (FunArgBackReferences.size() < 10) {
+ size_t Size = FunArgBackReferences.size();
+ FunArgBackReferences[TypePtr] = Size;
}
} else {
Out << Found->second;
@@ -1937,8 +1972,9 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
// ::= _M # unsigned __int128
// ::= _N # bool
// _O # <array in parameter>
- // ::= _T # __float80 (Intel)
+ // ::= _Q # char8_t
// ::= _S # char16_t
+ // ::= _T # __float80 (Intel)
// ::= _U # char32_t
// ::= _W # wchar_t
// ::= _Z # __float80 (Digital Mars)
@@ -1999,6 +2035,9 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
case BuiltinType::Bool:
Out << "_N";
break;
+ case BuiltinType::Char8:
+ Out << "_Q";
+ break;
case BuiltinType::Char16:
Out << "_S";
break;
@@ -2094,7 +2133,6 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
case BuiltinType::SatUShortFract:
case BuiltinType::SatUFract:
case BuiltinType::SatULongFract:
- case BuiltinType::Char8:
case BuiltinType::Float128: {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
@@ -2112,7 +2150,7 @@ void MicrosoftCXXNameMangler::mangleType(const FunctionProtoType *T, Qualifiers,
// Structors only appear in decls, so at this point we know it's not a
// structor type.
// FIXME: This may not be lambda-friendly.
- if (T->getTypeQuals() || T->getRefQualifier() != RQ_None) {
+ if (T->getMethodQuals() || T->getRefQualifier() != RQ_None) {
Out << "$$A8@@";
mangleFunctionType(T, /*D=*/nullptr, /*ForceThisQuals=*/true);
} else {
@@ -2161,7 +2199,7 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
// If this is a C++ instance method, mangle the CVR qualifiers for the
// this pointer.
if (HasThisQuals) {
- Qualifiers Quals = Proto->getTypeQuals();
+ Qualifiers Quals = Proto->getMethodQuals();
manglePointerExtQualifiers(Quals, /*PointeeType=*/QualType());
mangleRefQualifier(Proto->getRefQualifier());
mangleQualifiers(Quals, /*IsMember=*/false);
@@ -2195,12 +2233,12 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
Out << 'X';
} else if (StructorType == Ctor_CopyingClosure) {
// Copy constructor closure always takes an unqualified reference.
- mangleArgumentType(getASTContext().getLValueReferenceType(
- Proto->getParamType(0)
- ->getAs<LValueReferenceType>()
- ->getPointeeType(),
- /*SpelledAsLValue=*/true),
- Range);
+ mangleFunctionArgumentType(getASTContext().getLValueReferenceType(
+ Proto->getParamType(0)
+ ->getAs<LValueReferenceType>()
+ ->getPointeeType(),
+ /*SpelledAsLValue=*/true),
+ Range);
Out << '@';
} else {
llvm_unreachable("unexpected constructor closure!");
@@ -2242,7 +2280,7 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
} else {
// Happens for function pointer type arguments for example.
for (unsigned I = 0, E = Proto->getNumParams(); I != E; ++I) {
- mangleArgumentType(Proto->getParamType(I), Range);
+ mangleFunctionArgumentType(Proto->getParamType(I), Range);
// Mangle each pass_object_size parameter as if it's a parameter of enum
// type passed directly after the parameter with the pass_object_size
// attribute. The aforementioned enum's name is __pass_object_size, and we
@@ -2734,10 +2772,12 @@ void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T,
if (T->qual_empty() && !T->isSpecialized())
return mangleType(T->getBaseType(), Range, QMM_Drop);
- ArgBackRefMap OuterArgsContext;
+ ArgBackRefMap OuterFunArgsContext;
+ ArgBackRefMap OuterTemplateArgsContext;
BackRefVec OuterTemplateContext;
- TypeBackReferences.swap(OuterArgsContext);
+ FunArgBackReferences.swap(OuterFunArgsContext);
+ TemplateArgBackReferences.swap(OuterTemplateArgsContext);
NameBackReferences.swap(OuterTemplateContext);
mangleTagTypeKind(TTK_Struct);
@@ -2761,7 +2801,8 @@ void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T,
Out << '@';
- TypeBackReferences.swap(OuterArgsContext);
+ FunArgBackReferences.swap(OuterFunArgsContext);
+ TemplateArgBackReferences.swap(OuterTemplateArgsContext);
NameBackReferences.swap(OuterTemplateContext);
}
@@ -3154,12 +3195,18 @@ void MicrosoftMangleContextImpl::mangleCXXCatchableType(
}
Mangler.getStream() << RTTIMangling;
- // VS2015 CTP6 omits the copy-constructor in the mangled name. This name is,
- // in fact, superfluous but I'm not sure the change was made consciously.
+ // VS2015 and VS2017.1 omit the copy-constructor in the mangled name but
+ // both older and newer versions include it.
+ // FIXME: It is known that the Ctor is present in 2013, and in 2017.7
+ // (_MSC_VER 1914) and newer, and that it's omitted in 2015 and 2017.4
+ // (_MSC_VER 1911), but it's unknown when exactly it reappeared (1914?
+  // Or 1912, 1913 already?).
+ bool OmitCopyCtor = getASTContext().getLangOpts().isCompatibleWithMSVC(
+ LangOptions::MSVC2015) &&
+ !getASTContext().getLangOpts().isCompatibleWithMSVC(
+ LangOptions::MSVC2017_7);
llvm::SmallString<64> CopyCtorMangling;
- if (!getASTContext().getLangOpts().isCompatibleWithMSVC(
- LangOptions::MSVC2015) &&
- CD) {
+ if (!OmitCopyCtor && CD) {
llvm::raw_svector_ostream Stream(CopyCtorMangling);
msvc_hashing_ostream MHO(Stream);
mangleCXXCtor(CD, CT, MHO);
@@ -3457,8 +3504,7 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
} else {
const char SpecialChars[] = {',', '/', '\\', ':', '.',
' ', '\n', '\t', '\'', '-'};
- const char *Pos =
- std::find(std::begin(SpecialChars), std::end(SpecialChars), Byte);
+ const char *Pos = llvm::find(SpecialChars, Byte);
if (Pos != std::end(SpecialChars)) {
Mangler.getStream() << '?' << (Pos - std::begin(SpecialChars));
} else {
diff --git a/lib/AST/NSAPI.cpp b/lib/AST/NSAPI.cpp
index 5b8300893e2d..5104dc59d621 100644
--- a/lib/AST/NSAPI.cpp
+++ b/lib/AST/NSAPI.cpp
@@ -1,9 +1,8 @@
//===--- NSAPI.cpp - NSFoundation APIs ------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/AST/NestedNameSpecifier.cpp b/lib/AST/NestedNameSpecifier.cpp
index 42f6a133d717..09d85102585b 100644
--- a/lib/AST/NestedNameSpecifier.cpp
+++ b/lib/AST/NestedNameSpecifier.cpp
@@ -1,9 +1,8 @@
//===- NestedNameSpecifier.cpp - C++ nested name specifiers ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/AST/ODRHash.cpp b/lib/AST/ODRHash.cpp
index a4c344ce0a9b..3b89c630b451 100644
--- a/lib/AST/ODRHash.cpp
+++ b/lib/AST/ODRHash.cpp
@@ -1,9 +1,8 @@
//===-- ODRHash.cpp - Hashing to diagnose ODR failures ----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -72,8 +71,13 @@ void ODRHash::AddDeclarationNameImpl(DeclarationName Name) {
AddBoolean(S.isKeywordSelector());
AddBoolean(S.isUnarySelector());
unsigned NumArgs = S.getNumArgs();
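+    // Hash the arity and, for each slot, whether an identifier is present, so
+    // selectors with empty slots hash distinctly.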
+ ID.AddInteger(NumArgs);
for (unsigned i = 0; i < NumArgs; ++i) {
- AddIdentifierInfo(S.getIdentifierInfoForSlot(i));
+ const IdentifierInfo *II = S.getIdentifierInfoForSlot(i);
+ AddBoolean(II);
+ if (II) {
+ AddIdentifierInfo(II);
+ }
}
break;
}
@@ -141,6 +145,7 @@ void ODRHash::AddTemplateName(TemplateName Name) {
break;
// TODO: Support these cases.
case TemplateName::OverloadedTemplate:
+ case TemplateName::AssumedTemplate:
case TemplateName::QualifiedTemplate:
case TemplateName::DependentTemplate:
case TemplateName::SubstTemplateTemplateParm:
@@ -696,7 +701,52 @@ public:
ID.AddInteger(Quals.getAsOpaqueValue());
}
+ // Return the RecordType if the typedef only strips away a keyword.
+ // Otherwise, return the original type.
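+  // For example, "typedef struct S S;" reduces to the underlying RecordType
+  // for S, so both spellings hash identically.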
+ static const Type *RemoveTypedef(const Type *T) {
+ const auto *TypedefT = dyn_cast<TypedefType>(T);
+ if (!TypedefT) {
+ return T;
+ }
+
+ const TypedefNameDecl *D = TypedefT->getDecl();
+ QualType UnderlyingType = D->getUnderlyingType();
+
+ if (UnderlyingType.hasLocalQualifiers()) {
+ return T;
+ }
+
+ const auto *ElaboratedT = dyn_cast<ElaboratedType>(UnderlyingType);
+ if (!ElaboratedT) {
+ return T;
+ }
+
+ if (ElaboratedT->getQualifier() != nullptr) {
+ return T;
+ }
+
+ QualType NamedType = ElaboratedT->getNamedType();
+ if (NamedType.hasLocalQualifiers()) {
+ return T;
+ }
+
+ const auto *RecordT = dyn_cast<RecordType>(NamedType);
+ if (!RecordT) {
+ return T;
+ }
+
+ const IdentifierInfo *TypedefII = TypedefT->getDecl()->getIdentifier();
+ const IdentifierInfo *RecordII = RecordT->getDecl()->getIdentifier();
+ if (!TypedefII || !RecordII ||
+ TypedefII->getName() != RecordII->getName()) {
+ return T;
+ }
+
+ return RecordT;
+ }
+
void Visit(const Type *T) {
+ T = RemoveTypedef(T);
ID.AddInteger(T->getTypeClass());
Inherited::Visit(T);
}
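As a hedged illustration (not part of the imported patch), this is the C idiom RemoveTypedef canonicalizes, so both spellings of the type produce the same ODR hash:

    struct Point { int x; int y; };
    typedef struct Point Point;   // the typedef only strips the 'struct' keyword
    // After this change, 'Point' and 'struct Point' hash identically, so this
    // common C header idiom no longer causes spurious ODR-hash mismatches.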
@@ -704,14 +754,36 @@ public:
void VisitType(const Type *T) {}
void VisitAdjustedType(const AdjustedType *T) {
- AddQualType(T->getOriginalType());
- AddQualType(T->getAdjustedType());
+ QualType Original = T->getOriginalType();
+ QualType Adjusted = T->getAdjustedType();
+
+ // The original type and pointee type can be the same, as in the case of
+ // function pointers decaying to themselves. Set a bool and only process
+ // the type once, to prevent doubling the work.
+ SplitQualType split = Adjusted.split();
+ if (auto Pointer = dyn_cast<PointerType>(split.Ty)) {
+ if (Pointer->getPointeeType() == Original) {
+ Hash.AddBoolean(true);
+ ID.AddInteger(split.Quals.getAsOpaqueValue());
+ AddQualType(Original);
+ VisitType(T);
+ return;
+ }
+ }
+
+ // The original type and pointee type are different, such as in the case
+ // of an array decaying to an element pointer. Set a bool to false and
+ // process both types.
+ Hash.AddBoolean(false);
+ AddQualType(Original);
+ AddQualType(Adjusted);
+
VisitType(T);
}
void VisitDecayedType(const DecayedType *T) {
- AddQualType(T->getDecayedType());
- AddQualType(T->getPointeeType());
+ // getDecayedType and getPointeeType are derived from getAdjustedType
+ // and don't need to be separately processed.
VisitAdjustedType(T);
}
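A minimal sketch, assuming hypothetical declarations, of the two decay cases the comments above distinguish:

    void takesArray(int a[4]);    // 'int[4]' adjusts to 'int *': the pointee
                                  // ('int') differs from the original type, so
                                  // both original and adjusted types are hashed
    void takesFunc(void f(int));  // 'void (int)' adjusts to 'void (*)(int)': the
                                  // pointee equals the original type, so it is
                                  // hashed only once, guarded by the boolean above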
diff --git a/lib/AST/OpenMPClause.cpp b/lib/AST/OpenMPClause.cpp
index 76098f15bf36..9d8a7ebc3023 100644
--- a/lib/AST/OpenMPClause.cpp
+++ b/lib/AST/OpenMPClause.cpp
@@ -1,9 +1,8 @@
//===- OpenMPClause.cpp - Classes for OpenMP clauses ----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -36,6 +35,20 @@ OMPClause::child_range OMPClause::children() {
llvm_unreachable("unknown OMPClause");
}
+OMPClause::child_range OMPClause::used_children() {
+ switch (getClauseKind()) {
+#define OPENMP_CLAUSE(Name, Class) \
+ case OMPC_##Name: \
+ return static_cast<Class *>(this)->used_children();
+#include "clang/Basic/OpenMPKinds.def"
+ case OMPC_threadprivate:
+ case OMPC_uniform:
+ case OMPC_unknown:
+ break;
+ }
+ llvm_unreachable("unknown OMPClause");
+}
+
OMPClauseWithPreInit *OMPClauseWithPreInit::get(OMPClause *C) {
auto *Res = OMPClauseWithPreInit::get(const_cast<const OMPClause *>(C));
return Res ? const_cast<OMPClauseWithPreInit *>(Res) : nullptr;
@@ -74,6 +87,8 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
case OMPC_final:
case OMPC_safelen:
case OMPC_simdlen:
+ case OMPC_allocator:
+ case OMPC_allocate:
case OMPC_collapse:
case OMPC_private:
case OMPC_shared:
@@ -145,6 +160,8 @@ const OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(const OMPClause *C)
case OMPC_num_threads:
case OMPC_safelen:
case OMPC_simdlen:
+ case OMPC_allocator:
+ case OMPC_allocate:
case OMPC_collapse:
case OMPC_private:
case OMPC_shared:
@@ -192,6 +209,25 @@ const OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(const OMPClause *C)
return nullptr;
}
+/// Gets the address of the original, non-captured, expression used in the
+/// clause as the preinitializer.
+static Stmt **getAddrOfExprAsWritten(Stmt *S) {
+ if (!S)
+ return nullptr;
+ if (auto *DS = dyn_cast<DeclStmt>(S)) {
+ assert(DS->isSingleDecl() && "Only single expression must be captured.");
+ if (auto *OED = dyn_cast<OMPCapturedExprDecl>(DS->getSingleDecl()))
+ return OED->getInitAddress();
+ }
+ return nullptr;
+}
+
+OMPClause::child_range OMPIfClause::used_children() {
+ if (Stmt **C = getAddrOfExprAsWritten(getPreInitStmt()))
+ return child_range(C, C + 1);
+ return child_range(&Condition, &Condition + 1);
+}
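A hedged example (hypothetical directive) of what OMPIfClause::used_children() yields: the condition as written by the user rather than the compiler-generated captured copy:

    void g(int x) {
      #pragma omp parallel if(x > 0)  // used_children() yields the 'x > 0'
      { /* parallel region */ }       // expression as written in the source,
    }                                 // not the captured pre-init copy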
+
OMPOrderedClause *OMPOrderedClause::Create(const ASTContext &C, Expr *Num,
unsigned NumLoops,
SourceLocation StartLoc,
@@ -698,6 +734,25 @@ OMPInReductionClause *OMPInReductionClause::CreateEmpty(const ASTContext &C,
return new (Mem) OMPInReductionClause(N);
}
+OMPAllocateClause *
+OMPAllocateClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, Expr *Allocator,
+ SourceLocation ColonLoc, SourceLocation EndLoc,
+ ArrayRef<Expr *> VL) {
+ // Allocate space for private variables and initializer expressions.
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size()));
+ auto *Clause = new (Mem) OMPAllocateClause(StartLoc, LParenLoc, Allocator,
+ ColonLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ return Clause;
+}
+
+OMPAllocateClause *OMPAllocateClause::CreateEmpty(const ASTContext &C,
+ unsigned N) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N));
+ return new (Mem) OMPAllocateClause(N);
+}
+
OMPFlushClause *OMPFlushClause::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation LParenLoc,
@@ -791,24 +846,23 @@ unsigned OMPClauseMappableExprCommon::getUniqueDeclarationsTotalNumber(
return TotalNum;
}
-OMPMapClause *
-OMPMapClause::Create(const ASTContext &C, SourceLocation StartLoc,
- SourceLocation LParenLoc, SourceLocation EndLoc,
- ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
- MappableExprComponentListsRef ComponentLists,
- ArrayRef<OpenMPMapModifierKind> MapModifiers,
- ArrayRef<SourceLocation> MapModifiersLoc,
- OpenMPMapClauseKind Type, bool TypeIsImplicit,
- SourceLocation TypeLoc) {
- unsigned NumVars = Vars.size();
- unsigned NumUniqueDeclarations =
- getUniqueDeclarationsTotalNumber(Declarations);
- unsigned NumComponentLists = ComponentLists.size();
- unsigned NumComponents = getComponentsTotalNumber(ComponentLists);
+OMPMapClause *OMPMapClause::Create(
+ const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars,
+ ArrayRef<ValueDecl *> Declarations,
+ MappableExprComponentListsRef ComponentLists, ArrayRef<Expr *> UDMapperRefs,
+ ArrayRef<OpenMPMapModifierKind> MapModifiers,
+ ArrayRef<SourceLocation> MapModifiersLoc,
+ NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId,
+ OpenMPMapClauseKind Type, bool TypeIsImplicit, SourceLocation TypeLoc) {
+ OMPMappableExprListSizeTy Sizes;
+ Sizes.NumVars = Vars.size();
+ Sizes.NumUniqueDeclarations = getUniqueDeclarationsTotalNumber(Declarations);
+ Sizes.NumComponentLists = ComponentLists.size();
+ Sizes.NumComponents = getComponentsTotalNumber(ComponentLists);
// We need to allocate:
- // NumVars x Expr* - we have an original list expression for each clause list
- // entry.
+ // 2 x NumVars x Expr* - we have an original list expression and an associated
+ // user-defined mapper for each clause list entry.
// NumUniqueDeclarations x ValueDecl* - unique base declarations associated
// with each component list.
// (NumUniqueDeclarations + NumComponentLists) x unsigned - we specify the
@@ -819,47 +873,47 @@ OMPMapClause::Create(const ASTContext &C, SourceLocation StartLoc,
void *Mem = C.Allocate(
totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent>(
- NumVars, NumUniqueDeclarations,
- NumUniqueDeclarations + NumComponentLists, NumComponents));
- OMPMapClause *Clause = new (Mem) OMPMapClause(
- MapModifiers, MapModifiersLoc, Type, TypeIsImplicit, TypeLoc, StartLoc,
- LParenLoc, EndLoc, NumVars, NumUniqueDeclarations, NumComponentLists,
- NumComponents);
+ 2 * Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
+ Sizes.NumComponents));
+ OMPMapClause *Clause = new (Mem)
+ OMPMapClause(MapModifiers, MapModifiersLoc, UDMQualifierLoc, MapperId,
+ Type, TypeIsImplicit, TypeLoc, Locs, Sizes);
Clause->setVarRefs(Vars);
+ Clause->setUDMapperRefs(UDMapperRefs);
Clause->setClauseInfo(Declarations, ComponentLists);
Clause->setMapType(Type);
Clause->setMapLoc(TypeLoc);
return Clause;
}
-OMPMapClause *OMPMapClause::CreateEmpty(const ASTContext &C, unsigned NumVars,
- unsigned NumUniqueDeclarations,
- unsigned NumComponentLists,
- unsigned NumComponents) {
+OMPMapClause *
+OMPMapClause::CreateEmpty(const ASTContext &C,
+ const OMPMappableExprListSizeTy &Sizes) {
void *Mem = C.Allocate(
totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent>(
- NumVars, NumUniqueDeclarations,
- NumUniqueDeclarations + NumComponentLists, NumComponents));
- return new (Mem) OMPMapClause(NumVars, NumUniqueDeclarations,
- NumComponentLists, NumComponents);
-}
-
-OMPToClause *OMPToClause::Create(const ASTContext &C, SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc, ArrayRef<Expr *> Vars,
- ArrayRef<ValueDecl *> Declarations,
- MappableExprComponentListsRef ComponentLists) {
- unsigned NumVars = Vars.size();
- unsigned NumUniqueDeclarations =
- getUniqueDeclarationsTotalNumber(Declarations);
- unsigned NumComponentLists = ComponentLists.size();
- unsigned NumComponents = getComponentsTotalNumber(ComponentLists);
+ 2 * Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
+ Sizes.NumComponents));
+ return new (Mem) OMPMapClause(Sizes);
+}
+
+OMPToClause *OMPToClause::Create(
+ const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars,
+ ArrayRef<ValueDecl *> Declarations,
+ MappableExprComponentListsRef ComponentLists, ArrayRef<Expr *> UDMapperRefs,
+ NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId) {
+ OMPMappableExprListSizeTy Sizes;
+ Sizes.NumVars = Vars.size();
+ Sizes.NumUniqueDeclarations = getUniqueDeclarationsTotalNumber(Declarations);
+ Sizes.NumComponentLists = ComponentLists.size();
+ Sizes.NumComponents = getComponentsTotalNumber(ComponentLists);
// We need to allocate:
- // NumVars x Expr* - we have an original list expression for each clause list
- // entry.
+ // 2 x NumVars x Expr* - we have an original list expression and an associated
+ // user-defined mapper for each clause list entry.
// NumUniqueDeclarations x ValueDecl* - unique base declarations associated
// with each component list.
// (NumUniqueDeclarations + NumComponentLists) x unsigned - we specify the
@@ -870,45 +924,43 @@ OMPToClause *OMPToClause::Create(const ASTContext &C, SourceLocation StartLoc,
void *Mem = C.Allocate(
totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent>(
- NumVars, NumUniqueDeclarations,
- NumUniqueDeclarations + NumComponentLists, NumComponents));
+ 2 * Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
+ Sizes.NumComponents));
- OMPToClause *Clause = new (Mem)
- OMPToClause(StartLoc, LParenLoc, EndLoc, NumVars, NumUniqueDeclarations,
- NumComponentLists, NumComponents);
+ auto *Clause = new (Mem) OMPToClause(UDMQualifierLoc, MapperId, Locs, Sizes);
Clause->setVarRefs(Vars);
+ Clause->setUDMapperRefs(UDMapperRefs);
Clause->setClauseInfo(Declarations, ComponentLists);
return Clause;
}
-OMPToClause *OMPToClause::CreateEmpty(const ASTContext &C, unsigned NumVars,
- unsigned NumUniqueDeclarations,
- unsigned NumComponentLists,
- unsigned NumComponents) {
+OMPToClause *OMPToClause::CreateEmpty(const ASTContext &C,
+ const OMPMappableExprListSizeTy &Sizes) {
void *Mem = C.Allocate(
totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent>(
- NumVars, NumUniqueDeclarations,
- NumUniqueDeclarations + NumComponentLists, NumComponents));
- return new (Mem) OMPToClause(NumVars, NumUniqueDeclarations,
- NumComponentLists, NumComponents);
-}
-
-OMPFromClause *
-OMPFromClause::Create(const ASTContext &C, SourceLocation StartLoc,
- SourceLocation LParenLoc, SourceLocation EndLoc,
- ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
- MappableExprComponentListsRef ComponentLists) {
- unsigned NumVars = Vars.size();
- unsigned NumUniqueDeclarations =
- getUniqueDeclarationsTotalNumber(Declarations);
- unsigned NumComponentLists = ComponentLists.size();
- unsigned NumComponents = getComponentsTotalNumber(ComponentLists);
+ 2 * Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
+ Sizes.NumComponents));
+ return new (Mem) OMPToClause(Sizes);
+}
+
+OMPFromClause *OMPFromClause::Create(
+ const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars,
+ ArrayRef<ValueDecl *> Declarations,
+ MappableExprComponentListsRef ComponentLists, ArrayRef<Expr *> UDMapperRefs,
+ NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId) {
+ OMPMappableExprListSizeTy Sizes;
+ Sizes.NumVars = Vars.size();
+ Sizes.NumUniqueDeclarations = getUniqueDeclarationsTotalNumber(Declarations);
+ Sizes.NumComponentLists = ComponentLists.size();
+ Sizes.NumComponents = getComponentsTotalNumber(ComponentLists);
// We need to allocate:
- // NumVars x Expr* - we have an original list expression for each clause list
- // entry.
+ // 2 x NumVars x Expr* - we have an original list expression and an associated
+ // user-defined mapper for each clause list entry.
// NumUniqueDeclarations x ValueDecl* - unique base declarations associated
// with each component list.
// (NumUniqueDeclarations + NumComponentLists) x unsigned - we specify the
@@ -919,29 +971,29 @@ OMPFromClause::Create(const ASTContext &C, SourceLocation StartLoc,
void *Mem = C.Allocate(
totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent>(
- NumVars, NumUniqueDeclarations,
- NumUniqueDeclarations + NumComponentLists, NumComponents));
+ 2 * Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
+ Sizes.NumComponents));
- OMPFromClause *Clause = new (Mem)
- OMPFromClause(StartLoc, LParenLoc, EndLoc, NumVars, NumUniqueDeclarations,
- NumComponentLists, NumComponents);
+ auto *Clause =
+ new (Mem) OMPFromClause(UDMQualifierLoc, MapperId, Locs, Sizes);
Clause->setVarRefs(Vars);
+ Clause->setUDMapperRefs(UDMapperRefs);
Clause->setClauseInfo(Declarations, ComponentLists);
return Clause;
}
-OMPFromClause *OMPFromClause::CreateEmpty(const ASTContext &C, unsigned NumVars,
- unsigned NumUniqueDeclarations,
- unsigned NumComponentLists,
- unsigned NumComponents) {
+OMPFromClause *
+OMPFromClause::CreateEmpty(const ASTContext &C,
+ const OMPMappableExprListSizeTy &Sizes) {
void *Mem = C.Allocate(
totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent>(
- NumVars, NumUniqueDeclarations,
- NumUniqueDeclarations + NumComponentLists, NumComponents));
- return new (Mem) OMPFromClause(NumVars, NumUniqueDeclarations,
- NumComponentLists, NumComponents);
+ 2 * Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
+ Sizes.NumComponents));
+ return new (Mem) OMPFromClause(Sizes);
}
void OMPUseDevicePtrClause::setPrivateCopies(ArrayRef<Expr *> VL) {
@@ -957,15 +1009,15 @@ void OMPUseDevicePtrClause::setInits(ArrayRef<Expr *> VL) {
}
OMPUseDevicePtrClause *OMPUseDevicePtrClause::Create(
- const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation EndLoc, ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars,
- ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations,
+ const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars,
+ ArrayRef<Expr *> PrivateVars, ArrayRef<Expr *> Inits,
+ ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists) {
- unsigned NumVars = Vars.size();
- unsigned NumUniqueDeclarations =
- getUniqueDeclarationsTotalNumber(Declarations);
- unsigned NumComponentLists = ComponentLists.size();
- unsigned NumComponents = getComponentsTotalNumber(ComponentLists);
+ OMPMappableExprListSizeTy Sizes;
+ Sizes.NumVars = Vars.size();
+ Sizes.NumUniqueDeclarations = getUniqueDeclarationsTotalNumber(Declarations);
+ Sizes.NumComponentLists = ComponentLists.size();
+ Sizes.NumComponents = getComponentsTotalNumber(ComponentLists);
// We need to allocate:
// 3 x NumVars x Expr* - we have an original list expression for each clause
@@ -980,12 +1032,11 @@ OMPUseDevicePtrClause *OMPUseDevicePtrClause::Create(
void *Mem = C.Allocate(
totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent>(
- 3 * NumVars, NumUniqueDeclarations,
- NumUniqueDeclarations + NumComponentLists, NumComponents));
+ 3 * Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
+ Sizes.NumComponents));
- OMPUseDevicePtrClause *Clause = new (Mem) OMPUseDevicePtrClause(
- StartLoc, LParenLoc, EndLoc, NumVars, NumUniqueDeclarations,
- NumComponentLists, NumComponents);
+ OMPUseDevicePtrClause *Clause = new (Mem) OMPUseDevicePtrClause(Locs, Sizes);
Clause->setVarRefs(Vars);
Clause->setPrivateCopies(PrivateVars);
@@ -994,29 +1045,28 @@ OMPUseDevicePtrClause *OMPUseDevicePtrClause::Create(
return Clause;
}
-OMPUseDevicePtrClause *OMPUseDevicePtrClause::CreateEmpty(
- const ASTContext &C, unsigned NumVars, unsigned NumUniqueDeclarations,
- unsigned NumComponentLists, unsigned NumComponents) {
+OMPUseDevicePtrClause *
+OMPUseDevicePtrClause::CreateEmpty(const ASTContext &C,
+ const OMPMappableExprListSizeTy &Sizes) {
void *Mem = C.Allocate(
totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent>(
- 3 * NumVars, NumUniqueDeclarations,
- NumUniqueDeclarations + NumComponentLists, NumComponents));
- return new (Mem) OMPUseDevicePtrClause(NumVars, NumUniqueDeclarations,
- NumComponentLists, NumComponents);
+ 3 * Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
+ Sizes.NumComponents));
+ return new (Mem) OMPUseDevicePtrClause(Sizes);
}
OMPIsDevicePtrClause *
-OMPIsDevicePtrClause::Create(const ASTContext &C, SourceLocation StartLoc,
- SourceLocation LParenLoc, SourceLocation EndLoc,
+OMPIsDevicePtrClause::Create(const ASTContext &C, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> Vars,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists) {
- unsigned NumVars = Vars.size();
- unsigned NumUniqueDeclarations =
- getUniqueDeclarationsTotalNumber(Declarations);
- unsigned NumComponentLists = ComponentLists.size();
- unsigned NumComponents = getComponentsTotalNumber(ComponentLists);
+ OMPMappableExprListSizeTy Sizes;
+ Sizes.NumVars = Vars.size();
+ Sizes.NumUniqueDeclarations = getUniqueDeclarationsTotalNumber(Declarations);
+ Sizes.NumComponentLists = ComponentLists.size();
+ Sizes.NumComponents = getComponentsTotalNumber(ComponentLists);
// We need to allocate:
// NumVars x Expr* - we have an original list expression for each clause list
@@ -1031,28 +1081,27 @@ OMPIsDevicePtrClause::Create(const ASTContext &C, SourceLocation StartLoc,
void *Mem = C.Allocate(
totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent>(
- NumVars, NumUniqueDeclarations,
- NumUniqueDeclarations + NumComponentLists, NumComponents));
+ Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
+ Sizes.NumComponents));
- OMPIsDevicePtrClause *Clause = new (Mem) OMPIsDevicePtrClause(
- StartLoc, LParenLoc, EndLoc, NumVars, NumUniqueDeclarations,
- NumComponentLists, NumComponents);
+ OMPIsDevicePtrClause *Clause = new (Mem) OMPIsDevicePtrClause(Locs, Sizes);
Clause->setVarRefs(Vars);
Clause->setClauseInfo(Declarations, ComponentLists);
return Clause;
}
-OMPIsDevicePtrClause *OMPIsDevicePtrClause::CreateEmpty(
- const ASTContext &C, unsigned NumVars, unsigned NumUniqueDeclarations,
- unsigned NumComponentLists, unsigned NumComponents) {
+OMPIsDevicePtrClause *
+OMPIsDevicePtrClause::CreateEmpty(const ASTContext &C,
+ const OMPMappableExprListSizeTy &Sizes) {
void *Mem = C.Allocate(
totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent>(
- NumVars, NumUniqueDeclarations,
- NumUniqueDeclarations + NumComponentLists, NumComponents));
- return new (Mem) OMPIsDevicePtrClause(NumVars, NumUniqueDeclarations,
- NumComponentLists, NumComponents);
+ Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
+ Sizes.NumComponents));
+ return new (Mem) OMPIsDevicePtrClause(Sizes);
}
//===----------------------------------------------------------------------===//
@@ -1091,6 +1140,12 @@ void OMPClausePrinter::VisitOMPSimdlenClause(OMPSimdlenClause *Node) {
OS << ")";
}
+void OMPClausePrinter::VisitOMPAllocatorClause(OMPAllocatorClause *Node) {
+ OS << "allocator(";
+ Node->getAllocator()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
void OMPClausePrinter::VisitOMPCollapseClause(OMPCollapseClause *Node) {
OS << "collapse(";
Node->getNumForLoops()->printPretty(OS, nullptr, Policy, 0);
@@ -1261,6 +1316,21 @@ void OMPClausePrinter::VisitOMPClauseList(T *Node, char StartSym) {
}
}
+void OMPClausePrinter::VisitOMPAllocateClause(OMPAllocateClause *Node) {
+ if (Node->varlist_empty())
+ return;
+ OS << "allocate";
+ if (Expr *Allocator = Node->getAllocator()) {
+ OS << "(";
+ Allocator->printPretty(OS, nullptr, Policy, 0);
+ OS << ":";
+ VisitOMPClauseList(Node, ' ');
+ } else {
+ VisitOMPClauseList(Node, '(');
+ }
+ OS << ")";
+}
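A hedged sketch of the clause text this printer reproduces; the allocator and variable names are illustrative:

    // With <omp.h> included and 'a', 'b' in scope (illustrative):
    #pragma omp parallel private(a, b) allocate(omp_high_bw_mem_alloc: a, b)
    // the clause above prints back as: allocate(omp_high_bw_mem_alloc: a,b);
    // with no allocator expression it prints as: allocate(a,b)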
+
void OMPClausePrinter::VisitOMPPrivateClause(OMPPrivateClause *Node) {
if (!Node->varlist_empty()) {
OS << "private";
@@ -1432,6 +1502,14 @@ void OMPClausePrinter::VisitOMPMapClause(OMPMapClause *Node) {
if (Node->getMapTypeModifier(I) != OMPC_MAP_MODIFIER_unknown) {
OS << getOpenMPSimpleClauseTypeName(OMPC_map,
Node->getMapTypeModifier(I));
+ if (Node->getMapTypeModifier(I) == OMPC_MAP_MODIFIER_mapper) {
+ OS << '(';
+ NestedNameSpecifier *MapperNNS =
+ Node->getMapperQualifierLoc().getNestedNameSpecifier();
+ if (MapperNNS)
+ MapperNNS->print(OS, Policy);
+ OS << Node->getMapperIdInfo() << ')';
+ }
OS << ',';
}
}
@@ -1446,7 +1524,19 @@ void OMPClausePrinter::VisitOMPMapClause(OMPMapClause *Node) {
void OMPClausePrinter::VisitOMPToClause(OMPToClause *Node) {
if (!Node->varlist_empty()) {
OS << "to";
- VisitOMPClauseList(Node, '(');
+ DeclarationNameInfo MapperId = Node->getMapperIdInfo();
+ if (MapperId.getName() && !MapperId.getName().isEmpty()) {
+ OS << '(';
+ OS << "mapper(";
+ NestedNameSpecifier *MapperNNS =
+ Node->getMapperQualifierLoc().getNestedNameSpecifier();
+ if (MapperNNS)
+ MapperNNS->print(OS, Policy);
+ OS << MapperId << "):";
+ VisitOMPClauseList(Node, ' ');
+ } else {
+ VisitOMPClauseList(Node, '(');
+ }
OS << ")";
}
}
@@ -1454,7 +1544,19 @@ void OMPClausePrinter::VisitOMPToClause(OMPToClause *Node) {
void OMPClausePrinter::VisitOMPFromClause(OMPFromClause *Node) {
if (!Node->varlist_empty()) {
OS << "from";
- VisitOMPClauseList(Node, '(');
+ DeclarationNameInfo MapperId = Node->getMapperIdInfo();
+ if (MapperId.getName() && !MapperId.getName().isEmpty()) {
+ OS << '(';
+ OS << "mapper(";
+ NestedNameSpecifier *MapperNNS =
+ Node->getMapperQualifierLoc().getNestedNameSpecifier();
+ if (MapperNNS)
+ MapperNNS->print(OS, Policy);
+ OS << MapperId << "):";
+ VisitOMPClauseList(Node, ' ');
+ } else {
+ VisitOMPClauseList(Node, '(');
+ }
OS << ")";
}
}
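A hedged sketch of the OpenMP 5.0 mapper syntax the map/to/from printers now round-trip; the mapper and array names are hypothetical:

    // With 'arr', 'n' in scope and a declared mapper 'mymap' (illustrative):
    #pragma omp target map(mapper(mymap),tofrom: arr[0:n])
    { /* target region */ }
    #pragma omp target update to(mapper(mymap): arr[0:n])
    // in both cases the printer re-emits the 'mapper(...)' prefix before the list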
diff --git a/lib/AST/ParentMap.cpp b/lib/AST/ParentMap.cpp
index 88c178aa372f..2ff5c9d8aeb5 100644
--- a/lib/AST/ParentMap.cpp
+++ b/lib/AST/ParentMap.cpp
@@ -1,9 +1,8 @@
//===--- ParentMap.cpp - Mappings from Stmts to their Parents ---*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -84,6 +83,18 @@ static void BuildParentMap(MapTy& M, Stmt* S,
}
break;
}
+ case Stmt::CapturedStmtClass:
+ for (Stmt *SubStmt : S->children()) {
+ if (SubStmt) {
+ M[SubStmt] = S;
+ BuildParentMap(M, SubStmt, OVMode);
+ }
+ }
+ if (Stmt *SubStmt = cast<CapturedStmt>(S)->getCapturedStmt()) {
+ M[SubStmt] = S;
+ BuildParentMap(M, SubStmt, OVMode);
+ }
+ break;
default:
for (Stmt *SubStmt : S->children()) {
if (SubStmt) {
diff --git a/lib/AST/PrintfFormatString.cpp b/lib/AST/PrintfFormatString.cpp
index e0a0c5b7582a..a1207aae5aa2 100644
--- a/lib/AST/PrintfFormatString.cpp
+++ b/lib/AST/PrintfFormatString.cpp
@@ -1,9 +1,8 @@
//== PrintfFormatString.cpp - Analysis of printf format strings --*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -316,7 +315,11 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
case 'f': k = ConversionSpecifier::fArg; break;
case 'g': k = ConversionSpecifier::gArg; break;
case 'i': k = ConversionSpecifier::iArg; break;
- case 'n': k = ConversionSpecifier::nArg; break;
+ case 'n':
+ // Not handled, but reserved in OpenCL.
+ if (!LO.OpenCL)
+ k = ConversionSpecifier::nArg;
+ break;
case 'o': k = ConversionSpecifier::oArg; break;
case 'p': k = ConversionSpecifier::pArg; break;
case 's': k = ConversionSpecifier::sArg; break;
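Hedged illustration: with this change, OpenCL sources no longer map '%n' to nArg, so a call such as the following hypothetical one is treated as using an unsupported conversion:

    // OpenCL C, illustrative only:
    //   int i;
    //   printf("%n", &i);   // '%n' is reserved but not handled in OpenCL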
@@ -487,10 +490,12 @@ ArgType PrintfSpecifier::getScalarArgType(ASTContext &Ctx,
// GNU extension.
return Ctx.LongLongTy;
case LengthModifier::None:
+ case LengthModifier::AsShortLong:
return Ctx.IntTy;
case LengthModifier::AsInt32:
return ArgType(Ctx.IntTy, "__int32");
- case LengthModifier::AsChar: return ArgType::AnyCharTy;
+ case LengthModifier::AsChar:
+ return ArgType::AnyCharTy;
case LengthModifier::AsShort: return Ctx.ShortTy;
case LengthModifier::AsLong: return Ctx.LongTy;
case LengthModifier::AsLongLong:
@@ -521,6 +526,7 @@ ArgType PrintfSpecifier::getScalarArgType(ASTContext &Ctx,
// GNU extension.
return Ctx.UnsignedLongLongTy;
case LengthModifier::None:
+ case LengthModifier::AsShortLong:
return Ctx.UnsignedIntTy;
case LengthModifier::AsInt32:
return ArgType(Ctx.UnsignedIntTy, "unsigned __int32");
@@ -550,6 +556,18 @@ ArgType PrintfSpecifier::getScalarArgType(ASTContext &Ctx,
}
if (CS.isDoubleArg()) {
+ if (!VectorNumElts.isInvalid()) {
+ switch (LM.getKind()) {
+ case LengthModifier::AsShort:
+ return Ctx.HalfTy;
+ case LengthModifier::AsShortLong:
+ return Ctx.FloatTy;
+ case LengthModifier::AsLong:
+ default:
+ return Ctx.DoubleTy;
+ }
+ }
+
if (LM.getKind() == LengthModifier::AsLongDouble)
return Ctx.LongDoubleTy;
return Ctx.DoubleTy;
@@ -583,6 +601,8 @@ ArgType PrintfSpecifier::getScalarArgType(ASTContext &Ctx,
case LengthModifier::AsInt64:
case LengthModifier::AsWide:
return ArgType::Invalid();
+ case LengthModifier::AsShortLong:
+ llvm_unreachable("only used for OpenCL which doesn not handle nArg");
}
}
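A hedged sketch of the OpenCL vector conversions these cases cover; 'hl' corresponds to AsShortLong and the values are illustrative:

    // OpenCL C, illustrative only:
    //   float4 v = (float4)(1.0f, 2.0f, 3.0f, 4.0f);
    //   printf("%v4hlf\n", v);   // 'hl' selects float elements of a vector
    //   double4 d = (double4)(1.0);
    //   printf("%v4lf\n", d);    // 'l' selects double elements of a vector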
@@ -761,10 +781,13 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
case BuiltinType::UInt:
case BuiltinType::Int:
case BuiltinType::Float:
+ LM.setKind(VectorNumElts.isInvalid() ?
+ LengthModifier::None : LengthModifier::AsShortLong);
+ break;
case BuiltinType::Double:
- LM.setKind(LengthModifier::None);
+ LM.setKind(VectorNumElts.isInvalid() ?
+ LengthModifier::None : LengthModifier::AsLong);
break;
-
case BuiltinType::Char_U:
case BuiltinType::UChar:
case BuiltinType::Char_S:
@@ -797,7 +820,7 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
namedTypeToLengthModifier(QT, LM);
// If fixing the length modifier was enough, we might be done.
- if (hasValidLengthModifier(Ctx.getTargetInfo())) {
+ if (hasValidLengthModifier(Ctx.getTargetInfo(), LangOpt)) {
// If we're going to offer a fix anyway, make sure the sign matches.
switch (CS.getKind()) {
case ConversionSpecifier::uArg:
diff --git a/lib/AST/QualTypeNames.cpp b/lib/AST/QualTypeNames.cpp
index 8b605ef295a0..f28f00171cce 100644
--- a/lib/AST/QualTypeNames.cpp
+++ b/lib/AST/QualTypeNames.cpp
@@ -1,11 +1,8 @@
//===------- QualTypeNames.cpp - Generate Complete QualType Names ---------===//
//
-// The LLVM Compiler Infrastructure
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -382,6 +379,19 @@ QualType getFullyQualifiedType(QualType QT, const ASTContext &Ctx,
return QT;
}
+ if (auto *MPT = dyn_cast<MemberPointerType>(QT.getTypePtr())) {
+ // Get the qualifiers.
+ Qualifiers Quals = QT.getQualifiers();
+ // Fully qualify the pointee and class types.
+ QT = getFullyQualifiedType(QT->getPointeeType(), Ctx, WithGlobalNsPrefix);
+ QualType Class = getFullyQualifiedType(QualType(MPT->getClass(), 0), Ctx,
+ WithGlobalNsPrefix);
+ QT = Ctx.getMemberPointerType(QT, Class.getTypePtr());
+ // Add back the qualifiers.
+ QT = Ctx.getQualifiedType(QT, Quals);
+ return QT;
+ }
+
// In case of myType& we need to strip the reference first, fully
// qualify and attach the reference once again.
if (isa<ReferenceType>(QT.getTypePtr())) {
diff --git a/lib/AST/RawCommentList.cpp b/lib/AST/RawCommentList.cpp
index ab873a396419..df53b7fa1004 100644
--- a/lib/AST/RawCommentList.cpp
+++ b/lib/AST/RawCommentList.cpp
@@ -1,9 +1,8 @@
//===--- RawCommentList.cpp - Processing raw comments -----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/AST/RecordLayout.cpp b/lib/AST/RecordLayout.cpp
index 9db23d50d0af..e7b500e1902d 100644
--- a/lib/AST/RecordLayout.cpp
+++ b/lib/AST/RecordLayout.cpp
@@ -1,9 +1,8 @@
//===- RecordLayout.cpp - Layout information for a struct/union -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/AST/RecordLayoutBuilder.cpp b/lib/AST/RecordLayoutBuilder.cpp
index 62dc22c81403..2a3419a0cec3 100644
--- a/lib/AST/RecordLayoutBuilder.cpp
+++ b/lib/AST/RecordLayoutBuilder.cpp
@@ -1,9 +1,8 @@
//=== RecordLayoutBuilder.cpp - Helper class for building record layouts ---==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -128,9 +127,10 @@ class EmptySubobjectMap {
CharUnits Offset, bool PlacingEmptyBase);
void UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
- const CXXRecordDecl *Class,
- CharUnits Offset);
- void UpdateEmptyFieldSubobjects(const FieldDecl *FD, CharUnits Offset);
+ const CXXRecordDecl *Class, CharUnits Offset,
+ bool PlacingOverlappingField);
+ void UpdateEmptyFieldSubobjects(const FieldDecl *FD, CharUnits Offset,
+ bool PlacingOverlappingField);
/// AnyEmptySubobjectsBeyondOffset - Returns whether there are any empty
/// subobjects beyond the given offset.
@@ -239,7 +239,7 @@ EmptySubobjectMap::CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD,
return true;
const ClassVectorTy &Classes = I->second;
- if (std::find(Classes.begin(), Classes.end(), RD) == Classes.end())
+ if (llvm::find(Classes, RD) == Classes.end())
return true;
// There is already an empty class of the same type at this offset.
@@ -255,7 +255,7 @@ void EmptySubobjectMap::AddSubobjectAtOffset(const CXXRecordDecl *RD,
// If we have empty structures inside a union, we can assign both
// the same offset. Just avoid pushing them twice in the list.
ClassVectorTy &Classes = EmptyClassOffsets[Offset];
- if (std::find(Classes.begin(), Classes.end(), RD) != Classes.end())
+ if (llvm::is_contained(Classes, RD))
return;
Classes.push_back(RD);
@@ -352,7 +352,7 @@ void EmptySubobjectMap::UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info,
continue;
CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
- UpdateEmptyFieldSubobjects(*I, FieldOffset);
+ UpdateEmptyFieldSubobjects(*I, FieldOffset, PlacingEmptyBase);
}
}
@@ -472,20 +472,25 @@ EmptySubobjectMap::CanPlaceFieldAtOffset(const FieldDecl *FD,
return false;
// We are able to place the member variable at this offset.
- // Make sure to update the empty base subobject map.
- UpdateEmptyFieldSubobjects(FD, Offset);
+ // Make sure to update the empty field subobject map.
+ UpdateEmptyFieldSubobjects(FD, Offset, FD->hasAttr<NoUniqueAddressAttr>());
return true;
}
-void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
- const CXXRecordDecl *Class,
- CharUnits Offset) {
+void EmptySubobjectMap::UpdateEmptyFieldSubobjects(
+ const CXXRecordDecl *RD, const CXXRecordDecl *Class, CharUnits Offset,
+ bool PlacingOverlappingField) {
// We know that the only empty subobjects that can conflict with empty
- // field subobjects are subobjects of empty bases that can be placed at offset
- // zero. Because of this, we only need to keep track of empty field
- // subobjects with offsets less than the size of the largest empty
- // subobject for our class.
- if (Offset >= SizeOfLargestEmptySubobject)
+ // field subobjects are subobjects of empty bases and potentially-overlapping
+ // fields that can be placed at offset zero. Because of this, we only need to
+ // keep track of empty field subobjects with offsets less than the size of
+ // the largest empty subobject for our class.
+ //
+ // (Proof: we will only consider placing a subobject at offset zero or at
+ // >= the current dsize. The only cases where the earlier subobject can be
+ // placed beyond the end of dsize is if it's an empty base or a
+ // potentially-overlapping field.)
+ if (!PlacingOverlappingField && Offset >= SizeOfLargestEmptySubobject)
return;
AddSubobjectAtOffset(RD, Offset);
@@ -500,7 +505,8 @@ void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
- UpdateEmptyFieldSubobjects(BaseDecl, Class, BaseOffset);
+ UpdateEmptyFieldSubobjects(BaseDecl, Class, BaseOffset,
+ PlacingOverlappingField);
}
if (RD == Class) {
@@ -509,7 +515,8 @@ void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
const CXXRecordDecl *VBaseDecl = Base.getType()->getAsCXXRecordDecl();
CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
- UpdateEmptyFieldSubobjects(VBaseDecl, Class, VBaseOffset);
+ UpdateEmptyFieldSubobjects(VBaseDecl, Class, VBaseOffset,
+ PlacingOverlappingField);
}
}
@@ -522,15 +529,15 @@ void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
- UpdateEmptyFieldSubobjects(*I, FieldOffset);
+ UpdateEmptyFieldSubobjects(*I, FieldOffset, PlacingOverlappingField);
}
}
-void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const FieldDecl *FD,
- CharUnits Offset) {
+void EmptySubobjectMap::UpdateEmptyFieldSubobjects(
+ const FieldDecl *FD, CharUnits Offset, bool PlacingOverlappingField) {
QualType T = FD->getType();
if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl()) {
- UpdateEmptyFieldSubobjects(RD, RD, Offset);
+ UpdateEmptyFieldSubobjects(RD, RD, Offset, PlacingOverlappingField);
return;
}
@@ -553,10 +560,12 @@ void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const FieldDecl *FD,
// offset zero. Because of this, we only need to keep track of empty field
// subobjects with offsets less than the size of the largest empty
// subobject for our class.
- if (ElementOffset >= SizeOfLargestEmptySubobject)
+ if (!PlacingOverlappingField &&
+ ElementOffset >= SizeOfLargestEmptySubobject)
return;
- UpdateEmptyFieldSubobjects(RD, RD, ElementOffset);
+ UpdateEmptyFieldSubobjects(RD, RD, ElementOffset,
+ PlacingOverlappingField);
ElementOffset += Layout.getSize();
}
}
@@ -623,6 +632,10 @@ protected:
CharUnits NonVirtualSize;
CharUnits NonVirtualAlignment;
+ /// If we've laid out a field but not included its tail padding in Size yet,
+ /// this is the size up to the end of that field.
+ CharUnits PaddedFieldSize;
+
/// PrimaryBase - the primary base class (if one exists) of the class
/// we're laying out.
const CXXRecordDecl *PrimaryBase;
@@ -671,7 +684,8 @@ protected:
UnfilledBitsInLastUnit(0), LastBitfieldTypeSize(0),
MaxFieldAlignment(CharUnits::Zero()), DataSize(0),
NonVirtualSize(CharUnits::Zero()),
- NonVirtualAlignment(CharUnits::One()), PrimaryBase(nullptr),
+ NonVirtualAlignment(CharUnits::One()),
+ PaddedFieldSize(CharUnits::Zero()), PrimaryBase(nullptr),
PrimaryBaseIsVirtual(false), HasOwnVFPtr(false),
HasPackedField(false), FirstNearlyEmptyVBase(nullptr) {}
@@ -981,7 +995,6 @@ void ItaniumRecordLayoutBuilder::EnsureVTablePointerAlignment(
// Round up the current record size to pointer alignment.
setSize(getSize().alignTo(BaseAlign));
- setDataSize(getSize());
// Update the alignment.
UpdateAlignment(BaseAlign, UnpackedBaseAlign);
@@ -1173,6 +1186,7 @@ ItaniumRecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
// Query the external layout to see if it provides an offset.
bool HasExternalLayout = false;
if (UseExternalLayout) {
+ // FIXME: This appears to be reversed.
if (Base->IsVirtual)
HasExternalLayout = External.getExternalNVBaseOffset(Base->Class, Offset);
else
@@ -1343,8 +1357,8 @@ void ItaniumRecordLayoutBuilder::Layout(const ObjCInterfaceDecl *D) {
// We start laying out ivars not at the end of the superclass
// structure, but at the next byte following the last field.
- setSize(SL.getDataSize());
- setDataSize(getSize());
+ setDataSize(SL.getDataSize());
+ setSize(getDataSize());
}
InitializeLayout(D);
@@ -1730,32 +1744,49 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
UnfilledBitsInLastUnit = 0;
LastBitfieldTypeSize = 0;
+ auto *FieldClass = D->getType()->getAsCXXRecordDecl();
+ bool PotentiallyOverlapping = D->hasAttr<NoUniqueAddressAttr>() && FieldClass;
+ bool IsOverlappingEmptyField = PotentiallyOverlapping && FieldClass->isEmpty();
bool FieldPacked = Packed || D->hasAttr<PackedAttr>();
- CharUnits FieldOffset =
- IsUnion ? CharUnits::Zero() : getDataSize();
+
+ CharUnits FieldOffset = (IsUnion || IsOverlappingEmptyField)
+ ? CharUnits::Zero()
+ : getDataSize();
CharUnits FieldSize;
CharUnits FieldAlign;
+ // The amount of this class's dsize occupied by the field.
+ // This is equal to FieldSize unless we're permitted to pack
+ // into the field's tail padding.
+ CharUnits EffectiveFieldSize;
if (D->getType()->isIncompleteArrayType()) {
// This is a flexible array member; we can't directly
// query getTypeInfo about these, so we figure it out here.
// Flexible array members don't have any size, but they
// have to be aligned appropriately for their element type.
- FieldSize = CharUnits::Zero();
+ EffectiveFieldSize = FieldSize = CharUnits::Zero();
const ArrayType* ATy = Context.getAsArrayType(D->getType());
FieldAlign = Context.getTypeAlignInChars(ATy->getElementType());
} else if (const ReferenceType *RT = D->getType()->getAs<ReferenceType>()) {
unsigned AS = Context.getTargetAddressSpace(RT->getPointeeType());
- FieldSize =
+ EffectiveFieldSize = FieldSize =
Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(AS));
FieldAlign =
Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(AS));
} else {
std::pair<CharUnits, CharUnits> FieldInfo =
Context.getTypeInfoInChars(D->getType());
- FieldSize = FieldInfo.first;
+ EffectiveFieldSize = FieldSize = FieldInfo.first;
FieldAlign = FieldInfo.second;
+ // A potentially-overlapping field occupies its dsize or nvsize, whichever
+ // is larger.
+ if (PotentiallyOverlapping) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(FieldClass);
+ EffectiveFieldSize =
+ std::max(Layout.getNonVirtualSize(), Layout.getDataSize());
+ }
+
if (IsMsStruct) {
// If MS bitfield layout is required, figure out what type is being
// laid out and align the field to the width of that type.
@@ -1835,7 +1866,12 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
// Check if we can place the field at this offset.
while (!EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset)) {
// We couldn't place the field at the offset. Try again at a new offset.
- FieldOffset += FieldAlign;
+ // We try offset 0 (for an empty field) and then dsize(C) onwards.
+ if (FieldOffset == CharUnits::Zero() &&
+ getDataSize() != CharUnits::Zero())
+ FieldOffset = getDataSize().alignTo(FieldAlign);
+ else
+ FieldOffset += FieldAlign;
}
}
}
@@ -1854,18 +1890,23 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
if (FieldSize % ASanAlignment)
ExtraSizeForAsan +=
ASanAlignment - CharUnits::fromQuantity(FieldSize % ASanAlignment);
- FieldSize += ExtraSizeForAsan;
+ EffectiveFieldSize = FieldSize = FieldSize + ExtraSizeForAsan;
}
// Reserve space for this field.
- uint64_t FieldSizeInBits = Context.toBits(FieldSize);
- if (IsUnion)
- setDataSize(std::max(getDataSizeInBits(), FieldSizeInBits));
- else
- setDataSize(FieldOffset + FieldSize);
+ if (!IsOverlappingEmptyField) {
+ uint64_t EffectiveFieldSizeInBits = Context.toBits(EffectiveFieldSize);
+ if (IsUnion)
+ setDataSize(std::max(getDataSizeInBits(), EffectiveFieldSizeInBits));
+ else
+ setDataSize(FieldOffset + EffectiveFieldSize);
- // Update the size.
- setSize(std::max(getSizeInBits(), getDataSizeInBits()));
+ PaddedFieldSize = std::max(PaddedFieldSize, FieldOffset + FieldSize);
+ setSize(std::max(getSizeInBits(), getDataSizeInBits()));
+ } else {
+ setSize(std::max(getSizeInBits(),
+ (uint64_t)Context.toBits(FieldOffset + FieldSize)));
+ }
// Remember max struct/class alignment.
UnadjustedAlignment = std::max(UnadjustedAlignment, FieldAlign);
@@ -1886,6 +1927,10 @@ void ItaniumRecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
setSize(CharUnits::One());
}
+ // If we have any remaining field tail padding, include that in the overall
+ // size.
+ setSize(std::max(getSizeInBits(), (uint64_t)Context.toBits(PaddedFieldSize)));
+
// Finally, round the size of the record up to the alignment of the
// record itself.
uint64_t UnpaddedSize = getSizeInBits() - UnfilledBitsInLastUnit;
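A hedged sketch of the C++20 layout this bookkeeping enables under the Itanium ABI; the type names are illustrative:

    struct Empty {};
    struct S {
      [[no_unique_address]] Empty e;  // potentially-overlapping: may be placed
      int i;                          // at offset 0, sharing storage with 'i'
    };
    static_assert(sizeof(S) == sizeof(int),
                  "expected: the empty field adds no storage");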
@@ -2693,7 +2738,8 @@ void MicrosoftRecordLayoutBuilder::layoutBitField(const FieldDecl *FD) {
auto FieldBitOffset = External.getExternalFieldOffset(FD);
placeFieldAtBitOffset(FieldBitOffset);
auto NewSize = Context.toCharUnitsFromBits(
- llvm::alignTo(FieldBitOffset + Width, Context.getCharWidth()));
+ llvm::alignDown(FieldBitOffset, Context.toBits(Info.Alignment)) +
+ Context.toBits(Info.Size));
Size = std::max(Size, NewSize);
Alignment = std::max(Alignment, Info.Alignment);
} else if (IsUnion) {
@@ -2742,12 +2788,17 @@ void MicrosoftRecordLayoutBuilder::injectVBPtr(const CXXRecordDecl *RD) {
CharUnits InjectionSite = VBPtrOffset;
// But before we do, make sure it's properly aligned.
VBPtrOffset = VBPtrOffset.alignTo(PointerInfo.Alignment);
+ // Determine where the first field should be laid out after the vbptr.
+ CharUnits FieldStart = VBPtrOffset + PointerInfo.Size;
// Shift everything after the vbptr down, unless we're using an external
// layout.
- if (UseExternalLayout)
+ if (UseExternalLayout) {
+ // It is possible that there were no fields or bases located after vbptr,
+ // so the size was not adjusted before.
+ if (Size < FieldStart)
+ Size = FieldStart;
return;
- // Determine where the first field should be laid out after the vbptr.
- CharUnits FieldStart = VBPtrOffset + PointerInfo.Size;
+ }
// Make sure that the amount we push the fields back by is a multiple of the
// alignment.
CharUnits Offset = (FieldStart - InjectionSite)
@@ -2772,8 +2823,14 @@ void MicrosoftRecordLayoutBuilder::injectVFPtr(const CXXRecordDecl *RD) {
if (HasVBPtr)
VBPtrOffset += Offset;
- if (UseExternalLayout)
+ if (UseExternalLayout) {
+ // The class may have no bases or fields, but still have a vfptr
+ // (e.g. it's an interface class). The size was not correctly set before
+ // in this case.
+ if (FieldOffsets.empty() && Bases.empty())
+ Size += Offset;
return;
+ }
Size += Offset;
@@ -3276,10 +3333,10 @@ static void DumpRecordLayout(raw_ostream &OS, const RecordDecl *RD,
}
// Sort nvbases by offset.
- std::stable_sort(Bases.begin(), Bases.end(),
- [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
- return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R);
- });
+ llvm::stable_sort(
+ Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
+ return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R);
+ });
// Dump (non-virtual) bases
for (const CXXRecordDecl *Base : Bases) {
diff --git a/lib/AST/ScanfFormatString.cpp b/lib/AST/ScanfFormatString.cpp
index 08ba7a7a4f5c..8d763f28e57f 100644
--- a/lib/AST/ScanfFormatString.cpp
+++ b/lib/AST/ScanfFormatString.cpp
@@ -1,9 +1,8 @@
//= ScanfFormatString.cpp - Analysis of printf format strings --*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -143,7 +142,7 @@ static ScanfSpecifierResult ParseScanfSpecifier(FormatStringHandler &H,
}
// Look for the length modifier.
- if (ParseLengthModifier(FS, I, E, LO, /*scanf=*/true) && I == E) {
+ if (ParseLengthModifier(FS, I, E, LO, /*IsScanf=*/true) && I == E) {
// No more characters left?
H.HandleIncompleteSpecifier(Start, E - Start);
return true;
@@ -262,9 +261,10 @@ ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const {
case LengthModifier::AsInt32:
case LengthModifier::AsInt3264:
case LengthModifier::AsWide:
+ case LengthModifier::AsShortLong:
return ArgType::Invalid();
}
- llvm_unreachable("Unsupported LenghtModifier Type");
+ llvm_unreachable("Unsupported LengthModifier Type");
// Unsigned int.
case ConversionSpecifier::oArg:
@@ -302,9 +302,10 @@ ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const {
case LengthModifier::AsInt32:
case LengthModifier::AsInt3264:
case LengthModifier::AsWide:
+ case LengthModifier::AsShortLong:
return ArgType::Invalid();
}
- llvm_unreachable("Unsupported LenghtModifier Type");
+ llvm_unreachable("Unsupported LengthModifier Type");
// Float.
case ConversionSpecifier::aArg:
@@ -397,6 +398,7 @@ ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const {
case LengthModifier::AsInt32:
case LengthModifier::AsInt3264:
case LengthModifier::AsWide:
+ case LengthModifier::AsShortLong:
return ArgType::Invalid();
}
@@ -502,7 +504,7 @@ bool ScanfSpecifier::fixType(QualType QT, QualType RawQT,
namedTypeToLengthModifier(PT, LM);
// If fixing the length modifier was enough, we are done.
- if (hasValidLengthModifier(Ctx.getTargetInfo())) {
+ if (hasValidLengthModifier(Ctx.getTargetInfo(), LangOpt)) {
const analyze_scanf::ArgType &AT = getArgType(Ctx);
if (AT.isValid() && AT.matchesType(Ctx, QT))
return true;
diff --git a/lib/AST/SelectorLocationsKind.cpp b/lib/AST/SelectorLocationsKind.cpp
index 8b72c85d7ef7..2c34c9c60c2b 100644
--- a/lib/AST/SelectorLocationsKind.cpp
+++ b/lib/AST/SelectorLocationsKind.cpp
@@ -1,9 +1,8 @@
//===--- SelectorLocationsKind.cpp - Kind of selector locations -*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/AST/Stmt.cpp b/lib/AST/Stmt.cpp
index 116291bfa1ef..0a4d403106bd 100644
--- a/lib/AST/Stmt.cpp
+++ b/lib/AST/Stmt.cpp
@@ -1,9 +1,8 @@
//===- Stmt.cpp - Statement AST Node Implementation -----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -118,30 +117,6 @@ void Stmt::EnableStatistics() {
StatisticsEnabled = true;
}
-Stmt *Stmt::IgnoreImplicit() {
- Stmt *s = this;
-
- Stmt *lasts = nullptr;
-
- while (s != lasts) {
- lasts = s;
-
- if (auto *fe = dyn_cast<FullExpr>(s))
- s = fe->getSubExpr();
-
- if (auto *mte = dyn_cast<MaterializeTemporaryExpr>(s))
- s = mte->GetTemporaryExpr();
-
- if (auto *bte = dyn_cast<CXXBindTemporaryExpr>(s))
- s = bte->getSubExpr();
-
- if (auto *ice = dyn_cast<ImplicitCastExpr>(s))
- s = ice->getSubExpr();
- }
-
- return s;
-}
-
/// Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *Stmt::IgnoreContainers(bool IgnoreCaptured) {
@@ -345,6 +320,23 @@ CompoundStmt *CompoundStmt::CreateEmpty(const ASTContext &C,
return New;
}
+const Expr *ValueStmt::getExprStmt() const {
+ const Stmt *S = this;
+ do {
+ if (const auto *E = dyn_cast<Expr>(S))
+ return E;
+
+ if (const auto *LS = dyn_cast<LabelStmt>(S))
+ S = LS->getSubStmt();
+ else if (const auto *AS = dyn_cast<AttributedStmt>(S))
+ S = AS->getSubStmt();
+ else
+ llvm_unreachable("unknown kind of ValueStmt");
+ } while (isa<ValueStmt>(S));
+
+ return nullptr;
+}
+
const char *LabelStmt::getName() const {
return getDecl()->getIdentifier()->getNameStart();
}
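A hedged illustration, with hypothetical statements, of what ValueStmt::getExprStmt() looks through:

    // retry: compute();   // a LabelStmt wrapping an expression statement;
    //                     // getExprStmt() returns the 'compute()' call, and it
    //                     // likewise unwraps AttributedStmt nodes.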
@@ -452,6 +444,14 @@ void GCCAsmStmt::setInputExpr(unsigned i, Expr *E) {
Exprs[i + NumOutputs] = E;
}
+AddrLabelExpr *GCCAsmStmt::getLabelExpr(unsigned i) const {
+ return cast<AddrLabelExpr>(Exprs[i + NumInputs]);
+}
+
+StringRef GCCAsmStmt::getLabelName(unsigned i) const {
+ return getLabelExpr(i)->getLabel()->getName();
+}
+
/// getInputConstraint - Return the specified input constraint. Unlike output
/// constraints, these can be empty.
StringRef GCCAsmStmt::getInputConstraint(unsigned i) const {
@@ -464,13 +464,16 @@ void GCCAsmStmt::setOutputsAndInputsAndClobbers(const ASTContext &C,
Stmt **Exprs,
unsigned NumOutputs,
unsigned NumInputs,
+ unsigned NumLabels,
StringLiteral **Clobbers,
unsigned NumClobbers) {
this->NumOutputs = NumOutputs;
this->NumInputs = NumInputs;
this->NumClobbers = NumClobbers;
+ this->NumLabels = NumLabels;
+ assert(!(NumOutputs && NumLabels) && "asm goto cannot have outputs");
- unsigned NumExprs = NumOutputs + NumInputs;
+ unsigned NumExprs = NumOutputs + NumInputs + NumLabels;
C.Deallocate(this->Names);
this->Names = new (C) IdentifierInfo*[NumExprs];
@@ -480,9 +483,10 @@ void GCCAsmStmt::setOutputsAndInputsAndClobbers(const ASTContext &C,
this->Exprs = new (C) Stmt*[NumExprs];
std::copy(Exprs, Exprs + NumExprs, this->Exprs);
+ unsigned NumConstraints = NumOutputs + NumInputs;
C.Deallocate(this->Constraints);
- this->Constraints = new (C) StringLiteral*[NumExprs];
- std::copy(Constraints, Constraints + NumExprs, this->Constraints);
+ this->Constraints = new (C) StringLiteral*[NumConstraints];
+ std::copy(Constraints, Constraints + NumConstraints, this->Constraints);
C.Deallocate(this->Clobbers);
this->Clobbers = new (C) StringLiteral*[NumClobbers];
@@ -505,6 +509,10 @@ int GCCAsmStmt::getNamedOperand(StringRef SymbolicName) const {
if (getInputName(i) == SymbolicName)
return getNumOutputs() + NumPlusOperands + i;
+ for (unsigned i = 0, e = getNumLabels(); i != e; ++i)
+ if (getLabelName(i) == SymbolicName)
+ return i + getNumInputs();
+
// Not found.
return -1;
}
@@ -622,8 +630,8 @@ unsigned GCCAsmStmt::AnalyzeAsmString(SmallVectorImpl<AsmStringPiece>&Pieces,
while (CurPtr != StrEnd && isDigit(*CurPtr))
N = N*10 + ((*CurPtr++)-'0');
- unsigned NumOperands =
- getNumOutputs() + getNumPlusOperands() + getNumInputs();
+ unsigned NumOperands = getNumOutputs() + getNumPlusOperands() +
+ getNumInputs() + getNumLabels();
if (N >= NumOperands) {
DiagOffs = CurPtr-StrStart-1;
return diag::err_asm_invalid_operand_number;
@@ -736,10 +744,12 @@ GCCAsmStmt::GCCAsmStmt(const ASTContext &C, SourceLocation asmloc,
unsigned numinputs, IdentifierInfo **names,
StringLiteral **constraints, Expr **exprs,
StringLiteral *asmstr, unsigned numclobbers,
- StringLiteral **clobbers, SourceLocation rparenloc)
+ StringLiteral **clobbers, unsigned numlabels,
+ SourceLocation rparenloc)
: AsmStmt(GCCAsmStmtClass, asmloc, issimple, isvolatile, numoutputs,
- numinputs, numclobbers), RParenLoc(rparenloc), AsmStr(asmstr) {
- unsigned NumExprs = NumOutputs + NumInputs;
+ numinputs, numclobbers),
+ RParenLoc(rparenloc), AsmStr(asmstr), NumLabels(numlabels) {
+ unsigned NumExprs = NumOutputs + NumInputs + NumLabels;
Names = new (C) IdentifierInfo*[NumExprs];
std::copy(names, names + NumExprs, Names);
@@ -747,8 +757,9 @@ GCCAsmStmt::GCCAsmStmt(const ASTContext &C, SourceLocation asmloc,
Exprs = new (C) Stmt*[NumExprs];
std::copy(exprs, exprs + NumExprs, Exprs);
- Constraints = new (C) StringLiteral*[NumExprs];
- std::copy(constraints, constraints + NumExprs, Constraints);
+ unsigned NumConstraints = NumOutputs + NumInputs;
+ Constraints = new (C) StringLiteral*[NumConstraints];
+ std::copy(constraints, constraints + NumConstraints, Constraints);
Clobbers = new (C) StringLiteral*[NumClobbers];
std::copy(clobbers, clobbers + NumClobbers, Clobbers);
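A hedged sketch of the GNU 'asm goto' form whose label operands the new NumLabels bookkeeping models; the assembly and names are illustrative (x86):

    void jump_if_zero(int x) {
      asm goto("testl %0, %0; jz %l1"  // %l1 names the first label operand
               :                       // no outputs: 'asm goto' cannot have outputs
               : "r"(x)                // inputs
               : "cc"                  // clobbers
               : error);               // labels, via getLabelExpr()/getLabelName()
      return;
    error:
      return;
    }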
@@ -1262,6 +1273,10 @@ Stmt::child_range CapturedStmt::children() {
return child_range(getStoredStmts(), getStoredStmts() + NumCaptures);
}
+Stmt::const_child_range CapturedStmt::children() const {
+ return const_child_range(getStoredStmts(), getStoredStmts() + NumCaptures);
+}
+
CapturedDecl *CapturedStmt::getCapturedDecl() {
return CapDeclAndKind.getPointer();
}
diff --git a/lib/AST/StmtCXX.cpp b/lib/AST/StmtCXX.cpp
index 12367f8fd54b..060d090fc06a 100644
--- a/lib/AST/StmtCXX.cpp
+++ b/lib/AST/StmtCXX.cpp
@@ -1,9 +1,8 @@
//===--- StmtCXX.cpp - Classes for representing C++ statements ------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/AST/StmtIterator.cpp b/lib/AST/StmtIterator.cpp
index 00056e83af2c..70c8dc94e2df 100644
--- a/lib/AST/StmtIterator.cpp
+++ b/lib/AST/StmtIterator.cpp
@@ -1,9 +1,8 @@
//===- StmtIterator.cpp - Iterators for Statements ------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/AST/StmtObjC.cpp b/lib/AST/StmtObjC.cpp
index ed21e2d0d2b6..3d586795517c 100644
--- a/lib/AST/StmtObjC.cpp
+++ b/lib/AST/StmtObjC.cpp
@@ -1,9 +1,8 @@
//===--- StmtObjC.cpp - Classes for representing ObjC statements ---------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/AST/StmtOpenMP.cpp b/lib/AST/StmtOpenMP.cpp
index 85a2daa0801a..4e829897cebe 100644
--- a/lib/AST/StmtOpenMP.cpp
+++ b/lib/AST/StmtOpenMP.cpp
@@ -1,9 +1,8 @@
//===--- StmtOpenMP.cpp - Classes for OpenMP directives -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -23,6 +22,25 @@ void OMPExecutableDirective::setClauses(ArrayRef<OMPClause *> Clauses) {
std::copy(Clauses.begin(), Clauses.end(), getClauses().begin());
}
+bool OMPExecutableDirective::isStandaloneDirective() const {
+ // Special case: 'omp target enter data', 'omp target exit data',
+ // 'omp target update' are stand-alone directives, but for implementation
+ // reasons they have empty synthetic structured block, to simplify codegen.
+ if (isa<OMPTargetEnterDataDirective>(this) ||
+ isa<OMPTargetExitDataDirective>(this) ||
+ isa<OMPTargetUpdateDirective>(this))
+ return true;
+ return !hasAssociatedStmt() || !getAssociatedStmt();
+}
+
+const Stmt *OMPExecutableDirective::getStructuredBlock() const {
+ assert(!isStandaloneDirective() &&
+ "Standalone Executable Directives don't have Structured Blocks.");
+ if (auto *LD = dyn_cast<OMPLoopDirective>(this))
+ return LD->getBody();
+ return getInnermostCapturedStmt()->getCapturedStmt();
+}
+
void OMPLoopDirective::setCounters(ArrayRef<Expr *> A) {
assert(A.size() == getCollapsedNumber() &&
"Number of loop counters is not the same as the collapsed number");
diff --git a/lib/AST/StmtPrinter.cpp b/lib/AST/StmtPrinter.cpp
index ae726e387107..46802d765e1f 100644
--- a/lib/AST/StmtPrinter.cpp
+++ b/lib/AST/StmtPrinter.cpp
@@ -1,9 +1,8 @@
//===- StmtPrinter.cpp - Printing implementation for Stmt ASTs ------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -37,6 +36,7 @@
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/JsonSupport.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/Lambda.h"
#include "clang/Basic/OpenMPKinds.h"
@@ -414,12 +414,15 @@ void StmtPrinter::VisitGCCAsmStmt(GCCAsmStmt *Node) {
if (Node->isVolatile())
OS << "volatile ";
+ if (Node->isAsmGoto())
+ OS << "goto ";
+
OS << "(";
VisitStringLiteral(Node->getAsmString());
// Outputs
if (Node->getNumOutputs() != 0 || Node->getNumInputs() != 0 ||
- Node->getNumClobbers() != 0)
+ Node->getNumClobbers() != 0 || Node->getNumLabels() != 0)
OS << " : ";
for (unsigned i = 0, e = Node->getNumOutputs(); i != e; ++i) {
@@ -439,7 +442,8 @@ void StmtPrinter::VisitGCCAsmStmt(GCCAsmStmt *Node) {
}
// Inputs
- if (Node->getNumInputs() != 0 || Node->getNumClobbers() != 0)
+ if (Node->getNumInputs() != 0 || Node->getNumClobbers() != 0 ||
+ Node->getNumLabels() != 0)
OS << " : ";
for (unsigned i = 0, e = Node->getNumInputs(); i != e; ++i) {
@@ -459,7 +463,7 @@ void StmtPrinter::VisitGCCAsmStmt(GCCAsmStmt *Node) {
}
// Clobbers
- if (Node->getNumClobbers() != 0)
+ if (Node->getNumClobbers() != 0 || Node->getNumLabels())
OS << " : ";
for (unsigned i = 0, e = Node->getNumClobbers(); i != e; ++i) {
@@ -469,6 +473,16 @@ void StmtPrinter::VisitGCCAsmStmt(GCCAsmStmt *Node) {
VisitStringLiteral(Node->getClobberStringLiteral(i));
}
+ // Labels
+ if (Node->getNumLabels() != 0)
+ OS << " : ";
+
+ for (unsigned i = 0, e = Node->getNumLabels(); i != e; ++i) {
+ if (i != 0)
+ OS << ", ";
+ OS << Node->getLabelName(i);
+ }
+
OS << ");";
if (Policy.IncludeNewlines) OS << NL;
}
@@ -906,6 +920,10 @@ void StmtPrinter::VisitOMPTargetTeamsDistributeSimdDirective(
// Expr printing methods.
//===----------------------------------------------------------------------===//
+void StmtPrinter::VisitSourceLocExpr(SourceLocExpr *Node) {
+ OS << Node->getBuiltinStr() << "()";
+}
+
void StmtPrinter::VisitConstantExpr(ConstantExpr *Node) {
PrintExpr(Node->getSubExpr());
}
@@ -1262,15 +1280,15 @@ void StmtPrinter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *Node){
void StmtPrinter::VisitGenericSelectionExpr(GenericSelectionExpr *Node) {
OS << "_Generic(";
PrintExpr(Node->getControllingExpr());
- for (unsigned i = 0; i != Node->getNumAssocs(); ++i) {
+ for (const GenericSelectionExpr::Association &Assoc : Node->associations()) {
OS << ", ";
- QualType T = Node->getAssocType(i);
+ QualType T = Assoc.getType();
if (T.isNull())
OS << "default";
else
T.print(OS, Policy);
OS << ": ";
- PrintExpr(Node->getAssocExpr(i));
+ PrintExpr(Assoc.getAssociationExpr());
}
OS << ")";
}
@@ -1604,21 +1622,14 @@ void StmtPrinter::VisitAtomicExpr(AtomicExpr *Node) {
// C++
void StmtPrinter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *Node) {
- const char *OpStrings[NUM_OVERLOADED_OPERATORS] = {
- "",
-#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
- Spelling,
-#include "clang/Basic/OperatorKinds.def"
- };
-
OverloadedOperatorKind Kind = Node->getOperator();
if (Kind == OO_PlusPlus || Kind == OO_MinusMinus) {
if (Node->getNumArgs() == 1) {
- OS << OpStrings[Kind] << ' ';
+ OS << getOperatorSpelling(Kind) << ' ';
PrintExpr(Node->getArg(0));
} else {
PrintExpr(Node->getArg(0));
- OS << ' ' << OpStrings[Kind];
+ OS << ' ' << getOperatorSpelling(Kind);
}
} else if (Kind == OO_Arrow) {
PrintExpr(Node->getArg(0));
@@ -1638,11 +1649,11 @@ void StmtPrinter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *Node) {
PrintExpr(Node->getArg(1));
OS << ']';
} else if (Node->getNumArgs() == 1) {
- OS << OpStrings[Kind] << ' ';
+ OS << getOperatorSpelling(Kind) << ' ';
PrintExpr(Node->getArg(0));
} else if (Node->getNumArgs() == 2) {
PrintExpr(Node->getArg(0));
- OS << ' ' << OpStrings[Kind] << ' ';
+ OS << ' ' << getOperatorSpelling(Kind) << ' ';
PrintExpr(Node->getArg(1));
} else {
llvm_unreachable("unknown overloaded operator");
@@ -1692,6 +1703,14 @@ void StmtPrinter::VisitCXXConstCastExpr(CXXConstCastExpr *Node) {
VisitCXXNamedCastExpr(Node);
}
+void StmtPrinter::VisitBuiltinBitCastExpr(BuiltinBitCastExpr *Node) {
+ OS << "__builtin_bit_cast(";
+ Node->getTypeInfoAsWritten()->getType().print(OS, Policy);
+ OS << ", ";
+ PrintExpr(Node->getSubExpr());
+ OS << ")";
+}
+
void StmtPrinter::VisitCXXTypeidExpr(CXXTypeidExpr *Node) {
OS << "typeid(";
if (Node->isTypeOperand()) {
@@ -1896,13 +1915,22 @@ void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) {
llvm_unreachable("VLA type in explicit captures.");
}
+ if (C->isPackExpansion())
+ OS << "...";
+
if (Node->isInitCapture(C))
PrintExpr(C->getCapturedVar()->getInit());
}
OS << ']';
+ if (!Node->getExplicitTemplateParameters().empty()) {
+ Node->getTemplateParameterList()->print(
+ OS, Node->getLambdaClass()->getASTContext(),
+ /*OmitTemplateKW*/true);
+ }
+
if (Node->hasExplicitParameters()) {
- OS << " (";
+ OS << '(';
CXXMethodDecl *Method = Node->getCallOperator();
NeedComma = false;
for (const auto *P : Method->parameters()) {
@@ -1937,9 +1965,11 @@ void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) {
}
// Print the body.
- CompoundStmt *Body = Node->getBody();
OS << ' ';
- PrintStmt(Body);
+ if (Policy.TerseOutput)
+ OS << "{}";
+ else
+ PrintRawCompoundStmt(Node->getBody());
}
void StmtPrinter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *Node) {
@@ -1969,10 +1999,11 @@ void StmtPrinter::VisitCXXNewExpr(CXXNewExpr *E) {
if (E->isParenTypeId())
OS << "(";
std::string TypeS;
- if (Expr *Size = E->getArraySize()) {
+ if (Optional<Expr *> Size = E->getArraySize()) {
llvm::raw_string_ostream s(TypeS);
s << '[';
- Size->printPretty(s, Helper, Policy);
+ if (*Size)
+ (*Size)->printPretty(s, Helper, Policy);
s << ']';
}
E->getAllocatedType().print(OS, Policy, TypeS);
@@ -2380,12 +2411,21 @@ void Stmt::dumpPretty(const ASTContext &Context) const {
printPretty(llvm::errs(), nullptr, PrintingPolicy(Context.getLangOpts()));
}
-void Stmt::printPretty(raw_ostream &OS, PrinterHelper *Helper,
+void Stmt::printPretty(raw_ostream &Out, PrinterHelper *Helper,
const PrintingPolicy &Policy, unsigned Indentation,
- StringRef NL,
- const ASTContext *Context) const {
- StmtPrinter P(OS, Helper, Policy, Indentation, NL, Context);
- P.Visit(const_cast<Stmt*>(this));
+ StringRef NL, const ASTContext *Context) const {
+ StmtPrinter P(Out, Helper, Policy, Indentation, NL, Context);
+ P.Visit(const_cast<Stmt *>(this));
+}
+
+void Stmt::printJson(raw_ostream &Out, PrinterHelper *Helper,
+ const PrintingPolicy &Policy, bool AddQuotes) const {
+ std::string Buf;
+ llvm::raw_string_ostream TempOut(Buf);
+
+ printPretty(TempOut, Helper, Policy);
+
+ Out << JsonFormat(TempOut.str(), AddQuotes);
}
//===----------------------------------------------------------------------===//
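
The StmtPrinter.cpp changes cover pretty-printing for several constructs: asm goto label lists, the source-location builtins, __builtin_bit_cast, generic selections walked through their associations, lambdas with an explicit template parameter list, and the now-optional array size of a new-expression. A small illustrative function (assumes C++20 for the lambda template head; not part of the patch) whose statements hit those printer paths:

  // Illustrative inputs only; each statement corresponds to a printer path touched above.
  void examples() {
    const char *where = __builtin_FILE();                  // SourceLocExpr
    float f = __builtin_bit_cast(float, 0x3f800000);       // BuiltinBitCastExpr
    auto add = []<typename T>(T a, T b) { return a + b; }; // explicit lambda template parameters
    int *p = new int[1];                                    // CXXNewExpr with an array size
    (void)where; (void)f; (void)add(1, 2);
    delete[] p;
  }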
diff --git a/lib/AST/StmtProfile.cpp b/lib/AST/StmtProfile.cpp
index ec4dac03d497..f92c3dc60ba5 100644
--- a/lib/AST/StmtProfile.cpp
+++ b/lib/AST/StmtProfile.cpp
@@ -1,9 +1,8 @@
//===---- StmtProfile.cpp - Profile implementation for Stmt ASTs ----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -322,6 +321,9 @@ void StmtProfiler::VisitGCCAsmStmt(const GCCAsmStmt *S) {
ID.AddInteger(S->getNumClobbers());
for (unsigned I = 0, N = S->getNumClobbers(); I != N; ++I)
VisitStringLiteral(S->getClobberStringLiteral(I));
+ ID.AddInteger(S->getNumLabels());
+ for (auto *L : S->labels())
+ VisitDecl(L->getLabel());
}
void StmtProfiler::VisitMSAsmStmt(const MSAsmStmt *S) {
@@ -458,6 +460,11 @@ void OMPClauseProfiler::VisitOMPSimdlenClause(const OMPSimdlenClause *C) {
Profiler->VisitStmt(C->getSimdlen());
}
+void OMPClauseProfiler::VisitOMPAllocatorClause(const OMPAllocatorClause *C) {
+ if (C->getAllocator())
+ Profiler->VisitStmt(C->getAllocator());
+}
+
void OMPClauseProfiler::VisitOMPCollapseClause(const OMPCollapseClause *C) {
if (C->getNumForLoops())
Profiler->VisitStmt(C->getNumForLoops());
@@ -712,6 +719,11 @@ void OMPClauseProfiler::VisitOMPDeviceClause(const OMPDeviceClause *C) {
void OMPClauseProfiler::VisitOMPMapClause(const OMPMapClause *C) {
VisitOMPClauseList(C);
}
+void OMPClauseProfiler::VisitOMPAllocateClause(const OMPAllocateClause *C) {
+ if (Expr *Allocator = C->getAllocator())
+ Profiler->VisitStmt(Allocator);
+ VisitOMPClauseList(C);
+}
void OMPClauseProfiler::VisitOMPNumTeamsClause(const OMPNumTeamsClause *C) {
VistOMPClauseWithPreInit(C);
if (C->getNumTeams())
@@ -1260,13 +1272,14 @@ void StmtProfiler::VisitBlockExpr(const BlockExpr *S) {
void StmtProfiler::VisitGenericSelectionExpr(const GenericSelectionExpr *S) {
VisitExpr(S);
- for (unsigned i = 0; i != S->getNumAssocs(); ++i) {
- QualType T = S->getAssocType(i);
+ for (const GenericSelectionExpr::ConstAssociation &Assoc :
+ S->associations()) {
+ QualType T = Assoc.getType();
if (T.isNull())
ID.AddPointer(nullptr);
else
VisitType(T);
- VisitExpr(S->getAssocExpr(i));
+ VisitExpr(Assoc.getAssociationExpr());
}
}
@@ -1556,6 +1569,11 @@ void StmtProfiler::VisitCXXConstCastExpr(const CXXConstCastExpr *S) {
VisitCXXNamedCastExpr(S);
}
+void StmtProfiler::VisitBuiltinBitCastExpr(const BuiltinBitCastExpr *S) {
+ VisitExpr(S);
+ VisitType(S->getTypeInfoAsWritten()->getType());
+}
+
void StmtProfiler::VisitUserDefinedLiteral(const UserDefinedLiteral *S) {
VisitCallExpr(S);
}
@@ -1872,6 +1890,10 @@ void StmtProfiler::VisitTypoExpr(const TypoExpr *E) {
VisitExpr(E);
}
+void StmtProfiler::VisitSourceLocExpr(const SourceLocExpr *E) {
+ VisitExpr(E);
+}
+
void StmtProfiler::VisitObjCStringLiteral(const ObjCStringLiteral *S) {
VisitExpr(S);
}
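
StmtProfile.cpp now also hashes asm goto labels, the new OpenMP allocator and allocate clauses, __builtin_bit_cast, and the source-location builtins, and it walks _Generic through its associations rather than by index. For reference, an illustrative C generic selection of the kind those associations model (not from the patch):

  /* Illustrative only: each "type: expression" pair is one association. */
  #define type_name(x) _Generic((x), \
      int:     "int",                \
      float:   "float",              \
      default: "other")

  const char *describe(void) { return type_name(1.0f); }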
diff --git a/lib/AST/StmtViz.cpp b/lib/AST/StmtViz.cpp
index 8be287e7cb21..4eb0da8a0e10 100644
--- a/lib/AST/StmtViz.cpp
+++ b/lib/AST/StmtViz.cpp
@@ -1,9 +1,8 @@
//===--- StmtViz.cpp - Graphviz visualization for Stmt ASTs -----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/AST/TemplateBase.cpp b/lib/AST/TemplateBase.cpp
index a78927d229b9..cb4cbd2f76a1 100644
--- a/lib/AST/TemplateBase.cpp
+++ b/lib/AST/TemplateBase.cpp
@@ -1,9 +1,8 @@
//===- TemplateBase.cpp - Common template AST class implementation --------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/AST/TemplateName.cpp b/lib/AST/TemplateName.cpp
index 0a7a6bc3c6a7..06e1dcec7449 100644
--- a/lib/AST/TemplateName.cpp
+++ b/lib/AST/TemplateName.cpp
@@ -1,9 +1,8 @@
//===- TemplateName.cpp - C++ Template Name Representation ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -67,6 +66,8 @@ TemplateName::TemplateName(void *Ptr) {
TemplateName::TemplateName(TemplateDecl *Template) : Storage(Template) {}
TemplateName::TemplateName(OverloadedTemplateStorage *Storage)
: Storage(Storage) {}
+TemplateName::TemplateName(AssumedTemplateStorage *Storage)
+ : Storage(Storage) {}
TemplateName::TemplateName(SubstTemplateTemplateParmStorage *Storage)
: Storage(Storage) {}
TemplateName::TemplateName(SubstTemplateTemplateParmPackStorage *Storage)
@@ -88,6 +89,8 @@ TemplateName::NameKind TemplateName::getKind() const {
= Storage.get<UncommonTemplateNameStorage*>();
if (uncommon->getAsOverloadedStorage())
return OverloadedTemplate;
+ if (uncommon->getAsAssumedTemplateName())
+ return AssumedTemplate;
if (uncommon->getAsSubstTemplateTemplateParm())
return SubstTemplateTemplateParm;
return SubstTemplateTemplateParmPack;
@@ -114,6 +117,14 @@ OverloadedTemplateStorage *TemplateName::getAsOverloadedTemplate() const {
return nullptr;
}
+AssumedTemplateStorage *TemplateName::getAsAssumedTemplateName() const {
+ if (UncommonTemplateNameStorage *Uncommon =
+ Storage.dyn_cast<UncommonTemplateNameStorage *>())
+ return Uncommon->getAsAssumedTemplateName();
+
+ return nullptr;
+}
+
SubstTemplateTemplateParmStorage *
TemplateName::getAsSubstTemplateTemplateParm() const {
if (UncommonTemplateNameStorage *uncommon =
@@ -231,7 +242,9 @@ TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy,
} else if (SubstTemplateTemplateParmPackStorage *SubstPack
= getAsSubstTemplateTemplateParmPack())
OS << *SubstPack->getParameterPack();
- else {
+ else if (AssumedTemplateStorage *Assumed = getAsAssumedTemplateName()) {
+ Assumed->getDeclName().print(OS, Policy);
+ } else {
OverloadedTemplateStorage *OTS = getAsOverloadedTemplate();
(*OTS->begin())->printName(OS);
}
@@ -251,6 +264,20 @@ const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
return DB << NameStr;
}
+const PartialDiagnostic&clang::operator<<(const PartialDiagnostic &PD,
+ TemplateName N) {
+ std::string NameStr;
+ llvm::raw_string_ostream OS(NameStr);
+ LangOptions LO;
+ LO.CPlusPlus = true;
+ LO.Bool = true;
+ OS << '\'';
+ N.print(OS, PrintingPolicy(LO));
+ OS << '\'';
+ OS.flush();
+ return PD << NameStr;
+}
+
void TemplateName::dump(raw_ostream &OS) const {
LangOptions LO; // FIXME!
LO.CPlusPlus = true;
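
The TemplateName.cpp hunks add an AssumedTemplate name kind and a PartialDiagnostic stream operator that prints the name in single quotes, matching the existing DiagnosticBuilder overload. As far as this patch shows, an assumed template name stands for a name that is treated as naming a template before any declaration for it has been found; a hypothetical input in that spirit (an assumption for illustration, not stated in the patch):

  // Hypothetical illustration: 'f' is not visible at the call site, but the
  // explicit template argument list lets it be assumed to name a template and
  // then be found by argument-dependent lookup.
  namespace N {
    struct A {};
    template <typename T> void f(T) {}
  }
  void call(N::A a) {
    f<N::A>(a);
  }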
diff --git a/lib/AST/TextNodeDumper.cpp b/lib/AST/TextNodeDumper.cpp
index b51a9006226a..cba9091b1065 100644
--- a/lib/AST/TextNodeDumper.cpp
+++ b/lib/AST/TextNodeDumper.cpp
@@ -1,9 +1,8 @@
//===--- TextNodeDumper.cpp - Printing of AST nodes -----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -122,6 +121,9 @@ void TextNodeDumper::Visit(const Stmt *Node) {
dumpPointer(Node);
dumpSourceRange(Node->getSourceRange());
+ if (Node->isOMPStructuredBlock())
+ OS << " openmp_structured_block";
+
if (const auto *E = dyn_cast<Expr>(Node)) {
dumpType(E->getType());
@@ -221,6 +223,7 @@ void TextNodeDumper::Visit(const Decl *D) {
return;
}
+ Context = &D->getASTContext();
{
ColorScope Color(OS, ShowColors, DeclKindNameColor);
OS << D->getDeclKindName() << "Decl";
@@ -253,9 +256,25 @@ void TextNodeDumper::Visit(const Decl *D) {
if (D->isInvalidDecl())
OS << " invalid";
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
- if (FD->isConstexpr())
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isConstexprSpecified())
OS << " constexpr";
+ if (FD->isConsteval())
+ OS << " consteval";
+ }
+
+ if (!isa<FunctionDecl>(*D)) {
+ const auto *MD = dyn_cast<ObjCMethodDecl>(D);
+ if (!MD || !MD->isThisDeclarationADefinition()) {
+ const auto *DC = dyn_cast<DeclContext>(D);
+ if (DC && DC->hasExternalLexicalStorage()) {
+ ColorScope Color(OS, ShowColors, UndeserializedColor);
+ OS << " <undeserialized declarations>";
+ }
+ }
+ }
+
+ ConstDeclVisitor<TextNodeDumper>::Visit(D);
}
void TextNodeDumper::Visit(const CXXCtorInitializer *Init) {
@@ -302,6 +321,19 @@ void TextNodeDumper::Visit(const OMPClause *C) {
OS << " <implicit>";
}
+void TextNodeDumper::Visit(const GenericSelectionExpr::ConstAssociation &A) {
+ const TypeSourceInfo *TSI = A.getTypeSourceInfo();
+ if (TSI) {
+ OS << "case ";
+ dumpType(TSI->getType());
+ } else {
+ OS << "default";
+ }
+
+ if (A.isSelected())
+ OS << " selected";
+}
+
void TextNodeDumper::dumpPointer(const void *Ptr) {
ColorScope Color(OS, ShowColors, AddressColor);
OS << ' ' << Ptr;
@@ -416,12 +448,6 @@ void TextNodeDumper::dumpAccessSpecifier(AccessSpecifier AS) {
}
}
-void TextNodeDumper::dumpCXXTemporary(const CXXTemporary *Temporary) {
- OS << "(CXXTemporary";
- dumpPointer(Temporary);
- OS << ")";
-}
-
void TextNodeDumper::dumpDeclRef(const Decl *D, StringRef Label) {
if (!D)
return;
@@ -658,6 +684,14 @@ void TextNodeDumper::VisitCaseStmt(const CaseStmt *Node) {
OS << " gnu_range";
}
+void TextNodeDumper::VisitConstantExpr(const ConstantExpr *Node) {
+ if (Node->getResultAPValueKind() != APValue::None) {
+ ColorScope Color(OS, ShowColors, ValueColor);
+ OS << " ";
+ Node->getAPValueResult().printPretty(OS, *Context, Node->getType());
+ }
+}
+
void TextNodeDumper::VisitCallExpr(const CallExpr *Node) {
if (Node->usesADL())
OS << " adl";
@@ -687,6 +721,12 @@ void TextNodeDumper::VisitDeclRefExpr(const DeclRefExpr *Node) {
dumpBareDeclRef(Node->getFoundDecl());
OS << ")";
}
+ switch (Node->isNonOdrUse()) {
+ case NOUR_None: break;
+ case NOUR_Unevaluated: OS << " non_odr_use_unevaluated"; break;
+ case NOUR_Constant: OS << " non_odr_use_constant"; break;
+ case NOUR_Discarded: OS << " non_odr_use_discarded"; break;
+ }
}
void TextNodeDumper::VisitUnresolvedLookupExpr(
@@ -753,6 +793,11 @@ void TextNodeDumper::VisitInitListExpr(const InitListExpr *ILE) {
}
}
+void TextNodeDumper::VisitGenericSelectionExpr(const GenericSelectionExpr *E) {
+ if (E->isResultDependent())
+ OS << " result_dependent";
+}
+
void TextNodeDumper::VisitUnaryOperator(const UnaryOperator *Node) {
OS << " " << (Node->isPostfix() ? "postfix" : "prefix") << " '"
<< UnaryOperator::getOpcodeStr(Node->getOpcode()) << "'";
@@ -786,6 +831,12 @@ void TextNodeDumper::VisitUnaryExprOrTypeTraitExpr(
void TextNodeDumper::VisitMemberExpr(const MemberExpr *Node) {
OS << " " << (Node->isArrow() ? "->" : ".") << *Node->getMemberDecl();
dumpPointer(Node->getMemberDecl());
+ switch (Node->isNonOdrUse()) {
+ case NOUR_None: break;
+ case NOUR_Unevaluated: OS << " non_odr_use_unevaluated"; break;
+ case NOUR_Constant: OS << " non_odr_use_constant"; break;
+ case NOUR_Discarded: OS << " non_odr_use_discarded"; break;
+ }
}
void TextNodeDumper::VisitExtVectorElementExpr(
@@ -824,6 +875,8 @@ void TextNodeDumper::VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *Node) {
}
void TextNodeDumper::VisitCXXThisExpr(const CXXThisExpr *Node) {
+ if (Node->isImplicit())
+ OS << " implicit";
OS << " this";
}
@@ -855,8 +908,9 @@ void TextNodeDumper::VisitCXXConstructExpr(const CXXConstructExpr *Node) {
void TextNodeDumper::VisitCXXBindTemporaryExpr(
const CXXBindTemporaryExpr *Node) {
- OS << " ";
- dumpCXXTemporary(Node->getTemporary());
+ OS << " (CXXTemporary";
+ dumpPointer(Node);
+ OS << ")";
}
void TextNodeDumper::VisitCXXNewExpr(const CXXNewExpr *Node) {
@@ -1096,6 +1150,8 @@ void TextNodeDumper::VisitFunctionProtoType(const FunctionProtoType *T) {
OS << " volatile";
if (T->isRestrict())
OS << " restrict";
+ if (T->getExtProtoInfo().Variadic)
+ OS << " variadic";
switch (EPI.RefQualifier) {
case RQ_None:
break;
@@ -1166,3 +1222,721 @@ void TextNodeDumper::VisitPackExpansionType(const PackExpansionType *T) {
if (auto N = T->getNumExpansions())
OS << " expansions " << *N;
}
+
+void TextNodeDumper::VisitLabelDecl(const LabelDecl *D) { dumpName(D); }
+
+void TextNodeDumper::VisitTypedefDecl(const TypedefDecl *D) {
+ dumpName(D);
+ dumpType(D->getUnderlyingType());
+ if (D->isModulePrivate())
+ OS << " __module_private__";
+}
+
+void TextNodeDumper::VisitEnumDecl(const EnumDecl *D) {
+ if (D->isScoped()) {
+ if (D->isScopedUsingClassTag())
+ OS << " class";
+ else
+ OS << " struct";
+ }
+ dumpName(D);
+ if (D->isModulePrivate())
+ OS << " __module_private__";
+ if (D->isFixed())
+ dumpType(D->getIntegerType());
+}
+
+void TextNodeDumper::VisitRecordDecl(const RecordDecl *D) {
+ OS << ' ' << D->getKindName();
+ dumpName(D);
+ if (D->isModulePrivate())
+ OS << " __module_private__";
+ if (D->isCompleteDefinition())
+ OS << " definition";
+}
+
+void TextNodeDumper::VisitEnumConstantDecl(const EnumConstantDecl *D) {
+ dumpName(D);
+ dumpType(D->getType());
+}
+
+void TextNodeDumper::VisitIndirectFieldDecl(const IndirectFieldDecl *D) {
+ dumpName(D);
+ dumpType(D->getType());
+
+ for (const auto *Child : D->chain())
+ dumpDeclRef(Child);
+}
+
+void TextNodeDumper::VisitFunctionDecl(const FunctionDecl *D) {
+ dumpName(D);
+ dumpType(D->getType());
+
+ StorageClass SC = D->getStorageClass();
+ if (SC != SC_None)
+ OS << ' ' << VarDecl::getStorageClassSpecifierString(SC);
+ if (D->isInlineSpecified())
+ OS << " inline";
+ if (D->isVirtualAsWritten())
+ OS << " virtual";
+ if (D->isModulePrivate())
+ OS << " __module_private__";
+
+ if (D->isPure())
+ OS << " pure";
+ if (D->isDefaulted()) {
+ OS << " default";
+ if (D->isDeleted())
+ OS << "_delete";
+ }
+ if (D->isDeletedAsWritten())
+ OS << " delete";
+ if (D->isTrivial())
+ OS << " trivial";
+
+ if (const auto *FPT = D->getType()->getAs<FunctionProtoType>()) {
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ switch (EPI.ExceptionSpec.Type) {
+ default:
+ break;
+ case EST_Unevaluated:
+ OS << " noexcept-unevaluated " << EPI.ExceptionSpec.SourceDecl;
+ break;
+ case EST_Uninstantiated:
+ OS << " noexcept-uninstantiated " << EPI.ExceptionSpec.SourceTemplate;
+ break;
+ }
+ }
+
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (MD->size_overridden_methods() != 0) {
+ auto dumpOverride = [=](const CXXMethodDecl *D) {
+ SplitQualType T_split = D->getType().split();
+ OS << D << " " << D->getParent()->getName()
+ << "::" << D->getNameAsString() << " '"
+ << QualType::getAsString(T_split, PrintPolicy) << "'";
+ };
+
+ AddChild([=] {
+ auto Overrides = MD->overridden_methods();
+ OS << "Overrides: [ ";
+ dumpOverride(*Overrides.begin());
+ for (const auto *Override :
+ llvm::make_range(Overrides.begin() + 1, Overrides.end())) {
+ OS << ", ";
+ dumpOverride(Override);
+ }
+ OS << " ]";
+ });
+ }
+ }
+
+ // Since NumParams comes from the FunctionProtoType of the FunctionDecl and
+ // the Params are set later, it is possible for a dump during debugging to
+ // encounter a FunctionDecl that has been created but hasn't been assigned
+ // ParmVarDecls yet.
+ if (!D->param_empty() && !D->param_begin())
+ OS << " <<<NULL params x " << D->getNumParams() << ">>>";
+}
+
+void TextNodeDumper::VisitFieldDecl(const FieldDecl *D) {
+ dumpName(D);
+ dumpType(D->getType());
+ if (D->isMutable())
+ OS << " mutable";
+ if (D->isModulePrivate())
+ OS << " __module_private__";
+}
+
+void TextNodeDumper::VisitVarDecl(const VarDecl *D) {
+ dumpName(D);
+ dumpType(D->getType());
+ StorageClass SC = D->getStorageClass();
+ if (SC != SC_None)
+ OS << ' ' << VarDecl::getStorageClassSpecifierString(SC);
+ switch (D->getTLSKind()) {
+ case VarDecl::TLS_None:
+ break;
+ case VarDecl::TLS_Static:
+ OS << " tls";
+ break;
+ case VarDecl::TLS_Dynamic:
+ OS << " tls_dynamic";
+ break;
+ }
+ if (D->isModulePrivate())
+ OS << " __module_private__";
+ if (D->isNRVOVariable())
+ OS << " nrvo";
+ if (D->isInline())
+ OS << " inline";
+ if (D->isConstexpr())
+ OS << " constexpr";
+ if (D->hasInit()) {
+ switch (D->getInitStyle()) {
+ case VarDecl::CInit:
+ OS << " cinit";
+ break;
+ case VarDecl::CallInit:
+ OS << " callinit";
+ break;
+ case VarDecl::ListInit:
+ OS << " listinit";
+ break;
+ }
+ }
+ if (D->isParameterPack())
+ OS << " pack";
+}
+
+void TextNodeDumper::VisitBindingDecl(const BindingDecl *D) {
+ dumpName(D);
+ dumpType(D->getType());
+}
+
+void TextNodeDumper::VisitCapturedDecl(const CapturedDecl *D) {
+ if (D->isNothrow())
+ OS << " nothrow";
+}
+
+void TextNodeDumper::VisitImportDecl(const ImportDecl *D) {
+ OS << ' ' << D->getImportedModule()->getFullModuleName();
+
+ for (Decl *InitD :
+ D->getASTContext().getModuleInitializers(D->getImportedModule()))
+ dumpDeclRef(InitD, "initializer");
+}
+
+void TextNodeDumper::VisitPragmaCommentDecl(const PragmaCommentDecl *D) {
+ OS << ' ';
+ switch (D->getCommentKind()) {
+ case PCK_Unknown:
+ llvm_unreachable("unexpected pragma comment kind");
+ case PCK_Compiler:
+ OS << "compiler";
+ break;
+ case PCK_ExeStr:
+ OS << "exestr";
+ break;
+ case PCK_Lib:
+ OS << "lib";
+ break;
+ case PCK_Linker:
+ OS << "linker";
+ break;
+ case PCK_User:
+ OS << "user";
+ break;
+ }
+ StringRef Arg = D->getArg();
+ if (!Arg.empty())
+ OS << " \"" << Arg << "\"";
+}
+
+void TextNodeDumper::VisitPragmaDetectMismatchDecl(
+ const PragmaDetectMismatchDecl *D) {
+ OS << " \"" << D->getName() << "\" \"" << D->getValue() << "\"";
+}
+
+void TextNodeDumper::VisitOMPExecutableDirective(
+ const OMPExecutableDirective *D) {
+ if (D->isStandaloneDirective())
+ OS << " openmp_standalone_directive";
+}
+
+void TextNodeDumper::VisitOMPDeclareReductionDecl(
+ const OMPDeclareReductionDecl *D) {
+ dumpName(D);
+ dumpType(D->getType());
+ OS << " combiner";
+ dumpPointer(D->getCombiner());
+ if (const auto *Initializer = D->getInitializer()) {
+ OS << " initializer";
+ dumpPointer(Initializer);
+ switch (D->getInitializerKind()) {
+ case OMPDeclareReductionDecl::DirectInit:
+ OS << " omp_priv = ";
+ break;
+ case OMPDeclareReductionDecl::CopyInit:
+ OS << " omp_priv ()";
+ break;
+ case OMPDeclareReductionDecl::CallInit:
+ break;
+ }
+ }
+}
+
+void TextNodeDumper::VisitOMPRequiresDecl(const OMPRequiresDecl *D) {
+ for (const auto *C : D->clauselists()) {
+ AddChild([=] {
+ if (!C) {
+ ColorScope Color(OS, ShowColors, NullColor);
+ OS << "<<<NULL>>> OMPClause";
+ return;
+ }
+ {
+ ColorScope Color(OS, ShowColors, AttrColor);
+ StringRef ClauseName(getOpenMPClauseName(C->getClauseKind()));
+ OS << "OMP" << ClauseName.substr(/*Start=*/0, /*N=*/1).upper()
+ << ClauseName.drop_front() << "Clause";
+ }
+ dumpPointer(C);
+ dumpSourceRange(SourceRange(C->getBeginLoc(), C->getEndLoc()));
+ });
+ }
+}
+
+void TextNodeDumper::VisitOMPCapturedExprDecl(const OMPCapturedExprDecl *D) {
+ dumpName(D);
+ dumpType(D->getType());
+}
+
+void TextNodeDumper::VisitNamespaceDecl(const NamespaceDecl *D) {
+ dumpName(D);
+ if (D->isInline())
+ OS << " inline";
+ if (!D->isOriginalNamespace())
+ dumpDeclRef(D->getOriginalNamespace(), "original");
+}
+
+void TextNodeDumper::VisitUsingDirectiveDecl(const UsingDirectiveDecl *D) {
+ OS << ' ';
+ dumpBareDeclRef(D->getNominatedNamespace());
+}
+
+void TextNodeDumper::VisitNamespaceAliasDecl(const NamespaceAliasDecl *D) {
+ dumpName(D);
+ dumpDeclRef(D->getAliasedNamespace());
+}
+
+void TextNodeDumper::VisitTypeAliasDecl(const TypeAliasDecl *D) {
+ dumpName(D);
+ dumpType(D->getUnderlyingType());
+}
+
+void TextNodeDumper::VisitTypeAliasTemplateDecl(
+ const TypeAliasTemplateDecl *D) {
+ dumpName(D);
+}
+
+void TextNodeDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
+ VisitRecordDecl(D);
+ if (!D->isCompleteDefinition())
+ return;
+
+ AddChild([=] {
+ {
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
+ OS << "DefinitionData";
+ }
+#define FLAG(fn, name) \
+ if (D->fn()) \
+ OS << " " #name;
+ FLAG(isParsingBaseSpecifiers, parsing_base_specifiers);
+
+ FLAG(isGenericLambda, generic);
+ FLAG(isLambda, lambda);
+
+ FLAG(canPassInRegisters, pass_in_registers);
+ FLAG(isEmpty, empty);
+ FLAG(isAggregate, aggregate);
+ FLAG(isStandardLayout, standard_layout);
+ FLAG(isTriviallyCopyable, trivially_copyable);
+ FLAG(isPOD, pod);
+ FLAG(isTrivial, trivial);
+ FLAG(isPolymorphic, polymorphic);
+ FLAG(isAbstract, abstract);
+ FLAG(isLiteral, literal);
+
+ FLAG(hasUserDeclaredConstructor, has_user_declared_ctor);
+ FLAG(hasConstexprNonCopyMoveConstructor, has_constexpr_non_copy_move_ctor);
+ FLAG(hasMutableFields, has_mutable_fields);
+ FLAG(hasVariantMembers, has_variant_members);
+ FLAG(allowConstDefaultInit, can_const_default_init);
+
+ AddChild([=] {
+ {
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
+ OS << "DefaultConstructor";
+ }
+ FLAG(hasDefaultConstructor, exists);
+ FLAG(hasTrivialDefaultConstructor, trivial);
+ FLAG(hasNonTrivialDefaultConstructor, non_trivial);
+ FLAG(hasUserProvidedDefaultConstructor, user_provided);
+ FLAG(hasConstexprDefaultConstructor, constexpr);
+ FLAG(needsImplicitDefaultConstructor, needs_implicit);
+ FLAG(defaultedDefaultConstructorIsConstexpr, defaulted_is_constexpr);
+ });
+
+ AddChild([=] {
+ {
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
+ OS << "CopyConstructor";
+ }
+ FLAG(hasSimpleCopyConstructor, simple);
+ FLAG(hasTrivialCopyConstructor, trivial);
+ FLAG(hasNonTrivialCopyConstructor, non_trivial);
+ FLAG(hasUserDeclaredCopyConstructor, user_declared);
+ FLAG(hasCopyConstructorWithConstParam, has_const_param);
+ FLAG(needsImplicitCopyConstructor, needs_implicit);
+ FLAG(needsOverloadResolutionForCopyConstructor,
+ needs_overload_resolution);
+ if (!D->needsOverloadResolutionForCopyConstructor())
+ FLAG(defaultedCopyConstructorIsDeleted, defaulted_is_deleted);
+ FLAG(implicitCopyConstructorHasConstParam, implicit_has_const_param);
+ });
+
+ AddChild([=] {
+ {
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
+ OS << "MoveConstructor";
+ }
+ FLAG(hasMoveConstructor, exists);
+ FLAG(hasSimpleMoveConstructor, simple);
+ FLAG(hasTrivialMoveConstructor, trivial);
+ FLAG(hasNonTrivialMoveConstructor, non_trivial);
+ FLAG(hasUserDeclaredMoveConstructor, user_declared);
+ FLAG(needsImplicitMoveConstructor, needs_implicit);
+ FLAG(needsOverloadResolutionForMoveConstructor,
+ needs_overload_resolution);
+ if (!D->needsOverloadResolutionForMoveConstructor())
+ FLAG(defaultedMoveConstructorIsDeleted, defaulted_is_deleted);
+ });
+
+ AddChild([=] {
+ {
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
+ OS << "CopyAssignment";
+ }
+ FLAG(hasTrivialCopyAssignment, trivial);
+ FLAG(hasNonTrivialCopyAssignment, non_trivial);
+ FLAG(hasCopyAssignmentWithConstParam, has_const_param);
+ FLAG(hasUserDeclaredCopyAssignment, user_declared);
+ FLAG(needsImplicitCopyAssignment, needs_implicit);
+ FLAG(needsOverloadResolutionForCopyAssignment, needs_overload_resolution);
+ FLAG(implicitCopyAssignmentHasConstParam, implicit_has_const_param);
+ });
+
+ AddChild([=] {
+ {
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
+ OS << "MoveAssignment";
+ }
+ FLAG(hasMoveAssignment, exists);
+ FLAG(hasSimpleMoveAssignment, simple);
+ FLAG(hasTrivialMoveAssignment, trivial);
+ FLAG(hasNonTrivialMoveAssignment, non_trivial);
+ FLAG(hasUserDeclaredMoveAssignment, user_declared);
+ FLAG(needsImplicitMoveAssignment, needs_implicit);
+ FLAG(needsOverloadResolutionForMoveAssignment, needs_overload_resolution);
+ });
+
+ AddChild([=] {
+ {
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
+ OS << "Destructor";
+ }
+ FLAG(hasSimpleDestructor, simple);
+ FLAG(hasIrrelevantDestructor, irrelevant);
+ FLAG(hasTrivialDestructor, trivial);
+ FLAG(hasNonTrivialDestructor, non_trivial);
+ FLAG(hasUserDeclaredDestructor, user_declared);
+ FLAG(needsImplicitDestructor, needs_implicit);
+ FLAG(needsOverloadResolutionForDestructor, needs_overload_resolution);
+ if (!D->needsOverloadResolutionForDestructor())
+ FLAG(defaultedDestructorIsDeleted, defaulted_is_deleted);
+ });
+ });
+
+ for (const auto &I : D->bases()) {
+ AddChild([=] {
+ if (I.isVirtual())
+ OS << "virtual ";
+ dumpAccessSpecifier(I.getAccessSpecifier());
+ dumpType(I.getType());
+ if (I.isPackExpansion())
+ OS << "...";
+ });
+ }
+}
+
+void TextNodeDumper::VisitFunctionTemplateDecl(const FunctionTemplateDecl *D) {
+ dumpName(D);
+}
+
+void TextNodeDumper::VisitClassTemplateDecl(const ClassTemplateDecl *D) {
+ dumpName(D);
+}
+
+void TextNodeDumper::VisitVarTemplateDecl(const VarTemplateDecl *D) {
+ dumpName(D);
+}
+
+void TextNodeDumper::VisitBuiltinTemplateDecl(const BuiltinTemplateDecl *D) {
+ dumpName(D);
+}
+
+void TextNodeDumper::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) {
+ if (D->wasDeclaredWithTypename())
+ OS << " typename";
+ else
+ OS << " class";
+ OS << " depth " << D->getDepth() << " index " << D->getIndex();
+ if (D->isParameterPack())
+ OS << " ...";
+ dumpName(D);
+}
+
+void TextNodeDumper::VisitNonTypeTemplateParmDecl(
+ const NonTypeTemplateParmDecl *D) {
+ dumpType(D->getType());
+ OS << " depth " << D->getDepth() << " index " << D->getIndex();
+ if (D->isParameterPack())
+ OS << " ...";
+ dumpName(D);
+}
+
+void TextNodeDumper::VisitTemplateTemplateParmDecl(
+ const TemplateTemplateParmDecl *D) {
+ OS << " depth " << D->getDepth() << " index " << D->getIndex();
+ if (D->isParameterPack())
+ OS << " ...";
+ dumpName(D);
+}
+
+void TextNodeDumper::VisitUsingDecl(const UsingDecl *D) {
+ OS << ' ';
+ if (D->getQualifier())
+ D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
+ OS << D->getNameAsString();
+}
+
+void TextNodeDumper::VisitUnresolvedUsingTypenameDecl(
+ const UnresolvedUsingTypenameDecl *D) {
+ OS << ' ';
+ if (D->getQualifier())
+ D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
+ OS << D->getNameAsString();
+}
+
+void TextNodeDumper::VisitUnresolvedUsingValueDecl(
+ const UnresolvedUsingValueDecl *D) {
+ OS << ' ';
+ if (D->getQualifier())
+ D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
+ OS << D->getNameAsString();
+ dumpType(D->getType());
+}
+
+void TextNodeDumper::VisitUsingShadowDecl(const UsingShadowDecl *D) {
+ OS << ' ';
+ dumpBareDeclRef(D->getTargetDecl());
+}
+
+void TextNodeDumper::VisitConstructorUsingShadowDecl(
+ const ConstructorUsingShadowDecl *D) {
+ if (D->constructsVirtualBase())
+ OS << " virtual";
+
+ AddChild([=] {
+ OS << "target ";
+ dumpBareDeclRef(D->getTargetDecl());
+ });
+
+ AddChild([=] {
+ OS << "nominated ";
+ dumpBareDeclRef(D->getNominatedBaseClass());
+ OS << ' ';
+ dumpBareDeclRef(D->getNominatedBaseClassShadowDecl());
+ });
+
+ AddChild([=] {
+ OS << "constructed ";
+ dumpBareDeclRef(D->getConstructedBaseClass());
+ OS << ' ';
+ dumpBareDeclRef(D->getConstructedBaseClassShadowDecl());
+ });
+}
+
+void TextNodeDumper::VisitLinkageSpecDecl(const LinkageSpecDecl *D) {
+ switch (D->getLanguage()) {
+ case LinkageSpecDecl::lang_c:
+ OS << " C";
+ break;
+ case LinkageSpecDecl::lang_cxx:
+ OS << " C++";
+ break;
+ }
+}
+
+void TextNodeDumper::VisitAccessSpecDecl(const AccessSpecDecl *D) {
+ OS << ' ';
+ dumpAccessSpecifier(D->getAccess());
+}
+
+void TextNodeDumper::VisitFriendDecl(const FriendDecl *D) {
+ if (TypeSourceInfo *T = D->getFriendType())
+ dumpType(T->getType());
+}
+
+void TextNodeDumper::VisitObjCIvarDecl(const ObjCIvarDecl *D) {
+ dumpName(D);
+ dumpType(D->getType());
+ if (D->getSynthesize())
+ OS << " synthesize";
+
+ switch (D->getAccessControl()) {
+ case ObjCIvarDecl::None:
+ OS << " none";
+ break;
+ case ObjCIvarDecl::Private:
+ OS << " private";
+ break;
+ case ObjCIvarDecl::Protected:
+ OS << " protected";
+ break;
+ case ObjCIvarDecl::Public:
+ OS << " public";
+ break;
+ case ObjCIvarDecl::Package:
+ OS << " package";
+ break;
+ }
+}
+
+void TextNodeDumper::VisitObjCMethodDecl(const ObjCMethodDecl *D) {
+ if (D->isInstanceMethod())
+ OS << " -";
+ else
+ OS << " +";
+ dumpName(D);
+ dumpType(D->getReturnType());
+
+ if (D->isVariadic())
+ OS << " variadic";
+}
+
+void TextNodeDumper::VisitObjCTypeParamDecl(const ObjCTypeParamDecl *D) {
+ dumpName(D);
+ switch (D->getVariance()) {
+ case ObjCTypeParamVariance::Invariant:
+ break;
+
+ case ObjCTypeParamVariance::Covariant:
+ OS << " covariant";
+ break;
+
+ case ObjCTypeParamVariance::Contravariant:
+ OS << " contravariant";
+ break;
+ }
+
+ if (D->hasExplicitBound())
+ OS << " bounded";
+ dumpType(D->getUnderlyingType());
+}
+
+void TextNodeDumper::VisitObjCCategoryDecl(const ObjCCategoryDecl *D) {
+ dumpName(D);
+ dumpDeclRef(D->getClassInterface());
+ dumpDeclRef(D->getImplementation());
+ for (const auto *P : D->protocols())
+ dumpDeclRef(P);
+}
+
+void TextNodeDumper::VisitObjCCategoryImplDecl(const ObjCCategoryImplDecl *D) {
+ dumpName(D);
+ dumpDeclRef(D->getClassInterface());
+ dumpDeclRef(D->getCategoryDecl());
+}
+
+void TextNodeDumper::VisitObjCProtocolDecl(const ObjCProtocolDecl *D) {
+ dumpName(D);
+
+ for (const auto *Child : D->protocols())
+ dumpDeclRef(Child);
+}
+
+void TextNodeDumper::VisitObjCInterfaceDecl(const ObjCInterfaceDecl *D) {
+ dumpName(D);
+ dumpDeclRef(D->getSuperClass(), "super");
+
+ dumpDeclRef(D->getImplementation());
+ for (const auto *Child : D->protocols())
+ dumpDeclRef(Child);
+}
+
+void TextNodeDumper::VisitObjCImplementationDecl(
+ const ObjCImplementationDecl *D) {
+ dumpName(D);
+ dumpDeclRef(D->getSuperClass(), "super");
+ dumpDeclRef(D->getClassInterface());
+}
+
+void TextNodeDumper::VisitObjCCompatibleAliasDecl(
+ const ObjCCompatibleAliasDecl *D) {
+ dumpName(D);
+ dumpDeclRef(D->getClassInterface());
+}
+
+void TextNodeDumper::VisitObjCPropertyDecl(const ObjCPropertyDecl *D) {
+ dumpName(D);
+ dumpType(D->getType());
+
+ if (D->getPropertyImplementation() == ObjCPropertyDecl::Required)
+ OS << " required";
+ else if (D->getPropertyImplementation() == ObjCPropertyDecl::Optional)
+ OS << " optional";
+
+ ObjCPropertyDecl::PropertyAttributeKind Attrs = D->getPropertyAttributes();
+ if (Attrs != ObjCPropertyDecl::OBJC_PR_noattr) {
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_readonly)
+ OS << " readonly";
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_assign)
+ OS << " assign";
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_readwrite)
+ OS << " readwrite";
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_retain)
+ OS << " retain";
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_copy)
+ OS << " copy";
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ OS << " nonatomic";
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_atomic)
+ OS << " atomic";
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_weak)
+ OS << " weak";
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_strong)
+ OS << " strong";
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_unsafe_unretained)
+ OS << " unsafe_unretained";
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_class)
+ OS << " class";
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_getter)
+ dumpDeclRef(D->getGetterMethodDecl(), "getter");
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_setter)
+ dumpDeclRef(D->getSetterMethodDecl(), "setter");
+ }
+}
+
+void TextNodeDumper::VisitObjCPropertyImplDecl(const ObjCPropertyImplDecl *D) {
+ dumpName(D->getPropertyDecl());
+ if (D->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize)
+ OS << " synthesize";
+ else
+ OS << " dynamic";
+ dumpDeclRef(D->getPropertyDecl());
+ dumpDeclRef(D->getPropertyIvarDecl());
+}
+
+void TextNodeDumper::VisitBlockDecl(const BlockDecl *D) {
+ if (D->isVariadic())
+ OS << " variadic";
+
+ if (D->capturesCXXThis())
+ OS << " captures_this";
+}
+
+void TextNodeDumper::VisitConceptDecl(const ConceptDecl *D) {
+ dumpName(D);
+}
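
The TextNodeDumper.cpp addition moves the per-declaration dump methods into the text node dumper: function and variable flags (including the new constexpr/consteval distinction), the DefinitionData block for C++ records, Objective-C property attributes, and the OpenMP declarations are all rendered here. A quick way to exercise these visitors is an ordinary AST dump; the input is illustrative and the invocation is the usual driver spelling:

  // sample.cpp -- small input whose declarations reach several of the visitors above
  struct S { int x = 0; };
  constexpr int twice(int v) { return 2 * v; }
  namespace lib { using T = S; }

and then:

  clang -Xclang -ast-dump -fsyntax-only sample.cpp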
diff --git a/lib/AST/Type.cpp b/lib/AST/Type.cpp
index 0dbc88c04521..ed75a0b5bcd8 100644
--- a/lib/AST/Type.cpp
+++ b/lib/AST/Type.cpp
@@ -1,9 +1,8 @@
//===- Type.cpp - Type representation and manipulation --------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -23,6 +22,7 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
@@ -723,25 +723,30 @@ const ObjCObjectPointerType *ObjCObjectPointerType::stripObjCKindOfTypeAndQuals(
return ctx.getObjCObjectPointerType(obj)->castAs<ObjCObjectPointerType>();
}
-template<typename F>
-static QualType simpleTransform(ASTContext &ctx, QualType type, F &&f);
-
namespace {
-/// Visitor used by simpleTransform() to perform the transformation.
-template<typename F>
-struct SimpleTransformVisitor
- : public TypeVisitor<SimpleTransformVisitor<F>, QualType> {
+/// Visitor used to perform a simple type transformation that does not change
+/// the semantics of the type.
+template <typename Derived>
+struct SimpleTransformVisitor : public TypeVisitor<Derived, QualType> {
ASTContext &Ctx;
- F &&TheFunc;
QualType recurse(QualType type) {
- return simpleTransform(Ctx, type, std::move(TheFunc));
+ // Split out the qualifiers from the type.
+ SplitQualType splitType = type.split();
+
+ // Visit the type itself.
+ QualType result = static_cast<Derived *>(this)->Visit(splitType.Ty);
+ if (result.isNull())
+ return result;
+
+ // Reconstruct the transformed type by applying the local qualifiers
+ // from the split type.
+ return Ctx.getQualifiedType(result, splitType.Quals);
}
public:
- SimpleTransformVisitor(ASTContext &ctx, F &&f)
- : Ctx(ctx), TheFunc(std::move(f)) {}
+ explicit SimpleTransformVisitor(ASTContext &ctx) : Ctx(ctx) {}
// None of the clients of this transformation can occur where
// there are dependent types, so skip dependent types.
@@ -752,6 +757,17 @@ public:
#define TRIVIAL_TYPE_CLASS(Class) \
QualType Visit##Class##Type(const Class##Type *T) { return QualType(T, 0); }
+#define SUGARED_TYPE_CLASS(Class) \
+ QualType Visit##Class##Type(const Class##Type *T) { \
+ if (!T->isSugared()) \
+ return QualType(T, 0); \
+ QualType desugaredType = recurse(T->desugar()); \
+ if (desugaredType.isNull()) \
+ return {}; \
+ if (desugaredType.getAsOpaquePtr() == T->desugar().getAsOpaquePtr()) \
+ return QualType(T, 0); \
+ return desugaredType; \
+ }
TRIVIAL_TYPE_CLASS(Builtin)
@@ -955,8 +971,9 @@ public:
return Ctx.getParenType(innerType);
}
- TRIVIAL_TYPE_CLASS(Typedef)
- TRIVIAL_TYPE_CLASS(ObjCTypeParam)
+ SUGARED_TYPE_CLASS(Typedef)
+ SUGARED_TYPE_CLASS(ObjCTypeParam)
+ SUGARED_TYPE_CLASS(MacroQualified)
QualType VisitAdjustedType(const AdjustedType *T) {
QualType originalType = recurse(T->getOriginalType());
@@ -987,15 +1004,15 @@ public:
return Ctx.getDecayedType(originalType);
}
- TRIVIAL_TYPE_CLASS(TypeOfExpr)
- TRIVIAL_TYPE_CLASS(TypeOf)
- TRIVIAL_TYPE_CLASS(Decltype)
- TRIVIAL_TYPE_CLASS(UnaryTransform)
+ SUGARED_TYPE_CLASS(TypeOfExpr)
+ SUGARED_TYPE_CLASS(TypeOf)
+ SUGARED_TYPE_CLASS(Decltype)
+ SUGARED_TYPE_CLASS(UnaryTransform)
TRIVIAL_TYPE_CLASS(Record)
TRIVIAL_TYPE_CLASS(Enum)
// FIXME: Non-trivial to implement, but important for C++
- TRIVIAL_TYPE_CLASS(Elaborated)
+ SUGARED_TYPE_CLASS(Elaborated)
QualType VisitAttributedType(const AttributedType *T) {
QualType modifiedType = recurse(T->getModifiedType());
@@ -1030,7 +1047,7 @@ public:
}
// FIXME: Non-trivial to implement, but important for C++
- TRIVIAL_TYPE_CLASS(TemplateSpecialization)
+ SUGARED_TYPE_CLASS(TemplateSpecialization)
QualType VisitAutoType(const AutoType *T) {
if (!T->isDeduced())
@@ -1049,7 +1066,7 @@ public:
}
// FIXME: Non-trivial to implement, but important for C++
- TRIVIAL_TYPE_CLASS(PackExpansion)
+ SUGARED_TYPE_CLASS(PackExpansion)
QualType VisitObjCObjectType(const ObjCObjectType *T) {
QualType baseType = recurse(T->getBaseType());
@@ -1107,222 +1124,245 @@ public:
}
#undef TRIVIAL_TYPE_CLASS
+#undef SUGARED_TYPE_CLASS
};
-} // namespace
-
-/// Perform a simple type transformation that does not change the
-/// semantics of the type.
-template<typename F>
-static QualType simpleTransform(ASTContext &ctx, QualType type, F &&f) {
- // Transform the type. If it changed, return the transformed result.
- QualType transformed = f(type);
- if (transformed.getAsOpaquePtr() != type.getAsOpaquePtr())
- return transformed;
-
- // Split out the qualifiers from the type.
- SplitQualType splitType = type.split();
-
- // Visit the type itself.
- SimpleTransformVisitor<F> visitor(ctx, std::forward<F>(f));
- QualType result = visitor.Visit(splitType.Ty);
- if (result.isNull())
- return result;
+struct SubstObjCTypeArgsVisitor
+ : public SimpleTransformVisitor<SubstObjCTypeArgsVisitor> {
+ using BaseType = SimpleTransformVisitor<SubstObjCTypeArgsVisitor>;
- // Reconstruct the transformed type by applying the local qualifiers
- // from the split type.
- return ctx.getQualifiedType(result, splitType.Quals);
-}
+ ArrayRef<QualType> TypeArgs;
+ ObjCSubstitutionContext SubstContext;
-/// Substitute the given type arguments for Objective-C type
-/// parameters within the given type, recursively.
-QualType QualType::substObjCTypeArgs(
- ASTContext &ctx,
- ArrayRef<QualType> typeArgs,
- ObjCSubstitutionContext context) const {
- return simpleTransform(ctx, *this,
- [&](QualType type) -> QualType {
- SplitQualType splitType = type.split();
+ SubstObjCTypeArgsVisitor(ASTContext &ctx, ArrayRef<QualType> typeArgs,
+ ObjCSubstitutionContext context)
+ : BaseType(ctx), TypeArgs(typeArgs), SubstContext(context) {}
+ QualType VisitObjCTypeParamType(const ObjCTypeParamType *OTPTy) {
// Replace an Objective-C type parameter reference with the corresponding
// type argument.
- if (const auto *OTPTy = dyn_cast<ObjCTypeParamType>(splitType.Ty)) {
- ObjCTypeParamDecl *typeParam = OTPTy->getDecl();
- // If we have type arguments, use them.
- if (!typeArgs.empty()) {
- QualType argType = typeArgs[typeParam->getIndex()];
- if (OTPTy->qual_empty())
- return ctx.getQualifiedType(argType, splitType.Quals);
-
- // Apply protocol lists if exists.
- bool hasError;
- SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
- protocolsVec.append(OTPTy->qual_begin(),
- OTPTy->qual_end());
- ArrayRef<ObjCProtocolDecl *> protocolsToApply = protocolsVec;
- QualType resultTy = ctx.applyObjCProtocolQualifiers(argType,
- protocolsToApply, hasError, true/*allowOnPointerType*/);
-
- return ctx.getQualifiedType(resultTy, splitType.Quals);
- }
+ ObjCTypeParamDecl *typeParam = OTPTy->getDecl();
+ // If we have type arguments, use them.
+ if (!TypeArgs.empty()) {
+ QualType argType = TypeArgs[typeParam->getIndex()];
+ if (OTPTy->qual_empty())
+ return argType;
+
+ // Apply protocol lists if exists.
+ bool hasError;
+ SmallVector<ObjCProtocolDecl *, 8> protocolsVec;
+ protocolsVec.append(OTPTy->qual_begin(), OTPTy->qual_end());
+ ArrayRef<ObjCProtocolDecl *> protocolsToApply = protocolsVec;
+ return Ctx.applyObjCProtocolQualifiers(
+ argType, protocolsToApply, hasError, true/*allowOnPointerType*/);
+ }
- switch (context) {
- case ObjCSubstitutionContext::Ordinary:
- case ObjCSubstitutionContext::Parameter:
- case ObjCSubstitutionContext::Superclass:
- // Substitute the bound.
- return ctx.getQualifiedType(typeParam->getUnderlyingType(),
- splitType.Quals);
-
- case ObjCSubstitutionContext::Result:
- case ObjCSubstitutionContext::Property: {
- // Substitute the __kindof form of the underlying type.
- const auto *objPtr = typeParam->getUnderlyingType()
- ->castAs<ObjCObjectPointerType>();
-
- // __kindof types, id, and Class don't need an additional
- // __kindof.
- if (objPtr->isKindOfType() || objPtr->isObjCIdOrClassType())
- return ctx.getQualifiedType(typeParam->getUnderlyingType(),
- splitType.Quals);
-
- // Add __kindof.
- const auto *obj = objPtr->getObjectType();
- QualType resultTy = ctx.getObjCObjectType(obj->getBaseType(),
- obj->getTypeArgsAsWritten(),
- obj->getProtocols(),
- /*isKindOf=*/true);
-
- // Rebuild object pointer type.
- resultTy = ctx.getObjCObjectPointerType(resultTy);
- return ctx.getQualifiedType(resultTy, splitType.Quals);
- }
- }
+ switch (SubstContext) {
+ case ObjCSubstitutionContext::Ordinary:
+ case ObjCSubstitutionContext::Parameter:
+ case ObjCSubstitutionContext::Superclass:
+ // Substitute the bound.
+ return typeParam->getUnderlyingType();
+
+ case ObjCSubstitutionContext::Result:
+ case ObjCSubstitutionContext::Property: {
+ // Substitute the __kindof form of the underlying type.
+ const auto *objPtr =
+ typeParam->getUnderlyingType()->castAs<ObjCObjectPointerType>();
+
+ // __kindof types, id, and Class don't need an additional
+ // __kindof.
+ if (objPtr->isKindOfType() || objPtr->isObjCIdOrClassType())
+ return typeParam->getUnderlyingType();
+
+ // Add __kindof.
+ const auto *obj = objPtr->getObjectType();
+ QualType resultTy = Ctx.getObjCObjectType(
+ obj->getBaseType(), obj->getTypeArgsAsWritten(), obj->getProtocols(),
+ /*isKindOf=*/true);
+
+ // Rebuild object pointer type.
+ return Ctx.getObjCObjectPointerType(resultTy);
+ }
+ }
+ llvm_unreachable("Unexpected ObjCSubstitutionContext!");
+ }
+
+ QualType VisitFunctionType(const FunctionType *funcType) {
+ // If we have a function type, update the substitution context
+ // appropriately.
+
+ //Substitute result type.
+ QualType returnType = funcType->getReturnType().substObjCTypeArgs(
+ Ctx, TypeArgs, ObjCSubstitutionContext::Result);
+ if (returnType.isNull())
+ return {};
+
+ // Handle non-prototyped functions, which only substitute into the result
+ // type.
+ if (isa<FunctionNoProtoType>(funcType)) {
+ // If the return type was unchanged, do nothing.
+ if (returnType.getAsOpaquePtr() ==
+ funcType->getReturnType().getAsOpaquePtr())
+ return BaseType::VisitFunctionType(funcType);
+
+ // Otherwise, build a new type.
+ return Ctx.getFunctionNoProtoType(returnType, funcType->getExtInfo());
}
- // If we have a function type, update the context appropriately.
- if (const auto *funcType = dyn_cast<FunctionType>(splitType.Ty)) {
- // Substitute result type.
- QualType returnType = funcType->getReturnType().substObjCTypeArgs(
- ctx,
- typeArgs,
- ObjCSubstitutionContext::Result);
- if (returnType.isNull())
+ const auto *funcProtoType = cast<FunctionProtoType>(funcType);
+
+ // Transform parameter types.
+ SmallVector<QualType, 4> paramTypes;
+ bool paramChanged = false;
+ for (auto paramType : funcProtoType->getParamTypes()) {
+ QualType newParamType = paramType.substObjCTypeArgs(
+ Ctx, TypeArgs, ObjCSubstitutionContext::Parameter);
+ if (newParamType.isNull())
return {};
- // Handle non-prototyped functions, which only substitute into the result
- // type.
- if (isa<FunctionNoProtoType>(funcType)) {
- // If the return type was unchanged, do nothing.
- if (returnType.getAsOpaquePtr()
- == funcType->getReturnType().getAsOpaquePtr())
- return type;
+ if (newParamType.getAsOpaquePtr() != paramType.getAsOpaquePtr())
+ paramChanged = true;
- // Otherwise, build a new type.
- return ctx.getFunctionNoProtoType(returnType, funcType->getExtInfo());
- }
+ paramTypes.push_back(newParamType);
+ }
- const auto *funcProtoType = cast<FunctionProtoType>(funcType);
-
- // Transform parameter types.
- SmallVector<QualType, 4> paramTypes;
- bool paramChanged = false;
- for (auto paramType : funcProtoType->getParamTypes()) {
- QualType newParamType = paramType.substObjCTypeArgs(
- ctx,
- typeArgs,
- ObjCSubstitutionContext::Parameter);
- if (newParamType.isNull())
+ // Transform extended info.
+ FunctionProtoType::ExtProtoInfo info = funcProtoType->getExtProtoInfo();
+ bool exceptionChanged = false;
+ if (info.ExceptionSpec.Type == EST_Dynamic) {
+ SmallVector<QualType, 4> exceptionTypes;
+ for (auto exceptionType : info.ExceptionSpec.Exceptions) {
+ QualType newExceptionType = exceptionType.substObjCTypeArgs(
+ Ctx, TypeArgs, ObjCSubstitutionContext::Ordinary);
+ if (newExceptionType.isNull())
return {};
- if (newParamType.getAsOpaquePtr() != paramType.getAsOpaquePtr())
- paramChanged = true;
+ if (newExceptionType.getAsOpaquePtr() != exceptionType.getAsOpaquePtr())
+ exceptionChanged = true;
- paramTypes.push_back(newParamType);
+ exceptionTypes.push_back(newExceptionType);
}
- // Transform extended info.
- FunctionProtoType::ExtProtoInfo info = funcProtoType->getExtProtoInfo();
- bool exceptionChanged = false;
- if (info.ExceptionSpec.Type == EST_Dynamic) {
- SmallVector<QualType, 4> exceptionTypes;
- for (auto exceptionType : info.ExceptionSpec.Exceptions) {
- QualType newExceptionType = exceptionType.substObjCTypeArgs(
- ctx,
- typeArgs,
- ObjCSubstitutionContext::Ordinary);
- if (newExceptionType.isNull())
- return {};
-
- if (newExceptionType.getAsOpaquePtr()
- != exceptionType.getAsOpaquePtr())
- exceptionChanged = true;
-
- exceptionTypes.push_back(newExceptionType);
- }
-
- if (exceptionChanged) {
- info.ExceptionSpec.Exceptions =
- llvm::makeArrayRef(exceptionTypes).copy(ctx);
- }
+ if (exceptionChanged) {
+ info.ExceptionSpec.Exceptions =
+ llvm::makeArrayRef(exceptionTypes).copy(Ctx);
}
+ }
- if (returnType.getAsOpaquePtr()
- == funcProtoType->getReturnType().getAsOpaquePtr() &&
- !paramChanged && !exceptionChanged)
- return type;
+ if (returnType.getAsOpaquePtr() ==
+ funcProtoType->getReturnType().getAsOpaquePtr() &&
+ !paramChanged && !exceptionChanged)
+ return BaseType::VisitFunctionType(funcType);
- return ctx.getFunctionType(returnType, paramTypes, info);
- }
+ return Ctx.getFunctionType(returnType, paramTypes, info);
+ }
+ QualType VisitObjCObjectType(const ObjCObjectType *objcObjectType) {
// Substitute into the type arguments of a specialized Objective-C object
// type.
- if (const auto *objcObjectType = dyn_cast<ObjCObjectType>(splitType.Ty)) {
- if (objcObjectType->isSpecializedAsWritten()) {
- SmallVector<QualType, 4> newTypeArgs;
- bool anyChanged = false;
- for (auto typeArg : objcObjectType->getTypeArgsAsWritten()) {
- QualType newTypeArg = typeArg.substObjCTypeArgs(
- ctx, typeArgs,
- ObjCSubstitutionContext::Ordinary);
- if (newTypeArg.isNull())
- return {};
-
- if (newTypeArg.getAsOpaquePtr() != typeArg.getAsOpaquePtr()) {
- // If we're substituting based on an unspecialized context type,
- // produce an unspecialized type.
- ArrayRef<ObjCProtocolDecl *> protocols(
- objcObjectType->qual_begin(),
- objcObjectType->getNumProtocols());
- if (typeArgs.empty() &&
- context != ObjCSubstitutionContext::Superclass) {
- return ctx.getObjCObjectType(
- objcObjectType->getBaseType(), {},
- protocols,
- objcObjectType->isKindOfTypeAsWritten());
- }
-
- anyChanged = true;
+ if (objcObjectType->isSpecializedAsWritten()) {
+ SmallVector<QualType, 4> newTypeArgs;
+ bool anyChanged = false;
+ for (auto typeArg : objcObjectType->getTypeArgsAsWritten()) {
+ QualType newTypeArg = typeArg.substObjCTypeArgs(
+ Ctx, TypeArgs, ObjCSubstitutionContext::Ordinary);
+ if (newTypeArg.isNull())
+ return {};
+
+ if (newTypeArg.getAsOpaquePtr() != typeArg.getAsOpaquePtr()) {
+ // If we're substituting based on an unspecialized context type,
+ // produce an unspecialized type.
+ ArrayRef<ObjCProtocolDecl *> protocols(
+ objcObjectType->qual_begin(), objcObjectType->getNumProtocols());
+ if (TypeArgs.empty() &&
+ SubstContext != ObjCSubstitutionContext::Superclass) {
+ return Ctx.getObjCObjectType(
+ objcObjectType->getBaseType(), {}, protocols,
+ objcObjectType->isKindOfTypeAsWritten());
}
- newTypeArgs.push_back(newTypeArg);
+ anyChanged = true;
}
- if (anyChanged) {
- ArrayRef<ObjCProtocolDecl *> protocols(
- objcObjectType->qual_begin(),
- objcObjectType->getNumProtocols());
- return ctx.getObjCObjectType(objcObjectType->getBaseType(),
- newTypeArgs, protocols,
- objcObjectType->isKindOfTypeAsWritten());
- }
+ newTypeArgs.push_back(newTypeArg);
}
- return type;
+ if (anyChanged) {
+ ArrayRef<ObjCProtocolDecl *> protocols(
+ objcObjectType->qual_begin(), objcObjectType->getNumProtocols());
+ return Ctx.getObjCObjectType(objcObjectType->getBaseType(), newTypeArgs,
+ protocols,
+ objcObjectType->isKindOfTypeAsWritten());
+ }
}
- return type;
- });
+ return BaseType::VisitObjCObjectType(objcObjectType);
+ }
+
+ QualType VisitAttributedType(const AttributedType *attrType) {
+ QualType newType = BaseType::VisitAttributedType(attrType);
+ if (newType.isNull())
+ return {};
+
+ const auto *newAttrType = dyn_cast<AttributedType>(newType.getTypePtr());
+ if (!newAttrType || newAttrType->getAttrKind() != attr::ObjCKindOf)
+ return newType;
+
+ // Find out if it's an Objective-C object or object pointer type.
+ QualType newEquivType = newAttrType->getEquivalentType();
+ const ObjCObjectPointerType *ptrType =
+ newEquivType->getAs<ObjCObjectPointerType>();
+ const ObjCObjectType *objType = ptrType
+ ? ptrType->getObjectType()
+ : newEquivType->getAs<ObjCObjectType>();
+ if (!objType)
+ return newType;
+
+ // Rebuild the "equivalent" type, which pushes __kindof down into
+ // the object type.
+ newEquivType = Ctx.getObjCObjectType(
+ objType->getBaseType(), objType->getTypeArgsAsWritten(),
+ objType->getProtocols(),
+ // There is no need to apply kindof on an unqualified id type.
+ /*isKindOf=*/objType->isObjCUnqualifiedId() ? false : true);
+
+ // If we started with an object pointer type, rebuild it.
+ if (ptrType)
+ newEquivType = Ctx.getObjCObjectPointerType(newEquivType);
+
+ // Rebuild the attributed type.
+ return Ctx.getAttributedType(newAttrType->getAttrKind(),
+ newAttrType->getModifiedType(), newEquivType);
+ }
+};
+
+struct StripObjCKindOfTypeVisitor
+ : public SimpleTransformVisitor<StripObjCKindOfTypeVisitor> {
+ using BaseType = SimpleTransformVisitor<StripObjCKindOfTypeVisitor>;
+
+ explicit StripObjCKindOfTypeVisitor(ASTContext &ctx) : BaseType(ctx) {}
+
+ QualType VisitObjCObjectType(const ObjCObjectType *objType) {
+ if (!objType->isKindOfType())
+ return BaseType::VisitObjCObjectType(objType);
+
+ QualType baseType = objType->getBaseType().stripObjCKindOfType(Ctx);
+ return Ctx.getObjCObjectType(baseType, objType->getTypeArgsAsWritten(),
+ objType->getProtocols(),
+ /*isKindOf=*/false);
+ }
+};
+
+} // namespace
+
+/// Substitute the given type arguments for Objective-C type
+/// parameters within the given type, recursively.
+QualType QualType::substObjCTypeArgs(ASTContext &ctx,
+ ArrayRef<QualType> typeArgs,
+ ObjCSubstitutionContext context) const {
+ SubstObjCTypeArgsVisitor visitor(ctx, typeArgs, context);
+ return visitor.recurse(*this);
}
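// A minimal usage sketch of the rewritten entry point above (illustrative,
// not taken from the patch): substitute concrete Objective-C type arguments
// into a type written against a class's type parameters. 'Ctx', 'Ty' and
// 'TypeArgs' are assumed to be supplied by the caller, with one QualType in
// 'TypeArgs' per type parameter of the enclosing interface.
QualType substituteTypeArgs(ASTContext &Ctx, QualType Ty,
                            ArrayRef<QualType> TypeArgs) {
  return Ty.substObjCTypeArgs(Ctx, TypeArgs,
                              ObjCSubstitutionContext::Ordinary);
}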
QualType QualType::substObjCMemberType(QualType objectType,
@@ -1337,25 +1377,8 @@ QualType QualType::substObjCMemberType(QualType objectType,
QualType QualType::stripObjCKindOfType(const ASTContext &constCtx) const {
// FIXME: Because ASTContext::getAttributedType() is non-const.
auto &ctx = const_cast<ASTContext &>(constCtx);
- return simpleTransform(ctx, *this,
- [&](QualType type) -> QualType {
- SplitQualType splitType = type.split();
- if (auto *objType = splitType.Ty->getAs<ObjCObjectType>()) {
- if (!objType->isKindOfType())
- return type;
-
- QualType baseType
- = objType->getBaseType().stripObjCKindOfType(ctx);
- return ctx.getQualifiedType(
- ctx.getObjCObjectType(baseType,
- objType->getTypeArgsAsWritten(),
- objType->getProtocols(),
- /*isKindOf=*/false),
- splitType.Quals);
- }
-
- return type;
- });
+ StripObjCKindOfTypeVisitor visitor(ctx);
+ return visitor.recurse(*this);
}
QualType QualType::getAtomicUnqualifiedType() const {
@@ -1713,9 +1736,17 @@ namespace {
return Visit(T->getModifiedType());
}
+ Type *VisitMacroQualifiedType(const MacroQualifiedType *T) {
+ return Visit(T->getUnderlyingType());
+ }
+
Type *VisitAdjustedType(const AdjustedType *T) {
return Visit(T->getOriginalType());
}
+
+ Type *VisitPackExpansionType(const PackExpansionType *T) {
+ return Visit(T->getPattern());
+ }
};
} // namespace
@@ -2245,6 +2276,18 @@ bool QualType::isNonWeakInMRRWithObjCWeak(const ASTContext &Context) const {
getObjCLifetime() != Qualifiers::OCL_Weak;
}
+bool QualType::hasNonTrivialToPrimitiveDefaultInitializeCUnion(const RecordDecl *RD) {
+ return RD->hasNonTrivialToPrimitiveDefaultInitializeCUnion();
+}
+
+bool QualType::hasNonTrivialToPrimitiveDestructCUnion(const RecordDecl *RD) {
+ return RD->hasNonTrivialToPrimitiveDestructCUnion();
+}
+
+bool QualType::hasNonTrivialToPrimitiveCopyCUnion(const RecordDecl *RD) {
+ return RD->hasNonTrivialToPrimitiveCopyCUnion();
+}
+
QualType::PrimitiveDefaultInitializeKind
QualType::isNonTrivialToPrimitiveDefaultInitialize() const {
if (const auto *RT =
@@ -2990,6 +3033,7 @@ CanThrowResult FunctionProtoType::canThrow() const {
case EST_DynamicNone:
case EST_BasicNoexcept:
case EST_NoexceptTrue:
+ case EST_NoThrow:
return CT_Cannot;
case EST_None:
@@ -3082,6 +3126,20 @@ QualType TypedefType::desugar() const {
return getDecl()->getUnderlyingType();
}
+QualType MacroQualifiedType::desugar() const { return getUnderlyingType(); }
+
+QualType MacroQualifiedType::getModifiedType() const {
+ // Step over MacroQualifiedTypes from the same macro to find the type
+ // ultimately qualified by the macro qualifier.
+ QualType Inner = cast<AttributedType>(getUnderlyingType())->getModifiedType();
+ while (auto *InnerMQT = dyn_cast<MacroQualifiedType>(Inner)) {
+ if (InnerMQT->getMacroIdentifier() != getMacroIdentifier())
+ break;
+ Inner = InnerMQT->getModifiedType();
+ }
+ return Inner;
+}
+
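// An illustrative source pattern for MacroQualifiedType (the macro name is
// an assumption, not from the patch): a nullability qualifier reached
// through a macro. Per the stripOuterNullability change below, the macro
// wrapper is looked through to find the underlying _Nonnull attribute.
#define MY_NONNULL _Nonnull
int *MY_NONNULL object;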
TypeOfExprType::TypeOfExprType(Expr *E, QualType can)
: Type(TypeOfExpr, can, E->isTypeDependent(),
E->isInstantiationDependent(),
@@ -3206,6 +3264,7 @@ bool AttributedType::isQualifier() const {
case attr::TypeNullable:
case attr::TypeNullUnspecified:
case attr::LifetimeBound:
+ case attr::AddressSpace:
return true;
// All other type attributes aren't qualifiers; they rewrite the modified
@@ -3831,7 +3890,11 @@ AttributedType::getImmediateNullability() const {
}
Optional<NullabilityKind> AttributedType::stripOuterNullability(QualType &T) {
- if (auto attributed = dyn_cast<AttributedType>(T.getTypePtr())) {
+ QualType AttrTy = T;
+ if (auto MacroTy = dyn_cast<MacroQualifiedType>(T))
+ AttrTy = MacroTy->getUnderlyingType();
+
+ if (auto attributed = dyn_cast<AttributedType>(AttrTy)) {
if (auto nullability = attributed->getImmediateNullability()) {
T = attributed->getModifiedType();
return nullability;
@@ -4016,25 +4079,8 @@ CXXRecordDecl *MemberPointerType::getMostRecentCXXRecordDecl() const {
void clang::FixedPointValueToString(SmallVectorImpl<char> &Str,
llvm::APSInt Val, unsigned Scale) {
- if (Val.isSigned() && Val.isNegative() && Val != -Val) {
- Val = -Val;
- Str.push_back('-');
- }
-
- llvm::APSInt IntPart = Val >> Scale;
-
- // Add 4 digits to hold the value after multiplying 10 (the radix)
- unsigned Width = Val.getBitWidth() + 4;
- llvm::APInt FractPart = Val.zextOrTrunc(Scale).zext(Width);
- llvm::APInt FractPartMask = llvm::APInt::getAllOnesValue(Scale).zext(Width);
- llvm::APInt RadixInt = llvm::APInt(Width, 10);
-
- IntPart.toString(Str, /*radix=*/10);
- Str.push_back('.');
- do {
- (FractPart * RadixInt)
- .lshr(Scale)
- .toString(Str, /*radix=*/10, Val.isSigned());
- FractPart = (FractPart * RadixInt) & FractPartMask;
- } while (FractPart != 0);
+ FixedPointSemantics FXSema(Val.getBitWidth(), Scale, Val.isSigned(),
+ /*IsSaturated=*/false,
+ /*HasUnsignedPadding=*/false);
+ APFixedPoint(Val, FXSema).toString(Str);
}
diff --git a/lib/AST/TypeLoc.cpp b/lib/AST/TypeLoc.cpp
index b7b2f188d716..abe4c4eb25e6 100644
--- a/lib/AST/TypeLoc.cpp
+++ b/lib/AST/TypeLoc.cpp
@@ -1,9 +1,8 @@
//===- TypeLoc.cpp - Type Source Info Wrapper -----------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/AST/TypePrinter.cpp b/lib/AST/TypePrinter.cpp
index 32c75afb4381..8d5c37299e5f 100644
--- a/lib/AST/TypePrinter.cpp
+++ b/lib/AST/TypePrinter.cpp
@@ -1,9 +1,8 @@
//===- TypePrinter.cpp - Pretty-Print Clang Types -------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -258,11 +257,18 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
case Type::FunctionProto:
case Type::FunctionNoProto:
case Type::Paren:
- case Type::Attributed:
case Type::PackExpansion:
case Type::SubstTemplateTypeParm:
+ case Type::MacroQualified:
CanPrefixQualifiers = false;
break;
+
+ case Type::Attributed: {
+ // We still want to print the address_space before the type if it is an
+ // address_space attribute.
+ const auto *AttrTy = cast<AttributedType>(T);
+ CanPrefixQualifiers = AttrTy->getAttrKind() == attr::AddressSpace;
+ }
}
return CanPrefixQualifiers;
@@ -728,6 +734,8 @@ FunctionProtoType::printExceptionSpecification(raw_ostream &OS,
OS << getExceptionType(I).stream(Policy);
}
OS << ')';
+ } else if (EST_NoThrow == getExceptionSpecType()) {
+ OS << " __attribute__((nothrow))";
} else if (isNoexceptExceptionSpec(getExceptionSpecType())) {
OS << " noexcept";
// FIXME:Is it useful to print out the expression for a non-dependent
@@ -810,8 +818,8 @@ void TypePrinter::printFunctionProtoAfter(const FunctionProtoType *T,
printFunctionAfter(Info, OS);
- if (!T->getTypeQuals().empty())
- OS << " " << T->getTypeQuals().getAsString();
+ if (!T->getMethodQuals().empty())
+ OS << " " << T->getMethodQuals().getAsString();
switch (T->getRefQualifier()) {
case RQ_None:
@@ -958,6 +966,21 @@ void TypePrinter::printTypedefBefore(const TypedefType *T, raw_ostream &OS) {
printTypeSpec(T->getDecl(), OS);
}
+void TypePrinter::printMacroQualifiedBefore(const MacroQualifiedType *T,
+ raw_ostream &OS) {
+ StringRef MacroName = T->getMacroIdentifier()->getName();
+ OS << MacroName << " ";
+
+ // Since this type is meant to print the macro instead of the whole attribute,
+ // we trim any attributes and go directly to the original modified type.
+ printBefore(T->getModifiedType(), OS);
+}
+
+void TypePrinter::printMacroQualifiedAfter(const MacroQualifiedType *T,
+ raw_ostream &OS) {
+ printAfter(T->getModifiedType(), OS);
+}
+
void TypePrinter::printTypedefAfter(const TypedefType *T, raw_ostream &OS) {}
void TypePrinter::printTypeOfExprBefore(const TypeOfExprType *T,
@@ -1212,8 +1235,18 @@ void TypePrinter::printTemplateTypeParmBefore(const TemplateTypeParmType *T,
raw_ostream &OS) {
if (IdentifierInfo *Id = T->getIdentifier())
OS << Id->getName();
- else
- OS << "type-parameter-" << T->getDepth() << '-' << T->getIndex();
+ else {
+ bool IsLambdaAutoParam = false;
+ if (auto D = T->getDecl()) {
+ if (auto M = dyn_cast_or_null<CXXMethodDecl>(D->getDeclContext()))
+ IsLambdaAutoParam = D->isImplicit() && M->getParent()->isLambda();
+ }
+
+ if (IsLambdaAutoParam)
+ OS << "auto";
+ else
+ OS << "type-parameter-" << T->getDepth() << '-' << T->getIndex();
+ }
spaceBeforePlaceHolder(OS);
}
@@ -1378,7 +1411,10 @@ void TypePrinter::printAttributedBefore(const AttributedType *T,
if (T->getAttrKind() == attr::ObjCKindOf)
OS << "__kindof ";
- printBefore(T->getModifiedType(), OS);
+ if (T->getAttrKind() == attr::AddressSpace)
+ printBefore(T->getEquivalentType(), OS);
+ else
+ printBefore(T->getModifiedType(), OS);
if (T->isMSTypeSpec()) {
switch (T->getAttrKind()) {
@@ -1624,6 +1660,19 @@ static const TemplateArgument &getArgument(const TemplateArgumentLoc &A) {
return A.getArgument();
}
+static void printArgument(const TemplateArgument &A, const PrintingPolicy &PP,
+ llvm::raw_ostream &OS) {
+ A.print(PP, OS);
+}
+
+static void printArgument(const TemplateArgumentLoc &A,
+ const PrintingPolicy &PP, llvm::raw_ostream &OS) {
+ const TemplateArgument::ArgKind &Kind = A.getArgument().getKind();
+ if (Kind == TemplateArgument::ArgKind::Type)
+ return A.getTypeSourceInfo()->getType().print(OS, PP);
+ return A.getArgument().print(PP, OS);
+}
+
template<typename TA>
static void printTo(raw_ostream &OS, ArrayRef<TA> Args,
const PrintingPolicy &Policy, bool SkipBrackets) {
@@ -1645,7 +1694,8 @@ static void printTo(raw_ostream &OS, ArrayRef<TA> Args,
} else {
if (!FirstArg)
OS << Comma;
- Argument.print(Policy, ArgOS);
+ // Try to print the argument with location info, if it exists.
+ printArgument(Arg, Policy, ArgOS);
}
StringRef ArgString = ArgOS.str();
@@ -1755,17 +1805,19 @@ void Qualifiers::print(raw_ostream &OS, const PrintingPolicy& Policy,
case LangAS::opencl_private:
break;
case LangAS::opencl_constant:
- case LangAS::cuda_constant:
OS << "__constant";
break;
case LangAS::opencl_generic:
OS << "__generic";
break;
case LangAS::cuda_device:
- OS << "__device";
+ OS << "__device__";
+ break;
+ case LangAS::cuda_constant:
+ OS << "__constant__";
break;
case LangAS::cuda_shared:
- OS << "__shared";
+ OS << "__shared__";
break;
default:
OS << "__attribute__((address_space(";
diff --git a/lib/AST/VTTBuilder.cpp b/lib/AST/VTTBuilder.cpp
index a3f3dbdfb4f9..53d0ef09f14c 100644
--- a/lib/AST/VTTBuilder.cpp
+++ b/lib/AST/VTTBuilder.cpp
@@ -1,9 +1,8 @@
//===- VTTBuilder.cpp - C++ VTT layout builder ----------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/AST/VTableBuilder.cpp b/lib/AST/VTableBuilder.cpp
index 846a6085743e..0c699571555d 100644
--- a/lib/AST/VTableBuilder.cpp
+++ b/lib/AST/VTableBuilder.cpp
@@ -1,9 +1,8 @@
//===--- VTableBuilder.cpp - C++ vtable layout builder --------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -480,7 +479,7 @@ static bool HasSameVirtualSignature(const CXXMethodDecl *LHS,
// Force the signatures to match. We can't rely on the overrides
// list here because there isn't necessarily an inheritance
// relationship between the two methods.
- if (LT->getTypeQuals() != RT->getTypeQuals())
+ if (LT->getMethodQuals() != RT->getMethodQuals())
return false;
return LT->getParamTypes() == RT->getParamTypes();
}
@@ -847,6 +846,8 @@ private:
: BaseOffset(CharUnits::Zero()),
BaseOffsetInLayoutClass(CharUnits::Zero()),
VTableIndex(0) { }
+
+ MethodInfo(MethodInfo const&) = default;
};
typedef llvm::DenseMap<const CXXMethodDecl *, MethodInfo> MethodInfoMapTy;
@@ -1061,8 +1062,7 @@ void ItaniumVTableBuilder::AddThunk(const CXXMethodDecl *MD,
SmallVectorImpl<ThunkInfo> &ThunksVector = Thunks[MD];
// Check if we have this thunk already.
- if (std::find(ThunksVector.begin(), ThunksVector.end(), Thunk) !=
- ThunksVector.end())
+ if (llvm::find(ThunksVector, Thunk) != ThunksVector.end())
return;
ThunksVector.push_back(Thunk);
@@ -1272,7 +1272,7 @@ ThisAdjustment ItaniumVTableBuilder::ComputeThisAdjustment(
// We don't have vcall offsets for this virtual base, go ahead and
// build them.
VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, MostDerivedClass,
- /*FinalOverriders=*/nullptr,
+ /*Overriders=*/nullptr,
BaseSubobject(Offset.VirtualBase,
CharUnits::Zero()),
/*BaseIsVirtual=*/true,
@@ -2245,7 +2245,7 @@ ItaniumVTableContext::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
if (I != VirtualBaseClassOffsetOffsets.end())
return I->second;
- VCallAndVBaseOffsetBuilder Builder(RD, RD, /*FinalOverriders=*/nullptr,
+ VCallAndVBaseOffsetBuilder Builder(RD, RD, /*Overriders=*/nullptr,
BaseSubobject(RD, CharUnits::Zero()),
/*BaseIsVirtual=*/false,
/*OffsetInLayoutClass=*/CharUnits::Zero());
@@ -2451,8 +2451,7 @@ private:
SmallVector<ThunkInfo, 1> &ThunksVector = Thunks[MD];
// Check if we have this thunk already.
- if (std::find(ThunksVector.begin(), ThunksVector.end(), Thunk) !=
- ThunksVector.end())
+ if (llvm::find(ThunksVector, Thunk) != ThunksVector.end())
return;
ThunksVector.push_back(Thunk);
@@ -3189,8 +3188,8 @@ void VFTableBuilder::dumpLayout(raw_ostream &Out) {
const CXXMethodDecl *MD = MethodNameAndDecl.second;
ThunkInfoVectorTy ThunksVector = Thunks[MD];
- std::stable_sort(ThunksVector.begin(), ThunksVector.end(),
- [](const ThunkInfo &LHS, const ThunkInfo &RHS) {
+ llvm::stable_sort(ThunksVector, [](const ThunkInfo &LHS,
+ const ThunkInfo &RHS) {
// Keep different thunks with the same adjustments in the order they
// were put into the vector.
return std::tie(LHS.This, LHS.Return) < std::tie(RHS.This, RHS.Return);
diff --git a/lib/ASTMatchers/ASTMatchFinder.cpp b/lib/ASTMatchers/ASTMatchFinder.cpp
index dec2e2ad1f93..f407e0875ac4 100644
--- a/lib/ASTMatchers/ASTMatchFinder.cpp
+++ b/lib/ASTMatchers/ASTMatchFinder.cpp
@@ -1,9 +1,8 @@
//===--- ASTMatchFinder.cpp - Structural query framework ------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -84,20 +83,12 @@ public:
// descendants of a traversed node. max_depth is the maximum depth
// to traverse: use 1 for matching the children and INT_MAX for
// matching the descendants.
- MatchChildASTVisitor(const DynTypedMatcher *Matcher,
- ASTMatchFinder *Finder,
- BoundNodesTreeBuilder *Builder,
- int MaxDepth,
- ASTMatchFinder::TraversalKind Traversal,
+ MatchChildASTVisitor(const DynTypedMatcher *Matcher, ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder, int MaxDepth,
+ ast_type_traits::TraversalKind Traversal,
ASTMatchFinder::BindKind Bind)
- : Matcher(Matcher),
- Finder(Finder),
- Builder(Builder),
- CurrentDepth(0),
- MaxDepth(MaxDepth),
- Traversal(Traversal),
- Bind(Bind),
- Matches(false) {}
+ : Matcher(Matcher), Finder(Finder), Builder(Builder), CurrentDepth(0),
+ MaxDepth(MaxDepth), Traversal(Traversal), Bind(Bind), Matches(false) {}
// Returns true if a match is found in the subtree rooted at the
// given AST node. This is done via a set of mutually recursive
@@ -152,7 +143,8 @@ public:
ScopedIncrement ScopedDepth(&CurrentDepth);
Stmt *StmtToTraverse = StmtNode;
- if (Traversal == ASTMatchFinder::TK_IgnoreImplicitCastsAndParentheses) {
+ if (Traversal ==
+ ast_type_traits::TraversalKind::TK_IgnoreImplicitCastsAndParentheses) {
if (Expr *ExprNode = dyn_cast_or_null<Expr>(StmtNode))
StmtToTraverse = ExprNode->IgnoreParenImpCasts();
}
@@ -300,7 +292,7 @@ private:
BoundNodesTreeBuilder ResultBindings;
int CurrentDepth;
const int MaxDepth;
- const ASTMatchFinder::TraversalKind Traversal;
+ const ast_type_traits::TraversalKind Traversal;
const ASTMatchFinder::BindKind Bind;
bool Matches;
};
@@ -394,7 +386,8 @@ public:
bool memoizedMatchesRecursively(const ast_type_traits::DynTypedNode &Node,
const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder, int MaxDepth,
- TraversalKind Traversal, BindKind Bind) {
+ ast_type_traits::TraversalKind Traversal,
+ BindKind Bind) {
// For AST-nodes that don't have an identity, we can't memoize.
if (!Node.getMemoizationData() || !Builder->isComparable())
return matchesRecursively(Node, Matcher, Builder, MaxDepth, Traversal,
@@ -428,7 +421,8 @@ public:
bool matchesRecursively(const ast_type_traits::DynTypedNode &Node,
const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder, int MaxDepth,
- TraversalKind Traversal, BindKind Bind) {
+ ast_type_traits::TraversalKind Traversal,
+ BindKind Bind) {
MatchChildASTVisitor Visitor(
&Matcher, this, Builder, MaxDepth, Traversal, Bind);
return Visitor.findMatch(Node);
@@ -442,7 +436,7 @@ public:
bool matchesChildOf(const ast_type_traits::DynTypedNode &Node,
const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder,
- TraversalKind Traversal,
+ ast_type_traits::TraversalKind Traversal,
BindKind Bind) override {
if (ResultCache.size() > MaxMemoizationEntries)
ResultCache.clear();
@@ -457,7 +451,8 @@ public:
if (ResultCache.size() > MaxMemoizationEntries)
ResultCache.clear();
return memoizedMatchesRecursively(Node, Matcher, Builder, INT_MAX,
- TK_AsIs, Bind);
+ ast_type_traits::TraversalKind::TK_AsIs,
+ Bind);
}
// Implements ASTMatchFinder::matchesAncestorOf.
bool matchesAncestorOf(const ast_type_traits::DynTypedNode &Node,
diff --git a/lib/ASTMatchers/ASTMatchersInternal.cpp b/lib/ASTMatchers/ASTMatchersInternal.cpp
index e1aae172a8d6..4ee32fbe94b1 100644
--- a/lib/ASTMatchers/ASTMatchersInternal.cpp
+++ b/lib/ASTMatchers/ASTMatchersInternal.cpp
@@ -1,9 +1,8 @@
//===- ASTMatchersInternal.cpp - Structural query framework ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -728,6 +727,7 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
compoundLiteralExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
+const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr> chooseExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr> gnuNullExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;
@@ -845,5 +845,12 @@ AST_TYPELOC_TRAVERSE_MATCHER_DEF(
AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
PointerType, ReferenceType));
+const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective>
+ ompExecutableDirective;
+const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
+ ompDefaultClause;
+const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
+ cxxDeductionGuideDecl;
+
} // end namespace ast_matchers
} // end namespace clang
diff --git a/lib/ASTMatchers/Dynamic/Diagnostics.cpp b/lib/ASTMatchers/Dynamic/Diagnostics.cpp
index 9cddcf93caef..8656bca870ec 100644
--- a/lib/ASTMatchers/Dynamic/Diagnostics.cpp
+++ b/lib/ASTMatchers/Dynamic/Diagnostics.cpp
@@ -1,9 +1,8 @@
//===--- Diagnostics.cpp - Helper class for error diagnostics -----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/ASTMatchers/Dynamic/Marshallers.h b/lib/ASTMatchers/Dynamic/Marshallers.h
index c6c89351afd3..fac2fc98e09c 100644
--- a/lib/ASTMatchers/Dynamic/Marshallers.h
+++ b/lib/ASTMatchers/Dynamic/Marshallers.h
@@ -1,9 +1,8 @@
//===- Marshallers.h - Generic matcher function marshallers -----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -27,6 +26,7 @@
#include "clang/ASTMatchers/Dynamic/VariantValue.h"
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/LLVM.h"
+#include "clang/Basic/OpenMPKinds.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
@@ -166,6 +166,28 @@ public:
}
};
+template <> struct ArgTypeTraits<OpenMPClauseKind> {
+private:
+ static Optional<OpenMPClauseKind> getClauseKind(llvm::StringRef ClauseKind) {
+ return llvm::StringSwitch<Optional<OpenMPClauseKind>>(ClauseKind)
+#define OPENMP_CLAUSE(TextualSpelling, Class) \
+ .Case("OMPC_" #TextualSpelling, OMPC_##TextualSpelling)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(llvm::None);
+ }
+
+public:
+ static bool is(const VariantValue &Value) {
+ return Value.isString() && getClauseKind(Value.getString());
+ }
+
+ static OpenMPClauseKind get(const VariantValue &Value) {
+ return *getClauseKind(Value.getString());
+ }
+
+ static ArgKind getKind() { return ArgKind(ArgKind::AK_String); }
+};
+
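// A usage sketch for the new trait (illustrative, using the dynamic-matcher
// VariantValue wrapper): a string spelling such as "OMPC_default" is
// validated and converted to the corresponding OpenMPClauseKind.
VariantValue V("OMPC_default");
if (ArgTypeTraits<OpenMPClauseKind>::is(V)) {
  OpenMPClauseKind Kind = ArgTypeTraits<OpenMPClauseKind>::get(V);
  (void)Kind; // OMPC_default
}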
/// Matcher descriptor interface.
///
/// Provides a \c create() method that constructs the matcher from the provided
diff --git a/lib/ASTMatchers/Dynamic/Parser.cpp b/lib/ASTMatchers/Dynamic/Parser.cpp
index 5db10048fdf8..e3b00b46832c 100644
--- a/lib/ASTMatchers/Dynamic/Parser.cpp
+++ b/lib/ASTMatchers/Dynamic/Parser.cpp
@@ -1,9 +1,8 @@
//===- Parser.cpp - Matcher expression parser -----------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/ASTMatchers/Dynamic/Registry.cpp b/lib/ASTMatchers/Dynamic/Registry.cpp
index e6e48467967e..33058053571a 100644
--- a/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -1,9 +1,8 @@
//===- Registry.cpp - Matcher registry ------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -91,6 +90,7 @@ void RegistryMaps::registerMatcher(
} while (false)
/// Generate a registry map with all the known matchers.
+/// Please keep sorted alphabetically!
RegistryMaps::RegistryMaps() {
// TODO: Here is the list of the missing matchers, grouped by reason.
//
@@ -130,12 +130,12 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(argumentCountIs);
REGISTER_MATCHER(arraySubscriptExpr);
REGISTER_MATCHER(arrayType);
- REGISTER_MATCHER(asmStmt);
REGISTER_MATCHER(asString);
+ REGISTER_MATCHER(asmStmt);
REGISTER_MATCHER(atomicExpr);
REGISTER_MATCHER(atomicType);
- REGISTER_MATCHER(autoreleasePoolStmt)
REGISTER_MATCHER(autoType);
+ REGISTER_MATCHER(autoreleasePoolStmt)
REGISTER_MATCHER(binaryConditionalOperator);
REGISTER_MATCHER(binaryOperator);
REGISTER_MATCHER(blockDecl);
@@ -144,10 +144,12 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(booleanType);
REGISTER_MATCHER(breakStmt);
REGISTER_MATCHER(builtinType);
+ REGISTER_MATCHER(cStyleCastExpr);
REGISTER_MATCHER(callExpr);
REGISTER_MATCHER(caseStmt);
REGISTER_MATCHER(castExpr);
REGISTER_MATCHER(characterLiteral);
+ REGISTER_MATCHER(chooseExpr);
REGISTER_MATCHER(classTemplateDecl);
REGISTER_MATCHER(classTemplateSpecializationDecl);
REGISTER_MATCHER(complexType);
@@ -158,7 +160,6 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(constantExpr);
REGISTER_MATCHER(containsDeclaration);
REGISTER_MATCHER(continueStmt);
- REGISTER_MATCHER(cStyleCastExpr);
REGISTER_MATCHER(cudaKernelCallExpr);
REGISTER_MATCHER(cxxBindTemporaryExpr);
REGISTER_MATCHER(cxxBoolLiteral);
@@ -168,6 +169,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(cxxConstructorDecl);
REGISTER_MATCHER(cxxConversionDecl);
REGISTER_MATCHER(cxxCtorInitializer);
+ REGISTER_MATCHER(cxxDeductionGuideDecl);
REGISTER_MATCHER(cxxDefaultArgExpr);
REGISTER_MATCHER(cxxDeleteExpr);
REGISTER_MATCHER(cxxDependentScopeMemberExpr);
@@ -191,10 +193,10 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(cxxUnresolvedConstructExpr);
REGISTER_MATCHER(decayedType);
REGISTER_MATCHER(decl);
- REGISTER_MATCHER(declaratorDecl);
REGISTER_MATCHER(declCountIs);
REGISTER_MATCHER(declRefExpr);
REGISTER_MATCHER(declStmt);
+ REGISTER_MATCHER(declaratorDecl);
REGISTER_MATCHER(decltypeType);
REGISTER_MATCHER(defaultStmt);
REGISTER_MATCHER(dependentSizedArrayType);
@@ -212,7 +214,6 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(expr);
REGISTER_MATCHER(exprWithCleanups);
REGISTER_MATCHER(fieldDecl);
- REGISTER_MATCHER(indirectFieldDecl);
REGISTER_MATCHER(floatLiteral);
REGISTER_MATCHER(forEach);
REGISTER_MATCHER(forEachArgumentWithParam);
@@ -233,6 +234,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(has);
REGISTER_MATCHER(hasAncestor);
REGISTER_MATCHER(hasAnyArgument);
+ REGISTER_MATCHER(hasAnyClause);
REGISTER_MATCHER(hasAnyConstructorInitializer);
REGISTER_MATCHER(hasAnyDeclaration);
REGISTER_MATCHER(hasAnyName);
@@ -255,8 +257,8 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasCondition);
REGISTER_MATCHER(hasConditionVariableStatement);
REGISTER_MATCHER(hasDecayedType);
- REGISTER_MATCHER(hasDeclaration);
REGISTER_MATCHER(hasDeclContext);
+ REGISTER_MATCHER(hasDeclaration);
REGISTER_MATCHER(hasDeducedType);
REGISTER_MATCHER(hasDefaultArgument);
REGISTER_MATCHER(hasDefinition);
@@ -266,6 +268,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasEitherOperand);
REGISTER_MATCHER(hasElementType);
REGISTER_MATCHER(hasElse);
+ REGISTER_MATCHER(hasExplicitSpecifier);
REGISTER_MATCHER(hasExternalFormalLinkage);
REGISTER_MATCHER(hasFalseExpression);
REGISTER_MATCHER(hasGlobalStorage);
@@ -290,12 +293,12 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasParameter);
REGISTER_MATCHER(hasParent);
REGISTER_MATCHER(hasQualifier);
+ REGISTER_MATCHER(hasRHS);
REGISTER_MATCHER(hasRangeInit);
REGISTER_MATCHER(hasReceiver);
REGISTER_MATCHER(hasReceiverType);
REGISTER_MATCHER(hasReplacementType);
REGISTER_MATCHER(hasReturnValue);
- REGISTER_MATCHER(hasRHS);
REGISTER_MATCHER(hasSelector);
REGISTER_MATCHER(hasSingleDecl);
REGISTER_MATCHER(hasSize);
@@ -303,6 +306,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasSourceExpression);
REGISTER_MATCHER(hasSpecializedTemplate);
REGISTER_MATCHER(hasStaticStorageDuration);
+ REGISTER_MATCHER(hasStructuredBlock);
REGISTER_MATCHER(hasSyntacticForm);
REGISTER_MATCHER(hasTargetDecl);
REGISTER_MATCHER(hasTemplateArgument);
@@ -318,6 +322,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasUnqualifiedDesugaredType);
REGISTER_MATCHER(hasValueType);
REGISTER_MATCHER(ifStmt);
+ REGISTER_MATCHER(ignoringElidableConstructorCall);
REGISTER_MATCHER(ignoringImpCasts);
REGISTER_MATCHER(ignoringImplicit);
REGISTER_MATCHER(ignoringParenCasts);
@@ -326,10 +331,12 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(implicitCastExpr);
REGISTER_MATCHER(implicitValueInitExpr);
REGISTER_MATCHER(incompleteArrayType);
+ REGISTER_MATCHER(indirectFieldDecl);
REGISTER_MATCHER(initListExpr);
REGISTER_MATCHER(injectedClassNameType);
REGISTER_MATCHER(innerType);
REGISTER_MATCHER(integerLiteral);
+ REGISTER_MATCHER(isAllowedToContainClauseKind);
REGISTER_MATCHER(isAnonymous);
REGISTER_MATCHER(isAnyCharacter);
REGISTER_MATCHER(isAnyPointer);
@@ -340,16 +347,18 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isBitField);
REGISTER_MATCHER(isCatchAll);
REGISTER_MATCHER(isClass);
+ REGISTER_MATCHER(isClassMessage);
+ REGISTER_MATCHER(isClassMethod);
REGISTER_MATCHER(isConst);
- REGISTER_MATCHER(isConstexpr);
REGISTER_MATCHER(isConstQualified);
+ REGISTER_MATCHER(isConstexpr);
REGISTER_MATCHER(isCopyAssignmentOperator);
REGISTER_MATCHER(isCopyConstructor);
REGISTER_MATCHER(isDefaultConstructor);
REGISTER_MATCHER(isDefaulted);
REGISTER_MATCHER(isDefinition);
- REGISTER_MATCHER(isDeleted);
REGISTER_MATCHER(isDelegatingConstructor);
+ REGISTER_MATCHER(isDeleted);
REGISTER_MATCHER(isExceptionVariable);
REGISTER_MATCHER(isExpansionInFileMatching);
REGISTER_MATCHER(isExpansionInMainFile);
@@ -360,13 +369,15 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isExternC);
REGISTER_MATCHER(isFinal);
REGISTER_MATCHER(isImplicit);
+ REGISTER_MATCHER(isInStdNamespace);
+ REGISTER_MATCHER(isInTemplateInstantiation);
REGISTER_MATCHER(isInline);
REGISTER_MATCHER(isInstanceMessage);
+ REGISTER_MATCHER(isInstanceMethod);
REGISTER_MATCHER(isInstantiated);
REGISTER_MATCHER(isInstantiationDependent);
REGISTER_MATCHER(isInteger);
REGISTER_MATCHER(isIntegral);
- REGISTER_MATCHER(isInTemplateInstantiation);
REGISTER_MATCHER(isLambda);
REGISTER_MATCHER(isListInitialization);
REGISTER_MATCHER(isMain);
@@ -375,13 +386,17 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isMoveConstructor);
REGISTER_MATCHER(isNoReturn);
REGISTER_MATCHER(isNoThrow);
+ REGISTER_MATCHER(isNoneKind);
+ REGISTER_MATCHER(isOMPStructuredBlock);
REGISTER_MATCHER(isOverride);
REGISTER_MATCHER(isPrivate);
REGISTER_MATCHER(isProtected);
REGISTER_MATCHER(isPublic);
REGISTER_MATCHER(isPure);
REGISTER_MATCHER(isScoped);
+ REGISTER_MATCHER(isSharedKind);
REGISTER_MATCHER(isSignedInteger);
+ REGISTER_MATCHER(isStandaloneDirective);
REGISTER_MATCHER(isStaticLocal);
REGISTER_MATCHER(isStaticStorageClass);
REGISTER_MATCHER(isStruct);
@@ -396,11 +411,11 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isVirtualAsWritten);
REGISTER_MATCHER(isVolatileQualified);
REGISTER_MATCHER(isWritten);
+ REGISTER_MATCHER(lValueReferenceType);
REGISTER_MATCHER(labelDecl);
REGISTER_MATCHER(labelStmt);
REGISTER_MATCHER(lambdaExpr);
REGISTER_MATCHER(linkageSpecDecl);
- REGISTER_MATCHER(lValueReferenceType);
REGISTER_MATCHER(matchesName);
REGISTER_MATCHER(matchesSelector);
REGISTER_MATCHER(materializeTemporaryExpr);
@@ -408,9 +423,9 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(memberExpr);
REGISTER_MATCHER(memberPointerType);
REGISTER_MATCHER(namedDecl);
+ REGISTER_MATCHER(namesType);
REGISTER_MATCHER(namespaceAliasDecl);
REGISTER_MATCHER(namespaceDecl);
- REGISTER_MATCHER(namesType);
REGISTER_MATCHER(nestedNameSpecifier);
REGISTER_MATCHER(nestedNameSpecifierLoc);
REGISTER_MATCHER(nonTypeTemplateParmDecl);
@@ -433,6 +448,8 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(objcThrowStmt);
REGISTER_MATCHER(objcTryStmt);
REGISTER_MATCHER(ofClass);
+ REGISTER_MATCHER(ompDefaultClause);
+ REGISTER_MATCHER(ompExecutableDirective);
REGISTER_MATCHER(on);
REGISTER_MATCHER(onImplicitObjectArgument);
REGISTER_MATCHER(opaqueValueExpr);
@@ -445,18 +462,18 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(pointerType);
REGISTER_MATCHER(predefinedExpr);
REGISTER_MATCHER(qualType);
+ REGISTER_MATCHER(rValueReferenceType);
REGISTER_MATCHER(realFloatingPointType);
REGISTER_MATCHER(recordDecl);
REGISTER_MATCHER(recordType);
REGISTER_MATCHER(referenceType);
REGISTER_MATCHER(refersToDeclaration);
REGISTER_MATCHER(refersToIntegralType);
- REGISTER_MATCHER(refersToType);
REGISTER_MATCHER(refersToTemplate);
+ REGISTER_MATCHER(refersToType);
REGISTER_MATCHER(requiresZeroInitialization);
- REGISTER_MATCHER(returns);
REGISTER_MATCHER(returnStmt);
- REGISTER_MATCHER(rValueReferenceType);
+ REGISTER_MATCHER(returns);
REGISTER_MATCHER(sizeOfExpr);
REGISTER_MATCHER(specifiesNamespace);
REGISTER_MATCHER(specifiesType);
@@ -483,10 +500,10 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(type);
REGISTER_MATCHER(typeAliasDecl);
REGISTER_MATCHER(typeAliasTemplateDecl);
+ REGISTER_MATCHER(typeLoc);
REGISTER_MATCHER(typedefDecl);
REGISTER_MATCHER(typedefNameDecl);
REGISTER_MATCHER(typedefType);
- REGISTER_MATCHER(typeLoc);
REGISTER_MATCHER(unaryExprOrTypeTraitExpr);
REGISTER_MATCHER(unaryOperator);
REGISTER_MATCHER(unaryTransformType);
diff --git a/lib/ASTMatchers/Dynamic/VariantValue.cpp b/lib/ASTMatchers/Dynamic/VariantValue.cpp
index 06d95eaa7563..118ca2a41cb1 100644
--- a/lib/ASTMatchers/Dynamic/VariantValue.cpp
+++ b/lib/ASTMatchers/Dynamic/VariantValue.cpp
@@ -1,9 +1,8 @@
//===--- VariantValue.cpp - Polymorphic value type -*- C++ -*-===/
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/Analysis/AnalysisDeclContext.cpp b/lib/Analysis/AnalysisDeclContext.cpp
index 30160bc239ae..b6a429ff49eb 100644
--- a/lib/Analysis/AnalysisDeclContext.cpp
+++ b/lib/Analysis/AnalysisDeclContext.cpp
@@ -1,9 +1,8 @@
//===- AnalysisDeclContext.cpp - Analysis context for Path Sens analysis --===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -31,6 +30,7 @@
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "clang/Analysis/Support/BumpVector.h"
+#include "clang/Basic/JsonSupport.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
@@ -71,7 +71,7 @@ AnalysisDeclContextManager::AnalysisDeclContextManager(
bool addLoopExit, bool addScopes, bool synthesizeBodies,
bool addStaticInitBranch, bool addCXXNewAllocator,
bool addRichCXXConstructors, bool markElidedCXXConstructors,
- CodeInjector *injector)
+ bool addVirtualBaseBranches, CodeInjector *injector)
: Injector(injector), FunctionBodyFarm(ASTCtx, injector),
SynthesizeBodies(synthesizeBodies) {
cfgBuildOptions.PruneTriviallyFalseEdges = !useUnoptimizedCFG;
@@ -85,6 +85,7 @@ AnalysisDeclContextManager::AnalysisDeclContextManager(
cfgBuildOptions.AddCXXNewAllocator = addCXXNewAllocator;
cfgBuildOptions.AddRichCXXConstructors = addRichCXXConstructors;
cfgBuildOptions.MarkElidedCXXConstructors = markElidedCXXConstructors;
+ cfgBuildOptions.AddVirtualBaseBranches = addVirtualBaseBranches;
}
void AnalysisDeclContextManager::clear() { Contexts.clear(); }
@@ -463,17 +464,17 @@ bool LocationContext::isParentOf(const LocationContext *LC) const {
return false;
}
-static void printLocation(raw_ostream &OS, const SourceManager &SM,
- SourceLocation SLoc) {
- if (SLoc.isFileID() && SM.isInMainFile(SLoc))
- OS << "line " << SM.getExpansionLineNumber(SLoc);
+static void printLocation(raw_ostream &Out, const SourceManager &SM,
+ SourceLocation Loc) {
+ if (Loc.isFileID() && SM.isInMainFile(Loc))
+ Out << SM.getExpansionLineNumber(Loc);
else
- SLoc.print(OS, SM);
+ Loc.print(Out, SM);
}
-void LocationContext::dumpStack(
- raw_ostream &OS, StringRef Indent, const char *NL, const char *Sep,
- std::function<void(const LocationContext *)> printMoreInfoPerContext) const {
+void LocationContext::dumpStack(raw_ostream &Out, const char *NL,
+ std::function<void(const LocationContext *)>
+ printMoreInfoPerContext) const {
ASTContext &Ctx = getAnalysisDeclContext()->getASTContext();
PrintingPolicy PP(Ctx.getLangOpts());
PP.TerseOutput = 1;
@@ -485,38 +486,91 @@ void LocationContext::dumpStack(
for (const LocationContext *LCtx = this; LCtx; LCtx = LCtx->getParent()) {
switch (LCtx->getKind()) {
case StackFrame:
- OS << Indent << '#' << Frame << ' ';
+ Out << "\t#" << Frame << ' ';
++Frame;
if (const auto *D = dyn_cast<NamedDecl>(LCtx->getDecl()))
- OS << "Calling " << D->getQualifiedNameAsString();
+ Out << "Calling " << D->getQualifiedNameAsString();
else
- OS << "Calling anonymous code";
+ Out << "Calling anonymous code";
if (const Stmt *S = cast<StackFrameContext>(LCtx)->getCallSite()) {
- OS << " at ";
- printLocation(OS, SM, S->getBeginLoc());
+ Out << " at line ";
+ printLocation(Out, SM, S->getBeginLoc());
}
break;
case Scope:
- OS << "Entering scope";
+ Out << "Entering scope";
break;
case Block:
- OS << "Invoking block";
+ Out << "Invoking block";
if (const Decl *D = cast<BlockInvocationContext>(LCtx)->getDecl()) {
- OS << " defined at ";
- printLocation(OS, SM, D->getBeginLoc());
+ Out << " defined at line ";
+ printLocation(Out, SM, D->getBeginLoc());
}
break;
}
- OS << NL;
+ Out << NL;
printMoreInfoPerContext(LCtx);
}
}
-LLVM_DUMP_METHOD void LocationContext::dumpStack() const {
- dumpStack(llvm::errs());
+void LocationContext::printJson(raw_ostream &Out, const char *NL,
+ unsigned int Space, bool IsDot,
+ std::function<void(const LocationContext *)>
+ printMoreInfoPerContext) const {
+ ASTContext &Ctx = getAnalysisDeclContext()->getASTContext();
+ PrintingPolicy PP(Ctx.getLangOpts());
+ PP.TerseOutput = 1;
+
+ const SourceManager &SM =
+ getAnalysisDeclContext()->getASTContext().getSourceManager();
+
+ unsigned Frame = 0;
+ for (const LocationContext *LCtx = this; LCtx; LCtx = LCtx->getParent()) {
+ Indent(Out, Space, IsDot)
+ << "{ \"lctx_id\": " << LCtx->getID() << ", \"location_context\": \"";
+ switch (LCtx->getKind()) {
+ case StackFrame:
+ Out << '#' << Frame << " Call\", \"calling\": \"";
+ ++Frame;
+ if (const auto *D = dyn_cast<NamedDecl>(LCtx->getDecl()))
+ Out << D->getQualifiedNameAsString();
+ else
+ Out << "anonymous code";
+
+ Out << "\", \"location\": ";
+ if (const Stmt *S = cast<StackFrameContext>(LCtx)->getCallSite()) {
+ printSourceLocationAsJson(Out, S->getBeginLoc(), SM);
+ } else {
+ Out << "null";
+ }
+
+ Out << ", \"items\": ";
+ break;
+ case Scope:
+ Out << "Entering scope\" ";
+ break;
+ case Block:
+ Out << "Invoking block\" ";
+ if (const Decl *D = cast<BlockInvocationContext>(LCtx)->getDecl()) {
+ Out << ", \"location\": ";
+ printSourceLocationAsJson(Out, D->getBeginLoc(), SM);
+ Out << ' ';
+ }
+ break;
+ }
+
+ printMoreInfoPerContext(LCtx);
+
+ Out << '}';
+ if (LCtx->getParent())
+ Out << ',';
+ Out << NL;
+ }
}
+LLVM_DUMP_METHOD void LocationContext::dump() const { printJson(llvm::errs()); }
+
//===----------------------------------------------------------------------===//
// Lazily generated map to query the external variables referenced by a Block.
//===----------------------------------------------------------------------===//
diff --git a/lib/Analysis/BodyFarm.cpp b/lib/Analysis/BodyFarm.cpp
index 35f046406763..576f86516017 100644
--- a/lib/Analysis/BodyFarm.cpp
+++ b/lib/Analysis/BodyFarm.cpp
@@ -1,9 +1,8 @@
//== BodyFarm.cpp - Factory for conjuring up fake bodies ----------*- C++ -*-//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -221,7 +220,7 @@ MemberExpr *ASTMaker::makeMemberExpression(Expr *base, ValueDecl *MemberDecl,
SourceLocation(), MemberDecl, FoundDecl,
DeclarationNameInfo(MemberDecl->getDeclName(), SourceLocation()),
/* TemplateArgumentListInfo=*/ nullptr, MemberDecl->getType(), ValueKind,
- OK_Ordinary);
+ OK_Ordinary, NOUR_None);
}
ValueDecl *ASTMaker::findMemberField(const RecordDecl *RD, StringRef Name) {
@@ -294,7 +293,7 @@ static CallExpr *create_call_once_lambda_call(ASTContext &C, ASTMaker M,
return CXXOperatorCallExpr::Create(
/*AstContext=*/C, OO_Call, callOperatorDeclRef,
- /*args=*/CallArgs,
+ /*Args=*/CallArgs,
/*QualType=*/C.VoidTy,
/*ExprValueType=*/VK_RValue,
/*SourceLocation=*/SourceLocation(), FPOptions());
@@ -466,10 +465,10 @@ static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) {
auto *Out =
IfStmt::Create(C, SourceLocation(),
/* IsConstexpr=*/false,
- /* init=*/nullptr,
- /* var=*/nullptr,
- /* cond=*/FlagCheck,
- /* then=*/M.makeCompound({CallbackCall, FlagAssignment}));
+ /* Init=*/nullptr,
+ /* Var=*/nullptr,
+ /* Cond=*/FlagCheck,
+ /* Then=*/M.makeCompound({CallbackCall, FlagAssignment}));
return Out;
}
@@ -512,7 +511,7 @@ static Stmt *create_dispatch_once(ASTContext &C, const FunctionDecl *D) {
CallExpr *CE = CallExpr::Create(
/*ASTContext=*/C,
/*StmtClass=*/M.makeLvalueToRvalue(/*Expr=*/Block),
- /*args=*/None,
+ /*Args=*/None,
/*QualType=*/C.VoidTy,
/*ExprValueType=*/VK_RValue,
/*SourceLocation=*/SourceLocation());
@@ -550,10 +549,10 @@ static Stmt *create_dispatch_once(ASTContext &C, const FunctionDecl *D) {
// (5) Create the 'if' statement.
auto *If = IfStmt::Create(C, SourceLocation(),
/* IsConstexpr=*/false,
- /* init=*/nullptr,
- /* var=*/nullptr,
- /* cond=*/GuardCondition,
- /* then=*/CS);
+ /* Init=*/nullptr,
+ /* Var=*/nullptr,
+ /* Cond=*/GuardCondition,
+ /* Then=*/CS);
return If;
}
@@ -658,16 +657,14 @@ static Stmt *create_OSAtomicCompareAndSwap(ASTContext &C, const FunctionDecl *D)
/// Construct the If.
auto *If = IfStmt::Create(C, SourceLocation(),
/* IsConstexpr=*/false,
- /* init=*/nullptr,
- /* var=*/nullptr, Comparison, Body,
+ /* Init=*/nullptr,
+ /* Var=*/nullptr, Comparison, Body,
SourceLocation(), Else);
return If;
}
Stmt *BodyFarm::getBody(const FunctionDecl *D) {
- D = D->getCanonicalDecl();
-
Optional<Stmt *> &Val = Bodies[D];
if (Val.hasValue())
return Val.getValue();
@@ -807,6 +804,11 @@ Stmt *BodyFarm::getBody(const ObjCMethodDecl *D) {
D = D->getCanonicalDecl();
+ // We should not try to synthesize explicitly redefined accessors.
+ // We do not know for sure how they behave.
+ if (!D->isImplicit())
+ return nullptr;
+
Optional<Stmt *> &Val = Bodies[D];
if (Val.hasValue())
return Val.getValue();
diff --git a/lib/Analysis/CFG.cpp b/lib/Analysis/CFG.cpp
index 96130c25be8a..0ed1e988a196 100644
--- a/lib/Analysis/CFG.cpp
+++ b/lib/Analysis/CFG.cpp
@@ -1,9 +1,8 @@
//===- CFG.cpp - Classes for representing and building CFGs ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -28,10 +27,11 @@
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Type.h"
-#include "clang/Analysis/Support/BumpVector.h"
#include "clang/Analysis/ConstructionContext.h"
+#include "clang/Analysis/Support/BumpVector.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/ExceptionSpecificationType.h"
+#include "clang/Basic/JsonSupport.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
@@ -549,6 +549,7 @@ private:
CFGBlock *VisitExprWithCleanups(ExprWithCleanups *E, AddStmtChoice asc);
CFGBlock *VisitForStmt(ForStmt *F);
CFGBlock *VisitGotoStmt(GotoStmt *G);
+ CFGBlock *VisitGCCAsmStmt(GCCAsmStmt *G, AddStmtChoice asc);
CFGBlock *VisitIfStmt(IfStmt *I);
CFGBlock *VisitImplicitCastExpr(ImplicitCastExpr *E, AddStmtChoice asc);
CFGBlock *VisitConstantExpr(ConstantExpr *E, AddStmtChoice asc);
@@ -588,6 +589,8 @@ private:
CFGBlock *VisitStmt(Stmt *S, AddStmtChoice asc);
CFGBlock *VisitChildren(Stmt *S);
CFGBlock *VisitNoRecurse(Expr *E, AddStmtChoice asc);
+ CFGBlock *VisitOMPExecutableDirective(OMPExecutableDirective *D,
+ AddStmtChoice asc);
void maybeAddScopeBeginForVarDecl(CFGBlock *B, const VarDecl *VD,
const Stmt *S) {
@@ -1379,6 +1382,15 @@ void CFGBuilder::findConstructionContexts(
findConstructionContexts(Layer, CO->getRHS());
break;
}
+ case Stmt::InitListExprClass: {
+ auto *ILE = cast<InitListExpr>(Child);
+ if (ILE->isTransparent()) {
+ findConstructionContexts(Layer, ILE->getInit(0));
+ break;
+ }
+ // TODO: Handle other cases. For now, fail to find construction contexts.
+ break;
+ }
default:
break;
}
@@ -1423,13 +1435,41 @@ std::unique_ptr<CFG> CFGBuilder::buildCFG(const Decl *D, Stmt *Statement) {
if (badCFG)
return nullptr;
- // For C++ constructor add initializers to CFG.
- if (const CXXConstructorDecl *CD = dyn_cast_or_null<CXXConstructorDecl>(D)) {
+ // For C++ constructor add initializers to CFG. Constructors of virtual bases
+ // are ignored unless the object is of the most derived class.
+ // class VBase { VBase() = default; VBase(int) {} };
+ // class A : virtual public VBase { A() : VBase(0) {} };
+ // class B : public A {};
+ // B b; // Constructor calls in order: VBase(), A(), B().
+ // // VBase(0) is ignored because A isn't the most derived class.
+ // This may result in the virtual base(s) being already initialized at this
+ // point, in which case we should jump right onto non-virtual bases and
+ // fields. To handle this, make a CFG branch. We only need to add one such
+ // branch per constructor, since the Standard states that all virtual bases
+ // shall be initialized before non-virtual bases and direct data members.
+ if (const auto *CD = dyn_cast_or_null<CXXConstructorDecl>(D)) {
+ CFGBlock *VBaseSucc = nullptr;
for (auto *I : llvm::reverse(CD->inits())) {
+ if (BuildOpts.AddVirtualBaseBranches && !VBaseSucc &&
+ I->isBaseInitializer() && I->isBaseVirtual()) {
+ // We've reached the first virtual base init while iterating in reverse
+ // order. Make a new block for virtual base initializers so that we
+ // could skip them.
+ VBaseSucc = Succ = B ? B : &cfg->getExit();
+ Block = createBlock();
+ }
B = addInitializer(I);
if (badCFG)
return nullptr;
}
+ if (VBaseSucc) {
+ // Make a branch block for potentially skipping virtual base initializers.
+ Succ = VBaseSucc;
+ B = createBlock();
+ B->setTerminator(
+ CFGTerminator(nullptr, CFGTerminator::VirtualBaseBranch));
+ addSuccessor(B, Block, true);
+ }
}
if (B)
@@ -1441,22 +1481,38 @@ std::unique_ptr<CFG> CFGBuilder::buildCFG(const Decl *D, Stmt *Statement) {
E = BackpatchBlocks.end(); I != E; ++I ) {
CFGBlock *B = I->block;
- const GotoStmt *G = cast<GotoStmt>(B->getTerminator());
- LabelMapTy::iterator LI = LabelMap.find(G->getLabel());
-
- // If there is no target for the goto, then we are looking at an
- // incomplete AST. Handle this by not registering a successor.
- if (LI == LabelMap.end()) continue;
-
- JumpTarget JT = LI->second;
- prependAutomaticObjLifetimeWithTerminator(B, I->scopePosition,
- JT.scopePosition);
- prependAutomaticObjDtorsWithTerminator(B, I->scopePosition,
- JT.scopePosition);
- const VarDecl *VD = prependAutomaticObjScopeEndWithTerminator(
- B, I->scopePosition, JT.scopePosition);
- appendScopeBegin(JT.block, VD, G);
- addSuccessor(B, JT.block);
+ if (auto *G = dyn_cast<GotoStmt>(B->getTerminator())) {
+ LabelMapTy::iterator LI = LabelMap.find(G->getLabel());
+ // If there is no target for the goto, then we are looking at an
+ // incomplete AST. Handle this by not registering a successor.
+ if (LI == LabelMap.end())
+ continue;
+ JumpTarget JT = LI->second;
+ prependAutomaticObjLifetimeWithTerminator(B, I->scopePosition,
+ JT.scopePosition);
+ prependAutomaticObjDtorsWithTerminator(B, I->scopePosition,
+ JT.scopePosition);
+ const VarDecl *VD = prependAutomaticObjScopeEndWithTerminator(
+ B, I->scopePosition, JT.scopePosition);
+ appendScopeBegin(JT.block, VD, G);
+ addSuccessor(B, JT.block);
+ };
+ if (auto *G = dyn_cast<GCCAsmStmt>(B->getTerminator())) {
+ CFGBlock *Successor = (I+1)->block;
+ for (auto *L : G->labels()) {
+ LabelMapTy::iterator LI = LabelMap.find(L->getLabel());
+ // If there is no target for the goto, then we are looking at an
+ // incomplete AST. Handle this by not registering a successor.
+ if (LI == LabelMap.end())
+ continue;
+ JumpTarget JT = LI->second;
+ // Successor has been added, so skip it.
+ if (JT.block == Successor)
+ continue;
+ addSuccessor(B, JT.block);
+ }
+ I++;
+ }
}
// Add successors to the Indirect Goto Dispatch block (if we have one).
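// An illustrative 'asm goto' that the new backpatching above handles (not
// from the patch): each label operand becomes an additional CFG successor of
// the block terminated by the asm statement.
int checkNonZero(int x) {
  asm goto("testl %0, %0; jne %l[error]"
           : /* no outputs */ : "r"(x) : "cc" : error);
  return 0;
error:
  return 1;
}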
@@ -1761,6 +1817,9 @@ void CFGBuilder::addImplicitDtorsForDestructor(const CXXDestructorDecl *DD) {
// At the end destroy virtual base objects.
for (const auto &VI : RD->vbases()) {
+ // TODO: Add a VirtualBaseBranch to see if the most derived class
+ // (which is different from the current class) is responsible for
+ // destroying them.
const CXXRecordDecl *CD = VI.getType()->getAsCXXRecordDecl();
if (!CD->hasTrivialDestructor()) {
autoCreateBlock();
@@ -1948,7 +2007,7 @@ void CFGBuilder::prependAutomaticObjDtorsWithTerminator(CFGBlock *Blk,
= Blk->beginAutomaticObjDtorsInsert(Blk->end(), B.distance(E), C);
for (LocalScope::const_iterator I = B; I != E; ++I)
InsertPos = Blk->insertAutomaticObjDtor(InsertPos, *I,
- Blk->getTerminator());
+ Blk->getTerminatorStmt());
}
/// prependAutomaticObjLifetimeWithTerminator - Prepend lifetime CFGElements for
@@ -1963,8 +2022,10 @@ void CFGBuilder::prependAutomaticObjLifetimeWithTerminator(
BumpVectorContext &C = cfg->getBumpVectorContext();
CFGBlock::iterator InsertPos =
Blk->beginLifetimeEndsInsert(Blk->end(), B.distance(E), C);
- for (LocalScope::const_iterator I = B; I != E; ++I)
- InsertPos = Blk->insertLifetimeEnds(InsertPos, *I, Blk->getTerminator());
+ for (LocalScope::const_iterator I = B; I != E; ++I) {
+ InsertPos =
+ Blk->insertLifetimeEnds(InsertPos, *I, Blk->getTerminatorStmt());
+ }
}
/// prependAutomaticObjScopeEndWithTerminator - Prepend scope end CFGElements for
@@ -1983,7 +2044,7 @@ CFGBuilder::prependAutomaticObjScopeEndWithTerminator(
LocalScope::const_iterator PlaceToInsert = B;
for (LocalScope::const_iterator I = B; I != E; ++I)
PlaceToInsert = I;
- Blk->insertScopeEnd(InsertPos, *PlaceToInsert, Blk->getTerminator());
+ Blk->insertScopeEnd(InsertPos, *PlaceToInsert, Blk->getTerminatorStmt());
return *PlaceToInsert;
}
@@ -1999,6 +2060,10 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc) {
if (Expr *E = dyn_cast<Expr>(S))
S = E->IgnoreParens();
+ if (Context->getLangOpts().OpenMP)
+ if (auto *D = dyn_cast<OMPExecutableDirective>(S))
+ return VisitOMPExecutableDirective(D, asc);
+
switch (S->getStmtClass()) {
default:
return VisitStmt(S, asc);
@@ -2100,6 +2165,9 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc) {
case Stmt::GotoStmtClass:
return VisitGotoStmt(cast<GotoStmt>(S));
+ case Stmt::GCCAsmStmtClass:
+ return VisitGCCAsmStmt(cast<GCCAsmStmt>(S), asc);
+
case Stmt::IfStmtClass:
return VisitIfStmt(cast<IfStmt>(S));
@@ -2459,7 +2527,8 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
NoReturn = true;
if (FD->hasAttr<NoThrowAttr>())
AddEHEdge = false;
- if (FD->getBuiltinID() == Builtin::BI__builtin_object_size)
+ if (FD->getBuiltinID() == Builtin::BI__builtin_object_size ||
+ FD->getBuiltinID() == Builtin::BI__builtin_dynamic_object_size)
OmitArguments = true;
}
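Presumably, like __builtin_object_size, the new __builtin_dynamic_object_size does not evaluate its pointer argument for side effects, which is why the argument list is omitted from the CFG; a hedged sketch of the kind of call affected ('p' and 'g' are hypothetical names):

// The call to g() inside the size query is never executed at run time, so it
// should not surface as a CFG element of the enclosing function.
unsigned long n = __builtin_dynamic_object_size(p + g(), 0);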
@@ -2862,8 +2931,8 @@ CFGBlock *CFGBuilder::VisitIfStmt(IfStmt *I) {
// Add the successors. If we know that specific branches are
// unreachable, inform addSuccessor() of that knowledge.
- addSuccessor(Block, ThenBlock, /* isReachable = */ !KnownVal.isFalse());
- addSuccessor(Block, ElseBlock, /* isReachable = */ !KnownVal.isTrue());
+ addSuccessor(Block, ThenBlock, /* IsReachable = */ !KnownVal.isFalse());
+ addSuccessor(Block, ElseBlock, /* IsReachable = */ !KnownVal.isTrue());
// Add the condition as the last statement in the new block. This may
// create new blocks as the condition may contain control-flow. Any newly
@@ -3103,6 +3172,28 @@ CFGBlock *CFGBuilder::VisitGotoStmt(GotoStmt *G) {
return Block;
}
+CFGBlock *CFGBuilder::VisitGCCAsmStmt(GCCAsmStmt *G, AddStmtChoice asc) {
+ // Asm goto is a control-flow statement. Thus we stop processing the current
+ // block and create a new one.
+
+ if (!G->isAsmGoto())
+ return VisitStmt(G, asc);
+
+ if (Block) {
+ Succ = Block;
+ if (badCFG)
+ return nullptr;
+ }
+ Block = createBlock();
+ Block->setTerminator(G);
+ // We will backpatch this block later for all the labels.
+ BackpatchBlocks.push_back(JumpSource(Block, ScopePos));
+ // Also save "Succ" in BackpatchBlocks; during backpatch processing it is
+ // used to avoid adding an edge to "Succ" a second time.
+ BackpatchBlocks.push_back(JumpSource(Succ, ScopePos));
+ return Block;
+}
+
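For context, the construct this new visitor models is GCC-style asm goto, where the inline assembly may branch to any of the listed C labels; a small illustrative example (not taken from the patch):

int f(int x) {
  asm goto("testl %0, %0; jne %l[error]"
           : /* no outputs */
           : "r"(x)
           : "cc"
           : error);
  return 0;
error:
  return 1;
}

Every label after the final colon becomes a candidate successor of the block terminated by the asm statement, which is exactly what the backpatching loop earlier in buildCFG wires up.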
CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
CFGBlock *LoopSuccessor = nullptr;
@@ -4330,8 +4421,8 @@ CFGBlock *CFGBuilder::VisitCXXNewExpr(CXXNewExpr *NE,
if (BuildOpts.AddCXXNewAllocator)
appendNewAllocator(Block, NE);
- if (NE->isArray())
- Block = Visit(NE->getArraySize());
+ if (NE->isArray() && *NE->getArraySize())
+ Block = Visit(*NE->getArraySize());
for (CXXNewExpr::arg_iterator I = NE->placement_arg_begin(),
E = NE->placement_arg_end(); I != E; ++I)
@@ -4603,7 +4694,8 @@ void CFGBuilder::InsertTempDtorDecisionBlock(const TempDtorContext &Context,
}
assert(Context.TerminatorExpr);
CFGBlock *Decision = createBlock(false);
- Decision->setTerminator(CFGTerminator(Context.TerminatorExpr, true));
+ Decision->setTerminator(CFGTerminator(Context.TerminatorExpr,
+ CFGTerminator::TemporaryDtorsBranch));
addSuccessor(Decision, Block, !Context.KnownExecuted.isFalse());
addSuccessor(Decision, FalseSucc ? FalseSucc : Context.Succ,
!Context.KnownExecuted.isTrue());
@@ -4642,6 +4734,37 @@ CFGBlock *CFGBuilder::VisitConditionalOperatorForTemporaryDtors(
return Block;
}
+CFGBlock *CFGBuilder::VisitOMPExecutableDirective(OMPExecutableDirective *D,
+ AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, D)) {
+ autoCreateBlock();
+ appendStmt(Block, D);
+ }
+
+ // Iterate over all expressions used in clauses.
+ CFGBlock *B = Block;
+
+ // Reverse the elements to process them in natural order. Iterators are not
+ // bidirectional, so we need to create a temporary vector.
+ SmallVector<Stmt *, 8> Used(
+ OMPExecutableDirective::used_clauses_children(D->clauses()));
+ for (Stmt *S : llvm::reverse(Used)) {
+ assert(S && "Expected non-null used-in-clause child.");
+ if (CFGBlock *R = Visit(S))
+ B = R;
+ }
+ // Visit associated structured block if any.
+ if (!D->isStandaloneDirective())
+ if (Stmt *S = D->getStructuredBlock()) {
+ if (!isa<CompoundStmt>(S))
+ addLocalScopeAndDtors(S);
+ if (CFGBlock *R = addStmt(S))
+ B = R;
+ }
+
+ return B;
+}
+
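A hedged sketch of the kind of directive this covers; both the expression used in the clause and the associated structured block now get CFG elements (names are illustrative):

void fill(int *a, int n, int nthreads) {
  // 'nthreads + 1' is a clause expression picked up by used_clauses_children();
  // the loop below is the associated structured block.
  #pragma omp parallel for num_threads(nthreads + 1)
  for (int i = 0; i < n; ++i)
    a[i] = i;
}

Standalone directives such as '#pragma omp barrier' have no structured block, which is what the isStandaloneDirective() check skips.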
/// createBlock - Constructs and adds a new CFGBlock to the CFG. The block has
/// no successors or predecessors. If this is the first block created in the
/// CFG, it is automatically set to be the Entry and Exit of the CFG.
@@ -4668,6 +4791,51 @@ std::unique_ptr<CFG> CFG::buildCFG(const Decl *D, Stmt *Statement,
return Builder.buildCFG(D, Statement);
}
+bool CFG::isLinear() const {
+ // Quick path: if we only have the ENTRY block, the EXIT block, and some code
+ // in between, then we have no room for control flow.
+ if (size() <= 3)
+ return true;
+
+ // Traverse the CFG until we find a branch.
+ // TODO: While this should still be very fast,
+ // maybe we should cache the answer.
+ llvm::SmallPtrSet<const CFGBlock *, 4> Visited;
+ const CFGBlock *B = Entry;
+ while (B != Exit) {
+ auto IteratorAndFlag = Visited.insert(B);
+ if (!IteratorAndFlag.second) {
+ // We looped back to a block that we've already visited. Not linear.
+ return false;
+ }
+
+ // Iterate over reachable successors.
+ const CFGBlock *FirstReachableB = nullptr;
+ for (const CFGBlock::AdjacentBlock &AB : B->succs()) {
+ if (!AB.isReachable())
+ continue;
+
+ if (FirstReachableB == nullptr) {
+ FirstReachableB = &*AB;
+ } else {
+ // We've encountered a branch. It's not a linear CFG.
+ return false;
+ }
+ }
+
+ if (!FirstReachableB) {
+ // We reached a dead end. EXIT is unreachable. This is linear enough.
+ return true;
+ }
+
+ // There's only one way to move forward. Proceed.
+ B = FirstReachableB;
+ }
+
+ // We reached EXIT and found no branches.
+ return true;
+}
+
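Two toy functions illustrate the distinction the new predicate draws (sketch only):

int straight(int x) { int y = x + 1; return y; }  // ENTRY -> one block -> EXIT: linear
int branchy(int x) { return x ? 1 : 2; }          // the ?: introduces a branch: not linear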
const CXXDestructorDecl *
CFGImplicitDtor::getDestructorDecl(ASTContext &astContext) const {
switch (getKind()) {
@@ -4766,7 +4934,7 @@ bool CFGBlock::FilterEdge(const CFGBlock::FilterOptions &F,
// If the 'To' has no label or is labeled but the label isn't a
// CaseStmt then filter this edge.
if (const SwitchStmt *S =
- dyn_cast_or_null<SwitchStmt>(From->getTerminator().getStmt())) {
+ dyn_cast_or_null<SwitchStmt>(From->getTerminatorStmt())) {
if (S->isAllEnumCasesCovered()) {
const Stmt *L = To->getLabel();
if (!L || !isa<CaseStmt>(L))
@@ -5001,9 +5169,18 @@ public:
public:
void print(CFGTerminator T) {
- if (T.isTemporaryDtorsBranch())
+ switch (T.getKind()) {
+ case CFGTerminator::StmtBranch:
+ Visit(T.getStmt());
+ break;
+ case CFGTerminator::TemporaryDtorsBranch:
OS << "(Temp Dtor) ";
- Visit(T.getStmt());
+ Visit(T.getStmt());
+ break;
+ case CFGTerminator::VirtualBaseBranch:
+ OS << "(See if most derived ctor has already initialized vbases)";
+ break;
+ }
}
};
@@ -5312,7 +5489,7 @@ static void print_block(raw_ostream &OS, const CFG* cfg,
}
// Print the terminator of this block.
- if (B.getTerminator()) {
+ if (B.getTerminator().isValid()) {
if (ShowColors)
OS.changeColor(raw_ostream::GREEN);
@@ -5464,8 +5641,43 @@ void CFGBlock::printTerminator(raw_ostream &OS,
TPrinter.print(getTerminator());
}
+/// printTerminatorJson - Pretty-prints the terminator in JSON format.
+void CFGBlock::printTerminatorJson(raw_ostream &Out, const LangOptions &LO,
+ bool AddQuotes) const {
+ std::string Buf;
+ llvm::raw_string_ostream TempOut(Buf);
+
+ printTerminator(TempOut, LO);
+
+ Out << JsonFormat(TempOut.str(), AddQuotes);
+}
+
+const Expr *CFGBlock::getLastCondition() const {
+ // If the terminator is a temporary-dtor or virtual-base branch, etc., we
+ // can't retrieve a meaningful condition; bail out.
+ if (Terminator.getKind() != CFGTerminator::StmtBranch)
+ return nullptr;
+
+ // Also, if this method was called on a block with fewer than two successors,
+ // the block doesn't have a retrievable condition.
+ if (succ_size() < 2)
+ return nullptr;
+
+ auto StmtElem = rbegin()->getAs<CFGStmt>();
+ if (!StmtElem)
+ return nullptr;
+
+ const Stmt *Cond = StmtElem->getStmt();
+ if (isa<ObjCForCollectionStmt>(Cond))
+ return nullptr;
+
+ // ObjCForCollectionStmt is the only known non-Expr terminator, and it was
+ // ruled out above, hence the cast<>.
+ return cast<Expr>(Cond)->IgnoreParens();
+}
+
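A short usage sketch (the caller below is hypothetical, not part of the patch):

// For a block whose terminator models 'if ((x > 0)) ...', the last CFGStmt
// element is the condition, so this yields the Expr for 'x > 0' with parens
// stripped; temporary-dtor and virtual-base branches yield nullptr.
if (const Expr *Cond = B->getLastCondition())
  examineCondition(Cond);  // 'examineCondition' is a placeholder helper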
Stmt *CFGBlock::getTerminatorCondition(bool StripParens) {
- Stmt *Terminator = this->Terminator;
+ Stmt *Terminator = getTerminatorStmt();
if (!Terminator)
return nullptr;
diff --git a/lib/Analysis/CFGReachabilityAnalysis.cpp b/lib/Analysis/CFGReachabilityAnalysis.cpp
index cdad5b57aee8..2b5d6c466cde 100644
--- a/lib/Analysis/CFGReachabilityAnalysis.cpp
+++ b/lib/Analysis/CFGReachabilityAnalysis.cpp
@@ -1,9 +1,8 @@
//===- CFGReachabilityAnalysis.cpp - Basic reachability analysis ----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Analysis/CFGStmtMap.cpp b/lib/Analysis/CFGStmtMap.cpp
index 3eed0d52f848..d1c23e3c879b 100644
--- a/lib/Analysis/CFGStmtMap.cpp
+++ b/lib/Analysis/CFGStmtMap.cpp
@@ -1,9 +1,8 @@
//===--- CFGStmtMap.h - Map from Stmt* to CFGBlock* -----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -71,7 +70,7 @@ static void Accumulate(SMap &SM, CFGBlock *B) {
// Finally, look at the terminator. If the terminator was already added
// because it is a block-level expression in another block, overwrite
// that mapping.
- if (Stmt *Term = B->getTerminator())
+ if (Stmt *Term = B->getTerminatorStmt())
SM[Term] = B;
}
diff --git a/lib/Analysis/CallGraph.cpp b/lib/Analysis/CallGraph.cpp
index 66a6f1a9bcea..7eda80ea0505 100644
--- a/lib/Analysis/CallGraph.cpp
+++ b/lib/Analysis/CallGraph.cpp
@@ -1,9 +1,8 @@
//===- CallGraph.cpp - AST-based Call graph -------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Analysis/CloneDetection.cpp b/lib/Analysis/CloneDetection.cpp
index 88402e2adaa7..74328e8ae67f 100644
--- a/lib/Analysis/CloneDetection.cpp
+++ b/lib/Analysis/CloneDetection.cpp
@@ -1,9 +1,8 @@
//===--- CloneDetection.cpp - Finds code clones in an AST -------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -368,11 +367,7 @@ void RecursiveCloneTypeIIHashConstraint::constrain(
}
// Sort hash_codes in StmtsByHash.
- std::stable_sort(StmtsByHash.begin(), StmtsByHash.end(),
- [](std::pair<size_t, StmtSequence> LHS,
- std::pair<size_t, StmtSequence> RHS) {
- return LHS.first < RHS.first;
- });
+ llvm::stable_sort(StmtsByHash, llvm::less_first());
// Check for each StmtSequence if its successor has the same hash value.
// We don't check the last StmtSequence as it has no successor.
diff --git a/lib/Analysis/CocoaConventions.cpp b/lib/Analysis/CocoaConventions.cpp
index b2d416c171a1..b2ef426dead2 100644
--- a/lib/Analysis/CocoaConventions.cpp
+++ b/lib/Analysis/CocoaConventions.cpp
@@ -1,9 +1,8 @@
//===- CocoaConventions.h - Special handling of Cocoa conventions -*- C++ -*--//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Analysis/CodeInjector.cpp b/lib/Analysis/CodeInjector.cpp
index 76bf364444d1..412de96a13b9 100644
--- a/lib/Analysis/CodeInjector.cpp
+++ b/lib/Analysis/CodeInjector.cpp
@@ -1,9 +1,8 @@
//===-- CodeInjector.cpp ----------------------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Analysis/ConstructionContext.cpp b/lib/Analysis/ConstructionContext.cpp
index 8169d4a93a6d..6ba1e2173d2c 100644
--- a/lib/Analysis/ConstructionContext.cpp
+++ b/lib/Analysis/ConstructionContext.cpp
@@ -1,9 +1,8 @@
//===- ConstructionContext.cpp - CFG constructor information --------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Analysis/Consumed.cpp b/lib/Analysis/Consumed.cpp
index 16eeaba2f61b..eee36d9caf7f 100644
--- a/lib/Analysis/Consumed.cpp
+++ b/lib/Analysis/Consumed.cpp
@@ -1,9 +1,8 @@
//===- Consumed.cpp -------------------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -77,7 +76,7 @@ static SourceLocation getFirstStmtLoc(const CFGBlock *Block) {
static SourceLocation getLastStmtLoc(const CFGBlock *Block) {
// Find the source location of the last statement in the block, if the block
// is not empty.
- if (const Stmt *StmtNode = Block->getTerminator()) {
+ if (const Stmt *StmtNode = Block->getTerminatorStmt()) {
return StmtNode->getBeginLoc();
} else {
for (CFGBlock::const_reverse_iterator BI = Block->rbegin(),
diff --git a/lib/Analysis/Dominators.cpp b/lib/Analysis/Dominators.cpp
index 1b7dd8c804e1..f7ad68673d0f 100644
--- a/lib/Analysis/Dominators.cpp
+++ b/lib/Analysis/Dominators.cpp
@@ -1,14 +1,19 @@
//===- Dominators.cpp - Implementation of dominators tree for Clang CFG ---===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/Dominators.h"
-using namespace clang;
+namespace clang {
+
+template <>
+void CFGDominatorTreeImpl</*IsPostDom=*/true>::anchor() {}
+
+template <>
+void CFGDominatorTreeImpl</*IsPostDom=*/false>::anchor() {}
-void DominatorTree::anchor() {}
+} // end of namespace clang
diff --git a/lib/Analysis/ExprMutationAnalyzer.cpp b/lib/Analysis/ExprMutationAnalyzer.cpp
index 8414cb5c726a..fb5a139e82ab 100644
--- a/lib/Analysis/ExprMutationAnalyzer.cpp
+++ b/lib/Analysis/ExprMutationAnalyzer.cpp
@@ -1,9 +1,8 @@
//===---------- ExprMutationAnalyzer.cpp ----------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/ExprMutationAnalyzer.h"
@@ -25,6 +24,18 @@ AST_MATCHER_P(CXXForRangeStmt, hasRangeStmt,
return InnerMatcher.matches(*Range, Finder, Builder);
}
+AST_MATCHER_P(Expr, maybeEvalCommaExpr,
+ ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
+ const Expr* Result = &Node;
+ while (const auto *BOComma =
+ dyn_cast_or_null<BinaryOperator>(Result->IgnoreParens())) {
+ if (!BOComma->isCommaOp())
+ break;
+ Result = BOComma->getRHS();
+ }
+ return InnerMatcher.matches(*Result, Finder, Builder);
+}
+
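The matcher above unwraps comma operators before the equalsNode() checks run, so a write through a comma expression is attributed to its right-most operand; a minimal input it is intended to handle ('takesRef' is a hypothetical helper):

void takesRef(int &);

void f(int a, int b) {
  (a, b) = 5;        // now recognized as a mutation of 'b'
  (a, b)++;          // likewise
  takesRef((a, b));  // binding a non-const reference also counts as a mutation of 'b'
}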
const ast_matchers::internal::VariadicDynCastAllOfMatcher<Stmt, CXXTypeidExpr>
cxxTypeidExpr;
@@ -194,24 +205,28 @@ const Stmt *ExprMutationAnalyzer::findDeclPointeeMutation(
const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
// LHS of any assignment operators.
const auto AsAssignmentLhs =
- binaryOperator(isAssignmentOperator(), hasLHS(equalsNode(Exp)));
+ binaryOperator(isAssignmentOperator(),
+ hasLHS(maybeEvalCommaExpr(equalsNode(Exp))));
// Operand of increment/decrement operators.
const auto AsIncDecOperand =
unaryOperator(anyOf(hasOperatorName("++"), hasOperatorName("--")),
- hasUnaryOperand(equalsNode(Exp)));
+ hasUnaryOperand(maybeEvalCommaExpr(equalsNode(Exp))));
// Invoking non-const member function.
// A member function is assumed to be non-const when it is unresolved.
const auto NonConstMethod = cxxMethodDecl(unless(isConst()));
const auto AsNonConstThis =
- expr(anyOf(cxxMemberCallExpr(callee(NonConstMethod), on(equalsNode(Exp))),
+ expr(anyOf(cxxMemberCallExpr(callee(NonConstMethod),
+ on(maybeEvalCommaExpr(equalsNode(Exp)))),
cxxOperatorCallExpr(callee(NonConstMethod),
- hasArgument(0, equalsNode(Exp))),
+ hasArgument(0,
+ maybeEvalCommaExpr(equalsNode(Exp)))),
callExpr(callee(expr(anyOf(
- unresolvedMemberExpr(hasObjectExpression(equalsNode(Exp))),
+ unresolvedMemberExpr(
+ hasObjectExpression(maybeEvalCommaExpr(equalsNode(Exp)))),
cxxDependentScopeMemberExpr(
- hasObjectExpression(equalsNode(Exp)))))))));
+ hasObjectExpression(maybeEvalCommaExpr(equalsNode(Exp))))))))));
// Taking address of 'Exp'.
// We're assuming 'Exp' is mutated as soon as its address is taken, though in
@@ -221,10 +236,11 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
unaryOperator(hasOperatorName("&"),
// A NoOp implicit cast is adding const.
unless(hasParent(implicitCastExpr(hasCastKind(CK_NoOp)))),
- hasUnaryOperand(equalsNode(Exp)));
+ hasUnaryOperand(maybeEvalCommaExpr(equalsNode(Exp))));
const auto AsPointerFromArrayDecay =
castExpr(hasCastKind(CK_ArrayToPointerDecay),
- unless(hasParent(arraySubscriptExpr())), has(equalsNode(Exp)));
+ unless(hasParent(arraySubscriptExpr())),
+ has(maybeEvalCommaExpr(equalsNode(Exp))));
// Treat calling `operator->()` of move-only classes as taking address.
// These are typically smart pointers with unique ownership so we treat
// mutation of pointee as mutation of the smart pointer itself.
@@ -232,7 +248,8 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
cxxOperatorCallExpr(hasOverloadedOperatorName("->"),
callee(cxxMethodDecl(ofClass(isMoveOnly()),
returns(nonConstPointerType()))),
- argumentCountIs(1), hasArgument(0, equalsNode(Exp)));
+ argumentCountIs(1),
+ hasArgument(0, maybeEvalCommaExpr(equalsNode(Exp))));
// Used as non-const-ref argument when calling a function.
// An argument is assumed to be non-const-ref when the function is unresolved.
@@ -240,7 +257,8 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
// findFunctionArgMutation which has additional smarts for handling forwarding
// references.
const auto NonConstRefParam = forEachArgumentWithParam(
- equalsNode(Exp), parmVarDecl(hasType(nonConstReferenceType())));
+ maybeEvalCommaExpr(equalsNode(Exp)),
+ parmVarDecl(hasType(nonConstReferenceType())));
const auto NotInstantiated = unless(hasDeclaration(isInstantiated()));
const auto AsNonConstRefArg = anyOf(
callExpr(NonConstRefParam, NotInstantiated),
@@ -248,8 +266,8 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
callExpr(callee(expr(anyOf(unresolvedLookupExpr(), unresolvedMemberExpr(),
cxxDependentScopeMemberExpr(),
hasType(templateTypeParmType())))),
- hasAnyArgument(equalsNode(Exp))),
- cxxUnresolvedConstructExpr(hasAnyArgument(equalsNode(Exp))));
+ hasAnyArgument(maybeEvalCommaExpr(equalsNode(Exp)))),
+ cxxUnresolvedConstructExpr(hasAnyArgument(maybeEvalCommaExpr(equalsNode(Exp)))));
// Captured by a lambda by reference.
// If we're initializing a capture with 'Exp' directly then we're initializing
@@ -262,7 +280,8 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
// For returning by value there will be an ImplicitCastExpr <LValueToRValue>.
// For returning by const-ref there will be an ImplicitCastExpr <NoOp> (for
// adding const.)
- const auto AsNonConstRefReturn = returnStmt(hasReturnValue(equalsNode(Exp)));
+ const auto AsNonConstRefReturn = returnStmt(hasReturnValue(
+ maybeEvalCommaExpr(equalsNode(Exp))));
const auto Matches =
match(findAll(stmt(anyOf(AsAssignmentLhs, AsIncDecOperand, AsNonConstThis,
diff --git a/lib/Analysis/LiveVariables.cpp b/lib/Analysis/LiveVariables.cpp
index afe2d264907f..2cd607d8a493 100644
--- a/lib/Analysis/LiveVariables.cpp
+++ b/lib/Analysis/LiveVariables.cpp
@@ -1,9 +1,8 @@
//=- LiveVariables.cpp - Live Variable Analysis for Source CFGs ----------*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -502,7 +501,7 @@ LiveVariablesImpl::runOnBlock(const CFGBlock *block,
TransferFunctions TF(*this, val, obs, block);
// Visit the terminator (if any).
- if (const Stmt *term = block->getTerminator())
+ if (const Stmt *term = block->getTerminatorStmt())
TF.Visit(const_cast<Stmt*>(term));
// Apply the transfer function for all Stmts in the block.
diff --git a/lib/Analysis/ObjCNoReturn.cpp b/lib/Analysis/ObjCNoReturn.cpp
index f27568c0c322..fe1edb496859 100644
--- a/lib/Analysis/ObjCNoReturn.cpp
+++ b/lib/Analysis/ObjCNoReturn.cpp
@@ -1,9 +1,8 @@
//= ObjCNoReturn.cpp - Handling of Cocoa APIs known not to return --*- C++ -*---
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Analysis/PostOrderCFGView.cpp b/lib/Analysis/PostOrderCFGView.cpp
index d5d0bafe664c..f79d0007cb3d 100644
--- a/lib/Analysis/PostOrderCFGView.cpp
+++ b/lib/Analysis/PostOrderCFGView.cpp
@@ -1,9 +1,8 @@
//===- PostOrderCFGView.cpp - Post order view of CFG blocks ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Analysis/ProgramPoint.cpp b/lib/Analysis/ProgramPoint.cpp
index 2d016cb13353..97e90965d007 100644
--- a/lib/Analysis/ProgramPoint.cpp
+++ b/lib/Analysis/ProgramPoint.cpp
@@ -1,9 +1,8 @@
//==- ProgramPoint.cpp - Program Points for Path-Sensitive Analysis -*- C++ -*-/
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -13,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/ProgramPoint.h"
+#include "clang/Basic/JsonSupport.h"
using namespace clang;
@@ -44,151 +44,141 @@ ProgramPoint ProgramPoint::getProgramPoint(const Stmt *S, ProgramPoint::Kind K,
}
LLVM_DUMP_METHOD void ProgramPoint::dump() const {
- return print(/*CR=*/"\n", llvm::errs());
-}
-
-static void printLocation(raw_ostream &Out, SourceLocation SLoc,
- const SourceManager &SM,
- StringRef CR,
- StringRef Postfix) {
- if (SLoc.isFileID()) {
- Out << CR << "line=" << SM.getExpansionLineNumber(SLoc)
- << " col=" << SM.getExpansionColumnNumber(SLoc) << Postfix;
- }
+ return printJson(llvm::errs());
}
-void ProgramPoint::print(StringRef CR, llvm::raw_ostream &Out) const {
+void ProgramPoint::printJson(llvm::raw_ostream &Out, const char *NL) const {
const ASTContext &Context =
getLocationContext()->getAnalysisDeclContext()->getASTContext();
const SourceManager &SM = Context.getSourceManager();
+ const PrintingPolicy &PP = Context.getPrintingPolicy();
+ const bool AddQuotes = true;
+
+ Out << "\"kind\": \"";
switch (getKind()) {
case ProgramPoint::BlockEntranceKind:
- Out << "Block Entrance: B"
+ Out << "BlockEntrance\""
+ << ", \"block_id\": "
<< castAs<BlockEntrance>().getBlock()->getBlockID();
break;
case ProgramPoint::FunctionExitKind: {
auto FEP = getAs<FunctionExitPoint>();
- Out << "Function Exit: B" << FEP->getBlock()->getBlockID();
+ Out << "FunctionExit\""
+ << ", \"block_id\": " << FEP->getBlock()->getBlockID()
+ << ", \"stmt_id\": ";
+
if (const ReturnStmt *RS = FEP->getStmt()) {
- Out << CR << " Return: S" << RS->getID(Context) << CR;
- RS->printPretty(Out, /*helper=*/nullptr, Context.getPrintingPolicy(),
- /*Indentation=*/2, /*NewlineSymbol=*/CR);
+ Out << RS->getID(Context) << ", \"stmt\": ";
+ RS->printJson(Out, nullptr, PP, AddQuotes);
+ } else {
+ Out << "null, \"stmt\": null";
}
break;
}
case ProgramPoint::BlockExitKind:
- assert(false);
+ llvm_unreachable("BlockExitKind");
break;
-
case ProgramPoint::CallEnterKind:
- Out << "CallEnter";
+ Out << "CallEnter\"";
break;
-
case ProgramPoint::CallExitBeginKind:
- Out << "CallExitBegin";
+ Out << "CallExitBegin\"";
break;
-
case ProgramPoint::CallExitEndKind:
- Out << "CallExitEnd";
- break;
-
- case ProgramPoint::PostStmtPurgeDeadSymbolsKind:
- Out << "PostStmtPurgeDeadSymbols";
- break;
-
- case ProgramPoint::PreStmtPurgeDeadSymbolsKind:
- Out << "PreStmtPurgeDeadSymbols";
+ Out << "CallExitEnd\"";
break;
-
case ProgramPoint::EpsilonKind:
- Out << "Epsilon Point";
+ Out << "EpsilonPoint\"";
break;
- case ProgramPoint::LoopExitKind: {
- LoopExit LE = castAs<LoopExit>();
- Out << "LoopExit: " << LE.getLoopStmt()->getStmtClassName();
+ case ProgramPoint::LoopExitKind:
+ Out << "LoopExit\", \"stmt\": \""
+ << castAs<LoopExit>().getLoopStmt()->getStmtClassName() << '\"';
break;
- }
case ProgramPoint::PreImplicitCallKind: {
ImplicitCallPoint PC = castAs<ImplicitCallPoint>();
- Out << "PreCall: ";
- PC.getDecl()->print(Out, Context.getLangOpts());
- printLocation(Out, PC.getLocation(), SM, CR, /*Postfix=*/CR);
+ Out << "PreCall\", \"decl\": \""
+ << PC.getDecl()->getAsFunction()->getQualifiedNameAsString()
+ << "\", \"location\": ";
+ printSourceLocationAsJson(Out, PC.getLocation(), SM);
break;
}
case ProgramPoint::PostImplicitCallKind: {
ImplicitCallPoint PC = castAs<ImplicitCallPoint>();
- Out << "PostCall: ";
- PC.getDecl()->print(Out, Context.getLangOpts());
- printLocation(Out, PC.getLocation(), SM, CR, /*Postfix=*/CR);
+ Out << "PostCall\", \"decl\": \""
+ << PC.getDecl()->getAsFunction()->getQualifiedNameAsString()
+ << "\", \"location\": ";
+ printSourceLocationAsJson(Out, PC.getLocation(), SM);
break;
}
case ProgramPoint::PostInitializerKind: {
- Out << "PostInitializer: ";
+ Out << "PostInitializer\", ";
const CXXCtorInitializer *Init = castAs<PostInitializer>().getInitializer();
- if (const FieldDecl *FD = Init->getAnyMember())
- Out << *FD;
- else {
+ if (const FieldDecl *FD = Init->getAnyMember()) {
+ Out << "\"field_decl\": \"" << *FD << '\"';
+ } else {
+ Out << "\"type\": \"";
QualType Ty = Init->getTypeSourceInfo()->getType();
Ty = Ty.getLocalUnqualifiedType();
Ty.print(Out, Context.getLangOpts());
+ Out << '\"';
}
break;
}
case ProgramPoint::BlockEdgeKind: {
const BlockEdge &E = castAs<BlockEdge>();
- Out << "Edge: (B" << E.getSrc()->getBlockID() << ", B"
- << E.getDst()->getBlockID() << ')';
-
- if (const Stmt *T = E.getSrc()->getTerminator()) {
- SourceLocation SLoc = T->getBeginLoc();
-
- Out << "\\|Terminator: ";
- E.getSrc()->printTerminator(Out, Context.getLangOpts());
- printLocation(Out, SLoc, SM, CR, /*Postfix=*/"");
-
- if (isa<SwitchStmt>(T)) {
- const Stmt *Label = E.getDst()->getLabel();
-
- if (Label) {
- if (const auto *C = dyn_cast<CaseStmt>(Label)) {
- Out << CR << "case ";
- if (C->getLHS())
- C->getLHS()->printPretty(
- Out, nullptr, Context.getPrintingPolicy(),
- /*Indentation=*/0, /*NewlineSymbol=*/CR);
-
- if (const Stmt *RHS = C->getRHS()) {
- Out << " .. ";
- RHS->printPretty(Out, nullptr, Context.getPrintingPolicy(),
- /*Indetation=*/0, /*NewlineSymbol=*/CR);
- }
-
- Out << ":";
+ const Stmt *T = E.getSrc()->getTerminatorStmt();
+ Out << "Edge\", \"src_id\": " << E.getSrc()->getBlockID()
+ << ", \"dst_id\": " << E.getDst()->getBlockID() << ", \"terminator\": ";
+
+ if (!T) {
+ Out << "null, \"term_kind\": null";
+ break;
+ }
+
+ E.getSrc()->printTerminatorJson(Out, Context.getLangOpts(),
+ /*AddQuotes=*/true);
+ Out << ", \"location\": ";
+ printSourceLocationAsJson(Out, T->getBeginLoc(), SM);
+
+ Out << ", \"term_kind\": \"";
+ if (isa<SwitchStmt>(T)) {
+ Out << "SwitchStmt\", \"case\": ";
+ if (const Stmt *Label = E.getDst()->getLabel()) {
+ if (const auto *C = dyn_cast<CaseStmt>(Label)) {
+ Out << "{ \"lhs\": ";
+ if (const Stmt *LHS = C->getLHS()) {
+ LHS->printJson(Out, nullptr, PP, AddQuotes);
+ } else {
+ Out << "null";
+ }
+
+ Out << ", \"rhs\": ";
+ if (const Stmt *RHS = C->getRHS()) {
+ RHS->printJson(Out, nullptr, PP, AddQuotes);
} else {
- assert(isa<DefaultStmt>(Label));
- Out << CR << "default:";
+ Out << "null";
}
- } else
- Out << CR << "(implicit) default:";
- } else if (isa<IndirectGotoStmt>(T)) {
- // FIXME
+ Out << " }";
+ } else {
+ assert(isa<DefaultStmt>(Label));
+ Out << "\"default\"";
+ }
} else {
- Out << CR << "Condition: ";
- if (*E.getSrc()->succ_begin() == E.getDst())
- Out << "true";
- else
- Out << "false";
+ Out << "\"implicit default\"";
}
-
- Out << CR;
+ } else if (isa<IndirectGotoStmt>(T)) {
+ // FIXME: More info.
+ Out << "IndirectGotoStmt\"";
+ } else {
+ Out << "Condition\", \"value\": "
+ << (*E.getSrc()->succ_begin() == E.getDst() ? "true" : "false");
}
-
break;
}
@@ -196,23 +186,44 @@ void ProgramPoint::print(StringRef CR, llvm::raw_ostream &Out) const {
const Stmt *S = castAs<StmtPoint>().getStmt();
assert(S != nullptr && "Expecting non-null Stmt");
- Out << S->getStmtClassName() << " S" << S->getID(Context) << " <"
- << (const void *)S << "> ";
- S->printPretty(Out, /*helper=*/nullptr, Context.getPrintingPolicy(),
- /*Indentation=*/2, /*NewlineSymbol=*/CR);
- printLocation(Out, S->getBeginLoc(), SM, CR, /*Postfix=*/"");
+ Out << "Statement\", \"stmt_kind\": \"" << S->getStmtClassName()
+ << "\", \"stmt_id\": " << S->getID(Context)
+ << ", \"pointer\": \"" << (const void *)S << "\", \"pretty\": ";
+
+ S->printJson(Out, nullptr, PP, AddQuotes);
- if (getAs<PreStmt>())
- Out << CR << "PreStmt" << CR;
+ Out << ", \"location\": ";
+ printSourceLocationAsJson(Out, S->getBeginLoc(), SM);
+
+ Out << ", \"stmt_point_kind\": \"";
+ if (getAs<PreLoad>())
+ Out << "PreLoad";
+ else if (getAs<PreStore>())
+ Out << "PreStore";
+ else if (getAs<PostAllocatorCall>())
+ Out << "PostAllocatorCall";
+ else if (getAs<PostCondition>())
+ Out << "PostCondition";
else if (getAs<PostLoad>())
- Out << CR << "PostLoad" << CR;
- else if (getAs<PostStore>())
- Out << CR << "PostStore" << CR;
+ Out << "PostLoad";
else if (getAs<PostLValue>())
- Out << CR << "PostLValue" << CR;
- else if (getAs<PostAllocatorCall>())
- Out << CR << "PostAllocatorCall" << CR;
+ Out << "PostLValue";
+ else if (getAs<PostStore>())
+ Out << "PostStore";
+ else if (getAs<PostStmt>())
+ Out << "PostStmt";
+ else if (getAs<PostStmtPurgeDeadSymbols>())
+ Out << "PostStmtPurgeDeadSymbols";
+ else if (getAs<PreStmtPurgeDeadSymbols>())
+ Out << "PreStmtPurgeDeadSymbols";
+ else if (getAs<PreStmt>())
+ Out << "PreStmt";
+ else {
+ Out << "\nKind: '" << getKind();
+ llvm_unreachable("' is unhandled StmtPoint kind!");
+ }
+ Out << '\"';
break;
}
}
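Taken together, ProgramPoint::dump() now forwards to printJson(), so a program point prints as a JSON fragment rather than the old free-form text; a rough sketch of the result for a statement point, using only the keys visible above (values invented):

// P is some ProgramPoint; dump() calls printJson(llvm::errs()) as shown above.
P.printJson(llvm::errs());
// Emits, roughly:
//   "kind": "Statement", "stmt_kind": "BinaryOperator", "stmt_id": 42,
//   "pointer": "0x...", "pretty": "x + y", "location": ...,
//   "stmt_point_kind": "PostStmt"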
diff --git a/lib/Analysis/ReachableCode.cpp b/lib/Analysis/ReachableCode.cpp
index 87f4f7010f98..2fea88ea2eff 100644
--- a/lib/Analysis/ReachableCode.cpp
+++ b/lib/Analysis/ReachableCode.cpp
@@ -1,9 +1,8 @@
-//=- ReachableCodePathInsensitive.cpp ---------------------------*- C++ --*-==//
+//===-- ReachableCode.cpp - Code Reachability Analysis --------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -49,7 +48,7 @@ static bool isTrivialExpression(const Expr *Ex) {
static bool isTrivialDoWhile(const CFGBlock *B, const Stmt *S) {
// Check if the block ends with a do...while() and see if 'S' is the
// condition.
- if (const Stmt *Term = B->getTerminator()) {
+ if (const Stmt *Term = B->getTerminatorStmt()) {
if (const DoStmt *DS = dyn_cast<DoStmt>(Term)) {
const Expr *Cond = DS->getCond()->IgnoreParenCasts();
return Cond == S && isTrivialExpression(Cond);
@@ -117,7 +116,7 @@ static bool isDeadReturn(const CFGBlock *B, const Stmt *S) {
// the call to the destructor.
assert(Current->succ_size() == 2);
Current = *(Current->succ_begin() + 1);
- } else if (!Current->getTerminator() && Current->succ_size() == 1) {
+ } else if (!Current->getTerminatorStmt() && Current->succ_size() == 1) {
// If there is only one successor, we're not dealing with outgoing control
// flow. Thus, look into the next block.
Current = *Current->succ_begin();
@@ -193,9 +192,10 @@ static bool isConfigurationValue(const Stmt *S,
if (!S)
return false;
- S = S->IgnoreImplicit();
+ if (const auto *Ex = dyn_cast<Expr>(S))
+ S = Ex->IgnoreImplicit();
- if (const Expr *Ex = dyn_cast<Expr>(S))
+ if (const auto *Ex = dyn_cast<Expr>(S))
S = Ex->IgnoreCasts();
// Special case looking for the sigil '()' around an integer literal.
@@ -292,7 +292,7 @@ static bool isConfigurationValue(const ValueDecl *D, Preprocessor &PP) {
/// Returns true if we should always explore all successors of a block.
static bool shouldTreatSuccessorsAsReachable(const CFGBlock *B,
Preprocessor &PP) {
- if (const Stmt *Term = B->getTerminator()) {
+ if (const Stmt *Term = B->getTerminatorStmt()) {
if (isa<SwitchStmt>(Term))
return true;
// Specially handle '||' and '&&'.
@@ -461,12 +461,11 @@ const Stmt *DeadCodeScan::findDeadCode(const clang::CFGBlock *Block) {
return S;
}
- if (CFGTerminator T = Block->getTerminator()) {
- if (!T.isTemporaryDtorsBranch()) {
- const Stmt *S = T.getStmt();
- if (isValidDeadStmt(S))
- return S;
- }
+ CFGTerminator T = Block->getTerminator();
+ if (T.isStmtBranch()) {
+ const Stmt *S = T.getStmt();
+ if (S && isValidDeadStmt(S))
+ return S;
}
return nullptr;
diff --git a/lib/StaticAnalyzer/Core/RetainSummaryManager.cpp b/lib/Analysis/RetainSummaryManager.cpp
index 2e40cc33381c..132053fd2c24 100644
--- a/lib/StaticAnalyzer/Core/RetainSummaryManager.cpp
+++ b/lib/Analysis/RetainSummaryManager.cpp
@@ -1,9 +1,8 @@
//== RetainSummaryManager.cpp - Summaries for reference counting --*- C++ -*--//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -13,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Core/RetainSummaryManager.h"
#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/Analysis/RetainSummaryManager.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
@@ -146,13 +145,30 @@ static bool isSubclass(const Decl *D,
}
static bool isOSObjectSubclass(const Decl *D) {
- return isSubclass(D, "OSObject");
+ return D && isSubclass(D, "OSMetaClassBase");
}
static bool isOSObjectDynamicCast(StringRef S) {
return S == "safeMetaCast";
}
+static bool isOSObjectRequiredCast(StringRef S) {
+ return S == "requiredMetaCast";
+}
+
+static bool isOSObjectThisCast(StringRef S) {
+ return S == "metaCast";
+}
+
+
+static bool isOSObjectPtr(QualType QT) {
+ return isOSObjectSubclass(QT->getPointeeCXXRecordDecl());
+}
+
+static bool isISLObjectRef(QualType Ty) {
+ return StringRef(Ty.getAsString()).startswith("isl_");
+}
+
static bool isOSIteratorSubclass(const Decl *D) {
return isSubclass(D, "OSIterator");
}
@@ -199,36 +215,58 @@ static bool isOSObjectRelated(const CXXMethodDecl *MD) {
return false;
}
+bool
+RetainSummaryManager::isKnownSmartPointer(QualType QT) {
+ QT = QT.getCanonicalType();
+ const auto *RD = QT->getAsCXXRecordDecl();
+ if (!RD)
+ return false;
+ const IdentifierInfo *II = RD->getIdentifier();
+ if (II && II->getName() == "smart_ptr")
+ if (const auto *ND = dyn_cast<NamespaceDecl>(RD->getDeclContext()))
+ if (ND->getNameAsString() == "os")
+ return true;
+ return false;
+}
+
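The helper above keys on the literal spelling os::smart_ptr; a sketch of the shape of type it is meant to recognize (not a real header, shown only for orientation):

namespace os {
template <typename T> class smart_ptr { /* ... */ };
}
// A value of type os::smart_ptr<OSObject> satisfies isKnownSmartPointer(),
// whereas a raw OSObject * does not.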
const RetainSummary *
RetainSummaryManager::getSummaryForOSObject(const FunctionDecl *FD,
StringRef FName, QualType RetTy) {
+ assert(TrackOSObjects &&
+ "Requesting a summary for an OSObject but OSObjects are not tracked");
+
if (RetTy->isPointerType()) {
const CXXRecordDecl *PD = RetTy->getPointeeType()->getAsCXXRecordDecl();
if (PD && isOSObjectSubclass(PD)) {
- if (const IdentifierInfo *II = FD->getIdentifier()) {
- if (isOSObjectDynamicCast(II->getName()))
- return getDefaultSummary();
-
- // All objects returned with functions *not* starting with
- // get, or iterators, are returned at +1.
- if ((!II->getName().startswith("get") &&
- !II->getName().startswith("Get")) ||
- isOSIteratorSubclass(PD)) {
- return getOSSummaryCreateRule(FD);
- } else {
- return getOSSummaryGetRule(FD);
- }
+ if (isOSObjectDynamicCast(FName) || isOSObjectRequiredCast(FName) ||
+ isOSObjectThisCast(FName))
+ return getDefaultSummary();
+
+ // TODO: Add support for the slightly common *Matching(table) idiom.
+ // Cf. IOService::nameMatching() etc. - these functions have an unusual
+ // contract of returning at +0 or +1 depending on their last argument.
+ if (FName.endswith("Matching")) {
+ return getPersistentStopSummary();
+ }
+
+ // All objects returned with functions *not* starting with 'get',
+ // or iterators, are returned at +1.
+ if ((!FName.startswith("get") && !FName.startswith("Get")) ||
+ isOSIteratorSubclass(PD)) {
+ return getOSSummaryCreateRule(FD);
+ } else {
+ return getOSSummaryGetRule(FD);
}
}
}
if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
const CXXRecordDecl *Parent = MD->getParent();
- if (TrackOSObjects && Parent && isOSObjectSubclass(Parent)) {
- if (FName == "release")
+ if (Parent && isOSObjectSubclass(Parent)) {
+ if (FName == "release" || FName == "taggedRelease")
return getOSSummaryReleaseRule(FD);
- if (FName == "retain")
+ if (FName == "retain" || FName == "taggedRetain")
return getOSSummaryRetainRule(FD);
if (FName == "free")
@@ -473,19 +511,19 @@ RetainSummaryManager::generateSummary(const FunctionDecl *FD,
if (const RetainSummary *S = getSummaryForOSObject(FD, FName, RetTy))
return S;
- if (TrackObjCAndCFObjects)
- if (const RetainSummary *S =
- getSummaryForObjCOrCFObject(FD, FName, RetTy, FT, AllowAnnotations))
- return S;
-
if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
- if (!(TrackOSObjects && isOSObjectRelated(MD)))
+ if (!isOSObjectRelated(MD))
return getPersistentSummary(RetEffect::MakeNoRet(),
ArgEffects(AF.getEmptyMap()),
ArgEffect(DoNothing),
ArgEffect(StopTracking),
ArgEffect(DoNothing));
+ if (TrackObjCAndCFObjects)
+ if (const RetainSummary *S =
+ getSummaryForObjCOrCFObject(FD, FName, RetTy, FT, AllowAnnotations))
+ return S;
+
return getDefaultSummary();
}
@@ -540,100 +578,118 @@ static ArgEffect getStopTrackingHardEquivalent(ArgEffect E) {
llvm_unreachable("Unknown ArgEffect kind");
}
-void RetainSummaryManager::updateSummaryForCall(const RetainSummary *&S,
- const CallEvent &Call) {
- if (Call.hasNonZeroCallbackArg()) {
- ArgEffect RecEffect =
- getStopTrackingHardEquivalent(S->getReceiverEffect());
- ArgEffect DefEffect =
- getStopTrackingHardEquivalent(S->getDefaultArgEffect());
-
- ArgEffects ScratchArgs(AF.getEmptyMap());
- ArgEffects CustomArgEffects = S->getArgEffects();
- for (ArgEffects::iterator I = CustomArgEffects.begin(),
- E = CustomArgEffects.end();
- I != E; ++I) {
- ArgEffect Translated = getStopTrackingHardEquivalent(I->second);
- if (Translated.getKind() != DefEffect.getKind())
- ScratchArgs = AF.add(ScratchArgs, I->first, Translated);
- }
+const RetainSummary *
+RetainSummaryManager::updateSummaryForNonZeroCallbackArg(const RetainSummary *S,
+ AnyCall &C) {
+ ArgEffect RecEffect = getStopTrackingHardEquivalent(S->getReceiverEffect());
+ ArgEffect DefEffect = getStopTrackingHardEquivalent(S->getDefaultArgEffect());
- RetEffect RE = RetEffect::MakeNoRetHard();
-
- // Special cases where the callback argument CANNOT free the return value.
- // This can generally only happen if we know that the callback will only be
- // called when the return value is already being deallocated.
- if (const SimpleFunctionCall *FC = dyn_cast<SimpleFunctionCall>(&Call)) {
- if (IdentifierInfo *Name = FC->getDecl()->getIdentifier()) {
- // When the CGBitmapContext is deallocated, the callback here will free
- // the associated data buffer.
- // The callback in dispatch_data_create frees the buffer, but not
- // the data object.
- if (Name->isStr("CGBitmapContextCreateWithData") ||
- Name->isStr("dispatch_data_create"))
- RE = S->getRetEffect();
- }
- }
+ ArgEffects ScratchArgs(AF.getEmptyMap());
+ ArgEffects CustomArgEffects = S->getArgEffects();
+ for (ArgEffects::iterator I = CustomArgEffects.begin(),
+ E = CustomArgEffects.end();
+ I != E; ++I) {
+ ArgEffect Translated = getStopTrackingHardEquivalent(I->second);
+ if (Translated.getKind() != DefEffect.getKind())
+ ScratchArgs = AF.add(ScratchArgs, I->first, Translated);
+ }
- S = getPersistentSummary(RE, ScratchArgs, RecEffect, DefEffect);
+ RetEffect RE = RetEffect::MakeNoRetHard();
+
+ // Special cases where the callback argument CANNOT free the return value.
+ // This can generally only happen if we know that the callback will only be
+ // called when the return value is already being deallocated.
+ if (const IdentifierInfo *Name = C.getIdentifier()) {
+ // When the CGBitmapContext is deallocated, the callback here will free
+ // the associated data buffer.
+ // The callback in dispatch_data_create frees the buffer, but not
+ // the data object.
+ if (Name->isStr("CGBitmapContextCreateWithData") ||
+ Name->isStr("dispatch_data_create"))
+ RE = S->getRetEffect();
}
- // Special case '[super init];' and '[self init];'
- //
- // Even though calling '[super init]' without assigning the result to self
- // and checking if the parent returns 'nil' is a bad pattern, it is common.
- // Additionally, our Self Init checker already warns about it. To avoid
- // overwhelming the user with messages from both checkers, we model the case
- // of '[super init]' in cases when it is not consumed by another expression
- // as if the call preserves the value of 'self'; essentially, assuming it can
- // never fail and return 'nil'.
- // Note, we don't want to just stop tracking the value since we want the
- // RetainCount checker to report leaks and use-after-free if SelfInit checker
- // is turned off.
- if (const ObjCMethodCall *MC = dyn_cast<ObjCMethodCall>(&Call)) {
- if (MC->getMethodFamily() == OMF_init && MC->isReceiverSelfOrSuper()) {
-
- // Check if the message is not consumed, we know it will not be used in
- // an assignment, ex: "self = [super init]".
- const Expr *ME = MC->getOriginExpr();
- const LocationContext *LCtx = MC->getLocationContext();
- ParentMap &PM = LCtx->getAnalysisDeclContext()->getParentMap();
- if (!PM.isConsumedExpr(ME)) {
- RetainSummaryTemplate ModifiableSummaryTemplate(S, *this);
- ModifiableSummaryTemplate->setReceiverEffect(ArgEffect(DoNothing));
- ModifiableSummaryTemplate->setRetEffect(RetEffect::MakeNoRet());
- }
+ return getPersistentSummary(RE, ScratchArgs, RecEffect, DefEffect);
+}
+
+void RetainSummaryManager::updateSummaryForReceiverUnconsumedSelf(
+ const RetainSummary *&S) {
+
+ RetainSummaryTemplate Template(S, *this);
+
+ Template->setReceiverEffect(ArgEffect(DoNothing));
+ Template->setRetEffect(RetEffect::MakeNoRet());
+}
+
+
+void RetainSummaryManager::updateSummaryForArgumentTypes(
+ const AnyCall &C, const RetainSummary *&RS) {
+ RetainSummaryTemplate Template(RS, *this);
+
+ unsigned parm_idx = 0;
+ for (auto pi = C.param_begin(), pe = C.param_end(); pi != pe;
+ ++pi, ++parm_idx) {
+ QualType QT = (*pi)->getType();
+
+ // Skip already created values.
+ if (RS->getArgEffects().contains(parm_idx))
+ continue;
+
+ ObjKind K = ObjKind::AnyObj;
+
+ if (isISLObjectRef(QT)) {
+ K = ObjKind::Generalized;
+ } else if (isOSObjectPtr(QT)) {
+ K = ObjKind::OS;
+ } else if (cocoa::isCocoaObjectRef(QT)) {
+ K = ObjKind::ObjC;
+ } else if (coreFoundation::isCFObjectRef(QT)) {
+ K = ObjKind::CF;
}
+
+ if (K != ObjKind::AnyObj)
+ Template->addArg(AF, parm_idx,
+ ArgEffect(RS->getDefaultArgEffect().getKind(), K));
}
}
const RetainSummary *
-RetainSummaryManager::getSummary(const CallEvent &Call,
+RetainSummaryManager::getSummary(AnyCall C,
+ bool HasNonZeroCallbackArg,
+ bool IsReceiverUnconsumedSelf,
QualType ReceiverType) {
const RetainSummary *Summ;
- switch (Call.getKind()) {
- case CE_Function:
- case CE_CXXMember:
- case CE_CXXMemberOperator:
- case CE_CXXConstructor:
- case CE_CXXAllocator:
- Summ = getFunctionSummary(cast_or_null<FunctionDecl>(Call.getDecl()));
+ switch (C.getKind()) {
+ case AnyCall::Function:
+ case AnyCall::Constructor:
+ case AnyCall::Allocator:
+ case AnyCall::Deallocator:
+ Summ = getFunctionSummary(cast_or_null<FunctionDecl>(C.getDecl()));
break;
- case CE_Block:
- case CE_CXXDestructor:
+ case AnyCall::Block:
+ case AnyCall::Destructor:
// FIXME: These calls are currently unsupported.
return getPersistentStopSummary();
- case CE_ObjCMessage: {
- const ObjCMethodCall &Msg = cast<ObjCMethodCall>(Call);
- if (Msg.isInstanceMessage())
- Summ = getInstanceMethodSummary(Msg, ReceiverType);
- else
- Summ = getClassMethodSummary(Msg);
+ case AnyCall::ObjCMethod: {
+ const auto *ME = cast_or_null<ObjCMessageExpr>(C.getExpr());
+ if (!ME) {
+ Summ = getMethodSummary(cast<ObjCMethodDecl>(C.getDecl()));
+ } else if (ME->isInstanceMessage()) {
+ Summ = getInstanceMethodSummary(ME, ReceiverType);
+ } else {
+ Summ = getClassMethodSummary(ME);
+ }
break;
}
}
- updateSummaryForCall(Summ, Call);
+ if (HasNonZeroCallbackArg)
+ Summ = updateSummaryForNonZeroCallbackArg(Summ, C);
+
+ if (IsReceiverUnconsumedSelf)
+ updateSummaryForReceiverUnconsumedSelf(Summ);
+
+ updateSummaryForArgumentTypes(C, Summ);
assert(Summ && "Unknown call type?");
return Summ;
@@ -649,7 +705,7 @@ RetainSummaryManager::getCFCreateGetRuleSummary(const FunctionDecl *FD) {
}
bool RetainSummaryManager::isTrustedReferenceCountImplementation(
- const FunctionDecl *FD) {
+ const Decl *FD) {
return hasRCAnnotation(FD, "rc_ownership_trusted_implementation");
}
@@ -678,20 +734,28 @@ RetainSummaryManager::canEval(const CallExpr *CE, const FunctionDecl *FD,
// These are not retain. They just return something and retain it.
return None;
}
- if (cocoa::isRefType(ResultTy, "CF", FName) ||
- cocoa::isRefType(ResultTy, "CG", FName) ||
- cocoa::isRefType(ResultTy, "CV", FName))
- if (isRetain(FD, FName) || isAutorelease(FD, FName) ||
- isMakeCollectable(FName))
- return BehaviorSummary::Identity;
+ if (CE->getNumArgs() == 1 &&
+ (cocoa::isRefType(ResultTy, "CF", FName) ||
+ cocoa::isRefType(ResultTy, "CG", FName) ||
+ cocoa::isRefType(ResultTy, "CV", FName)) &&
+ (isRetain(FD, FName) || isAutorelease(FD, FName) ||
+ isMakeCollectable(FName)))
+ return BehaviorSummary::Identity;
// safeMetaCast is called by OSDynamicCast.
// We assume that OSDynamicCast is either an identity (cast is OK,
// the input was non-zero),
// or that it returns zero (when the cast failed, or the input
// was zero).
- if (TrackOSObjects && isOSObjectDynamicCast(FName)) {
- return BehaviorSummary::IdentityOrZero;
+ if (TrackOSObjects) {
+ if (isOSObjectDynamicCast(FName) && FD->param_size() >= 1) {
+ return BehaviorSummary::IdentityOrZero;
+ } else if (isOSObjectRequiredCast(FName) && FD->param_size() >= 1) {
+ return BehaviorSummary::Identity;
+ } else if (isOSObjectThisCast(FName) && isa<CXXMethodDecl>(FD) &&
+ !cast<CXXMethodDecl>(FD)->isStatic()) {
+ return BehaviorSummary::IdentityThis;
+ }
}
const FunctionDecl* FDD = FD->getDefinition();
@@ -1045,8 +1109,17 @@ RetainSummaryManager::getStandardMethodSummary(const ObjCMethodDecl *MD,
ArgEffect(ReceiverEff), ArgEffect(MayEscape));
}
+const RetainSummary *
+RetainSummaryManager::getClassMethodSummary(const ObjCMessageExpr *ME) {
+ assert(!ME->isInstanceMessage());
+ const ObjCInterfaceDecl *Class = ME->getReceiverInterface();
+
+ return getMethodSummary(ME->getSelector(), Class, ME->getMethodDecl(),
+ ME->getType(), ObjCClassMethodSummaries);
+}
+
const RetainSummary *RetainSummaryManager::getInstanceMethodSummary(
- const ObjCMethodCall &Msg,
+ const ObjCMessageExpr *ME,
QualType ReceiverType) {
const ObjCInterfaceDecl *ReceiverClass = nullptr;
@@ -1058,18 +1131,18 @@ const RetainSummary *RetainSummaryManager::getInstanceMethodSummary(
// If we don't know what kind of object this is, fall back to its static type.
if (!ReceiverClass)
- ReceiverClass = Msg.getReceiverInterface();
+ ReceiverClass = ME->getReceiverInterface();
// FIXME: The receiver could be a reference to a class, meaning that
// we should use the class method.
// id x = [NSObject class];
// [x performSelector:... withObject:... afterDelay:...];
- Selector S = Msg.getSelector();
- const ObjCMethodDecl *Method = Msg.getDecl();
+ Selector S = ME->getSelector();
+ const ObjCMethodDecl *Method = ME->getMethodDecl();
if (!Method && ReceiverClass)
Method = ReceiverClass->getInstanceMethod(S);
- return getMethodSummary(S, ReceiverClass, Method, Msg.getResultType(),
+ return getMethodSummary(S, ReceiverClass, Method, ME->getType(),
ObjCMethodSummaries);
}
@@ -1198,32 +1271,17 @@ void RetainSummaryManager::InitializeMethodSummaries() {
addInstMethSummary("CIContext", CFAllocSumm, "createCGLayerWithSize", "info");
}
-CallEffects CallEffects::getEffect(const ObjCMethodDecl *MD) {
- ASTContext &Ctx = MD->getASTContext();
- LangOptions L = Ctx.getLangOpts();
- RetainSummaryManager M(Ctx, L.ObjCAutoRefCount,
- /*TrackNSAndCFObjects=*/true,
- /*TrackOSObjects=*/false);
- const RetainSummary *S = M.getMethodSummary(MD);
- CallEffects CE(S->getRetEffect(), S->getReceiverEffect());
- unsigned N = MD->param_size();
- for (unsigned i = 0; i < N; ++i) {
- CE.Args.push_back(S->getArg(i));
- }
- return CE;
-}
-
-CallEffects CallEffects::getEffect(const FunctionDecl *FD) {
- ASTContext &Ctx = FD->getASTContext();
- LangOptions L = Ctx.getLangOpts();
- RetainSummaryManager M(Ctx, L.ObjCAutoRefCount,
- /*TrackNSAndCFObjects=*/true,
- /*TrackOSObjects=*/false);
- const RetainSummary *S = M.getFunctionSummary(FD);
- CallEffects CE(S->getRetEffect());
- unsigned N = FD->param_size();
- for (unsigned i = 0; i < N; ++i) {
- CE.Args.push_back(S->getArg(i));
- }
- return CE;
+const RetainSummary *
+RetainSummaryManager::getMethodSummary(const ObjCMethodDecl *MD) {
+ const ObjCInterfaceDecl *ID = MD->getClassInterface();
+ Selector S = MD->getSelector();
+ QualType ResultTy = MD->getReturnType();
+
+ ObjCMethodSummariesTy *CachedSummaries;
+ if (MD->isInstanceMethod())
+ CachedSummaries = &ObjCMethodSummaries;
+ else
+ CachedSummaries = &ObjCClassMethodSummaries;
+
+ return getMethodSummary(S, ID, MD, ResultTy, *CachedSummaries);
}
diff --git a/lib/Analysis/ThreadSafety.cpp b/lib/Analysis/ThreadSafety.cpp
index 78e1b056e1d7..c7b4c4455664 100644
--- a/lib/Analysis/ThreadSafety.cpp
+++ b/lib/Analysis/ThreadSafety.cpp
@@ -1,9 +1,8 @@
//===- ThreadSafety.cpp ---------------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -816,7 +815,7 @@ static void findBlockLocations(CFG *CFGraph,
// Find the source location of the last statement in the block, if the
// block is not empty.
- if (const Stmt *S = CurrBlock->getTerminator()) {
+ if (const Stmt *S = CurrBlock->getTerminatorStmt()) {
CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getBeginLoc();
} else {
for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
@@ -874,7 +873,7 @@ public:
void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
ThreadSafetyHandler &Handler,
StringRef DiagKind) const override {
- Handler.handleDoubleLock(DiagKind, entry.toString(), entry.loc());
+ Handler.handleDoubleLock(DiagKind, entry.toString(), loc(), entry.loc());
}
void handleUnlock(FactSet &FSet, FactManager &FactMan,
@@ -982,12 +981,13 @@ private:
void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
LockKind kind, SourceLocation loc, ThreadSafetyHandler *Handler,
StringRef DiagKind) const {
- if (!FSet.findLock(FactMan, Cp)) {
+ if (const FactEntry *Fact = FSet.findLock(FactMan, Cp)) {
+ if (Handler)
+ Handler->handleDoubleLock(DiagKind, Cp.toString(), Fact->loc(), loc);
+ } else {
FSet.removeLock(FactMan, !Cp);
FSet.addLock(FactMan,
llvm::make_unique<LockableFactEntry>(Cp, kind, loc));
- } else if (Handler) {
- Handler->handleDoubleLock(DiagKind, Cp.toString(), loc);
}
}
@@ -1335,8 +1335,8 @@ void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
// Generic lock removal doesn't care about lock kind mismatches, but
// otherwise diagnose when the lock kinds are mismatched.
if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
- Handler.handleIncorrectUnlockKind(DiagKind, Cp.toString(),
- LDat->kind(), ReceivedKind, UnlockLoc);
+ Handler.handleIncorrectUnlockKind(DiagKind, Cp.toString(), LDat->kind(),
+ ReceivedKind, LDat->loc(), UnlockLoc);
}
LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler,
@@ -1499,7 +1499,7 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
const Stmt *Cond = PredBlock->getTerminatorCondition();
// We don't acquire try-locks on ?: branches, only when its result is used.
- if (!Cond || isa<ConditionalOperator>(PredBlock->getTerminator()))
+ if (!Cond || isa<ConditionalOperator>(PredBlock->getTerminatorStmt()))
return;
bool Negate = false;
@@ -2402,7 +2402,7 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
// a difference in locksets is probably due to a bug in that block, rather
// than in some other predecessor. In that case, keep the other
// predecessor's lockset.
- if (const Stmt *Terminator = (*PI)->getTerminator()) {
+ if (const Stmt *Terminator = (*PI)->getTerminatorStmt()) {
if (isa<ContinueStmt>(Terminator) || isa<BreakStmt>(Terminator)) {
SpecialBlocks.push_back(*PI);
continue;
@@ -2441,7 +2441,7 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
// it might also be part of a switch. Also, a subsequent destructor
// might add to the lockset, in which case the real issue might be a
// double lock on the other path.
- const Stmt *Terminator = PrevBlock->getTerminator();
+ const Stmt *Terminator = PrevBlock->getTerminatorStmt();
bool IsLoop = Terminator && isa<ContinueStmt>(Terminator);
FactSet PrevLockset;
diff --git a/lib/Analysis/ThreadSafetyCommon.cpp b/lib/Analysis/ThreadSafetyCommon.cpp
index 14d1d9c7a8f7..373dfc77fa9b 100644
--- a/lib/Analysis/ThreadSafetyCommon.cpp
+++ b/lib/Analysis/ThreadSafetyCommon.cpp
@@ -1,9 +1,8 @@
//===- ThreadSafetyCommon.cpp ---------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -277,18 +276,23 @@ til::SExpr *SExprBuilder::translateDeclRefExpr(const DeclRefExpr *DRE,
// Function parameters require substitution and/or renaming.
if (const auto *PV = dyn_cast_or_null<ParmVarDecl>(VD)) {
- const auto *FD =
- cast<FunctionDecl>(PV->getDeclContext())->getCanonicalDecl();
unsigned I = PV->getFunctionScopeIndex();
-
- if (Ctx && Ctx->FunArgs && FD == Ctx->AttrDecl->getCanonicalDecl()) {
- // Substitute call arguments for references to function parameters
- assert(I < Ctx->NumArgs);
- return translate(Ctx->FunArgs[I], Ctx->Prev);
+ const DeclContext *D = PV->getDeclContext();
+ if (Ctx && Ctx->FunArgs) {
+ const Decl *Canonical = Ctx->AttrDecl->getCanonicalDecl();
+ if (isa<FunctionDecl>(D)
+ ? (cast<FunctionDecl>(D)->getCanonicalDecl() == Canonical)
+ : (cast<ObjCMethodDecl>(D)->getCanonicalDecl() == Canonical)) {
+ // Substitute call arguments for references to function parameters
+ assert(I < Ctx->NumArgs);
+ return translate(Ctx->FunArgs[I], Ctx->Prev);
+ }
}
// Map the param back to the param of the original function declaration
// for consistent comparisons.
- VD = FD->getParamDecl(I);
+ VD = isa<FunctionDecl>(D)
+ ? cast<FunctionDecl>(D)->getCanonicalDecl()->getParamDecl(I)
+ : cast<ObjCMethodDecl>(D)->getCanonicalDecl()->getParamDecl(I);
}
// For non-local variables, treat it as a reference to a named object.
diff --git a/lib/Analysis/ThreadSafetyLogical.cpp b/lib/Analysis/ThreadSafetyLogical.cpp
index facfa11a39ab..ac730770093e 100644
--- a/lib/Analysis/ThreadSafetyLogical.cpp
+++ b/lib/Analysis/ThreadSafetyLogical.cpp
@@ -1,9 +1,8 @@
//===- ThreadSafetyLogical.cpp ---------------------------------*- C++ --*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file defines a representation for logical expressions with SExpr leaves
diff --git a/lib/Analysis/ThreadSafetyTIL.cpp b/lib/Analysis/ThreadSafetyTIL.cpp
index 11f7afbd229c..652f953d2a6d 100644
--- a/lib/Analysis/ThreadSafetyTIL.cpp
+++ b/lib/Analysis/ThreadSafetyTIL.cpp
@@ -1,9 +1,8 @@
//===- ThreadSafetyTIL.cpp ------------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT in the llvm repository for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Analysis/UninitializedValues.cpp b/lib/Analysis/UninitializedValues.cpp
index 31c88a109565..8a233d4a44f1 100644
--- a/lib/Analysis/UninitializedValues.cpp
+++ b/lib/Analysis/UninitializedValues.cpp
@@ -1,9 +1,8 @@
//===- UninitializedValues.cpp - Find Uninitialized Values ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -351,6 +350,7 @@ public:
void VisitBinaryOperator(BinaryOperator *BO);
void VisitCallExpr(CallExpr *CE);
void VisitCastExpr(CastExpr *CE);
+ void VisitOMPExecutableDirective(OMPExecutableDirective *ED);
void operator()(Stmt *S) { Visit(S); }
@@ -456,6 +456,11 @@ void ClassifyRefs::VisitUnaryOperator(UnaryOperator *UO) {
classify(UO->getSubExpr(), Use);
}
+void ClassifyRefs::VisitOMPExecutableDirective(OMPExecutableDirective *ED) {
+ for (Stmt *S : OMPExecutableDirective::used_clauses_children(ED->clauses()))
+ classify(cast<Expr>(S), Use);
+}
+
static bool isPointerToConst(const QualType &QT) {
return QT->isAnyPointerType() && QT->getPointeeType().isConstQualified();
}
@@ -533,6 +538,7 @@ public:
void VisitDeclStmt(DeclStmt *ds);
void VisitObjCForCollectionStmt(ObjCForCollectionStmt *FS);
void VisitObjCMessageExpr(ObjCMessageExpr *ME);
+ void VisitOMPExecutableDirective(OMPExecutableDirective *ED);
bool isTrackedVar(const VarDecl *vd) {
return ::isTrackedVar(vd, cast<DeclContext>(ac.getDecl()));
@@ -652,7 +658,7 @@ public:
// uninitialized.
for (const auto *Block : cfg) {
unsigned BlockID = Block->getBlockID();
- const Stmt *Term = Block->getTerminator();
+ const Stmt *Term = Block->getTerminatorStmt();
if (SuccsVisited[BlockID] && SuccsVisited[BlockID] < Block->succ_size() &&
Term) {
// This block inevitably leads to the use. If we have an edge from here
@@ -708,6 +714,16 @@ void TransferFunctions::VisitObjCForCollectionStmt(ObjCForCollectionStmt *FS) {
}
}
+void TransferFunctions::VisitOMPExecutableDirective(
+ OMPExecutableDirective *ED) {
+ for (Stmt *S : OMPExecutableDirective::used_clauses_children(ED->clauses())) {
+ assert(S && "Expected non-null used-in-clause child.");
+ Visit(S);
+ }
+ if (!ED->isStandaloneDirective())
+ Visit(ED->getStructuredBlock());
+}
+
void TransferFunctions::VisitBlockExpr(BlockExpr *be) {
const BlockDecl *bd = be->getBlockDecl();
for (const auto &I : bd->captures()) {
diff --git a/lib/Analysis/plugins/CheckerDependencyHandling/CheckerDependencyHandling.cpp b/lib/Analysis/plugins/CheckerDependencyHandling/CheckerDependencyHandling.cpp
new file mode 100644
index 000000000000..be8e1200d0bf
--- /dev/null
+++ b/lib/Analysis/plugins/CheckerDependencyHandling/CheckerDependencyHandling.cpp
@@ -0,0 +1,28 @@
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Frontend/CheckerRegistry.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+struct Dependency : public Checker<check::BeginFunction> {
+ void checkBeginFunction(CheckerContext &Ctx) const {}
+};
+struct DependendentChecker : public Checker<check::BeginFunction> {
+ void checkBeginFunction(CheckerContext &Ctx) const {}
+};
+} // end anonymous namespace
+
+// Register plugin!
+extern "C" void clang_registerCheckers(CheckerRegistry &registry) {
+ registry.addChecker<Dependency>("example.Dependency", "", "");
+ registry.addChecker<DependendentChecker>("example.DependendentChecker", "",
+ "");
+
+ registry.addDependency("example.DependendentChecker", "example.Dependency");
+}
+
+extern "C" const char clang_analyzerAPIVersionString[] =
+ CLANG_ANALYZER_API_VERSION_STRING;
diff --git a/lib/Analysis/plugins/CheckerDependencyHandling/CheckerDependencyHandlingAnalyzerPlugin.exports b/lib/Analysis/plugins/CheckerDependencyHandling/CheckerDependencyHandlingAnalyzerPlugin.exports
new file mode 100644
index 000000000000..8d9ff882cfb1
--- /dev/null
+++ b/lib/Analysis/plugins/CheckerDependencyHandling/CheckerDependencyHandlingAnalyzerPlugin.exports
@@ -0,0 +1,2 @@
+clang_registerCheckers
+clang_analyzerAPIVersionString
diff --git a/lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandling.cpp b/lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandling.cpp
new file mode 100644
index 000000000000..77de3630ae7e
--- /dev/null
+++ b/lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandling.cpp
@@ -0,0 +1,44 @@
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Frontend/CheckerRegistry.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+struct MyChecker : public Checker<check::BeginFunction> {
+ void checkBeginFunction(CheckerContext &Ctx) const {}
+};
+
+void registerMyChecker(CheckerManager &Mgr) {
+ MyChecker *Checker = Mgr.registerChecker<MyChecker>();
+ llvm::outs() << "Example option is set to "
+ << (Mgr.getAnalyzerOptions().getCheckerBooleanOption(
+ Checker, "ExampleOption")
+ ? "true"
+ : "false")
+ << '\n';
+}
+
+bool shouldRegisterMyChecker(const LangOptions &LO) { return true; }
+
+} // end anonymous namespace
+
+// Register plugin!
+extern "C" void clang_registerCheckers(CheckerRegistry &registry) {
+ registry.addChecker(registerMyChecker, shouldRegisterMyChecker,
+ "example.MyChecker", "Example Description",
+ "example.mychecker.documentation.nonexistent.html",
+ /*isHidden*/false);
+
+ registry.addCheckerOption(/*OptionType*/ "bool",
+ /*CheckerFullName*/ "example.MyChecker",
+ /*OptionName*/ "ExampleOption",
+ /*DefaultValStr*/ "false",
+ /*Description*/ "This is an example checker opt.",
+ /*DevelopmentStage*/ "released");
+}
+
+extern "C" const char clang_analyzerAPIVersionString[] =
+ CLANG_ANALYZER_API_VERSION_STRING;
diff --git a/lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandlingAnalyzerPlugin.exports b/lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandlingAnalyzerPlugin.exports
new file mode 100644
index 000000000000..8d9ff882cfb1
--- /dev/null
+++ b/lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandlingAnalyzerPlugin.exports
@@ -0,0 +1,2 @@
+clang_registerCheckers
+clang_analyzerAPIVersionString
diff --git a/lib/Analysis/plugins/SampleAnalyzer/MainCallChecker.cpp b/lib/Analysis/plugins/SampleAnalyzer/MainCallChecker.cpp
new file mode 100644
index 000000000000..8bd4085108e9
--- /dev/null
+++ b/lib/Analysis/plugins/SampleAnalyzer/MainCallChecker.cpp
@@ -0,0 +1,54 @@
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Frontend/CheckerRegistry.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class MainCallChecker : public Checker<check::PreStmt<CallExpr>> {
+ mutable std::unique_ptr<BugType> BT;
+
+public:
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+void MainCallChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ const Expr *Callee = CE->getCallee();
+ const FunctionDecl *FD = C.getSVal(Callee).getAsFunctionDecl();
+
+ if (!FD)
+ return;
+
+ // Get the name of the callee.
+ IdentifierInfo *II = FD->getIdentifier();
+ if (!II) // if no identifier, not a simple C function
+ return;
+
+ if (II->isStr("main")) {
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return;
+
+ if (!BT)
+ BT.reset(new BugType(this, "call to main", "example analyzer plugin"));
+
+ std::unique_ptr<BugReport> report =
+ llvm::make_unique<BugReport>(*BT, BT->getName(), N);
+ report->addRange(Callee->getSourceRange());
+ C.emitReport(std::move(report));
+ }
+}
+
+// Register plugin!
+extern "C" void clang_registerCheckers(CheckerRegistry &registry) {
+ registry.addChecker<MainCallChecker>(
+ "example.MainCallChecker", "Disallows calls to functions called main",
+ "");
+}
+
+extern "C" const char clang_analyzerAPIVersionString[] =
+ CLANG_ANALYZER_API_VERSION_STRING;
diff --git a/lib/Analysis/plugins/SampleAnalyzer/SampleAnalyzerPlugin.exports b/lib/Analysis/plugins/SampleAnalyzer/SampleAnalyzerPlugin.exports
new file mode 100644
index 000000000000..8d9ff882cfb1
--- /dev/null
+++ b/lib/Analysis/plugins/SampleAnalyzer/SampleAnalyzerPlugin.exports
@@ -0,0 +1,2 @@
+clang_registerCheckers
+clang_analyzerAPIVersionString
diff --git a/lib/Basic/Builtins.cpp b/lib/Basic/Builtins.cpp
index 7e7f67ca874e..d23c280d4758 100644
--- a/lib/Basic/Builtins.cpp
+++ b/lib/Basic/Builtins.cpp
@@ -1,9 +1,8 @@
//===--- Builtins.cpp - Builtin function implementation -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -71,14 +70,18 @@ bool Builtin::Context::builtinIsSupported(const Builtin::Info &BuiltinInfo,
bool ObjCUnsupported = !LangOpts.ObjC && BuiltinInfo.Langs == OBJC_LANG;
bool OclC1Unsupported = (LangOpts.OpenCLVersion / 100) != 1 &&
(BuiltinInfo.Langs & ALL_OCLC_LANGUAGES ) == OCLC1X_LANG;
- bool OclC2Unsupported = LangOpts.OpenCLVersion != 200 &&
- (BuiltinInfo.Langs & ALL_OCLC_LANGUAGES) == OCLC20_LANG;
+ bool OclC2Unsupported =
+ (LangOpts.OpenCLVersion != 200 && !LangOpts.OpenCLCPlusPlus) &&
+ (BuiltinInfo.Langs & ALL_OCLC_LANGUAGES) == OCLC20_LANG;
bool OclCUnsupported = !LangOpts.OpenCL &&
(BuiltinInfo.Langs & ALL_OCLC_LANGUAGES);
bool OpenMPUnsupported = !LangOpts.OpenMP && BuiltinInfo.Langs == OMP_LANG;
+ bool CPlusPlusUnsupported =
+ !LangOpts.CPlusPlus && BuiltinInfo.Langs == CXX_LANG;
return !BuiltinsUnsupported && !MathBuiltinsUnsupported && !OclCUnsupported &&
!OclC1Unsupported && !OclC2Unsupported && !OpenMPUnsupported &&
- !GnuModeUnsupported && !MSModeUnsupported && !ObjCUnsupported;
+ !GnuModeUnsupported && !MSModeUnsupported && !ObjCUnsupported &&
+ !CPlusPlusUnsupported;
}
/// initializeBuiltins - Mark the identifiers for all the builtins with their
@@ -156,6 +159,33 @@ bool Builtin::Context::isScanfLike(unsigned ID, unsigned &FormatIdx,
return isLike(ID, FormatIdx, HasVAListArg, "sS");
}
+bool Builtin::Context::performsCallback(unsigned ID,
+ SmallVectorImpl<int> &Encoding) const {
+ const char *CalleePos = ::strchr(getRecord(ID).Attributes, 'C');
+ if (!CalleePos)
+ return false;
+
+ ++CalleePos;
+ assert(*CalleePos == '<' &&
+ "Callback callee specifier must be followed by a '<'");
+ ++CalleePos;
+
+ char *EndPos;
+ int CalleeIdx = ::strtol(CalleePos, &EndPos, 10);
+ assert(CalleeIdx >= 0 && "Callee index is supposed to be non-negative!");
+ Encoding.push_back(CalleeIdx);
+
+ while (*EndPos == ',') {
+ const char *PayloadPos = EndPos + 1;
+
+ int PayloadIdx = ::strtol(PayloadPos, &EndPos, 10);
+ Encoding.push_back(PayloadIdx);
+ }
+
+ assert(*EndPos == '>' && "Callback callee specifier must end with a '>'");
+ return true;
+}
+
bool Builtin::Context::canBeRedeclared(unsigned ID) const {
return ID == Builtin::NotBuiltin ||
ID == Builtin::BI__va_start ||
diff --git a/lib/Basic/CharInfo.cpp b/lib/Basic/CharInfo.cpp
index 32b3277c927b..d02054c9718f 100644
--- a/lib/Basic/CharInfo.cpp
+++ b/lib/Basic/CharInfo.cpp
@@ -1,9 +1,8 @@
//===--- CharInfo.cpp - Static Data for Classifying ASCII Characters ------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Basic/CodeGenOptions.cpp b/lib/Basic/CodeGenOptions.cpp
index aface1cd4bf9..fa186380f109 100644
--- a/lib/Basic/CodeGenOptions.cpp
+++ b/lib/Basic/CodeGenOptions.cpp
@@ -1,9 +1,8 @@
//===--- CodeGenOptions.cpp -----------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Basic/Cuda.cpp b/lib/Basic/Cuda.cpp
index 6c34856dfdf7..f2b6c8cd3ee9 100644
--- a/lib/Basic/Cuda.cpp
+++ b/lib/Basic/Cuda.cpp
@@ -3,6 +3,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/VersionTuple.h"
namespace clang {
@@ -24,10 +25,24 @@ const char *CudaVersionToString(CudaVersion V) {
return "9.2";
case CudaVersion::CUDA_100:
return "10.0";
+ case CudaVersion::CUDA_101:
+ return "10.1";
}
llvm_unreachable("invalid enum");
}
+CudaVersion CudaStringToVersion(llvm::StringRef S) {
+ return llvm::StringSwitch<CudaVersion>(S)
+ .Case("7.0", CudaVersion::CUDA_70)
+ .Case("7.5", CudaVersion::CUDA_75)
+ .Case("8.0", CudaVersion::CUDA_80)
+ .Case("9.0", CudaVersion::CUDA_90)
+ .Case("9.1", CudaVersion::CUDA_91)
+ .Case("9.2", CudaVersion::CUDA_92)
+ .Case("10.0", CudaVersion::CUDA_100)
+ .Case("10.1", CudaVersion::CUDA_101);
+}
+
const char *CudaArchToString(CudaArch A) {
switch (A) {
case CudaArch::LAST:
@@ -94,8 +109,16 @@ const char *CudaArchToString(CudaArch A) {
return "gfx904";
case CudaArch::GFX906: // TBA
return "gfx906";
+ case CudaArch::GFX908: // TBA
+ return "gfx908";
case CudaArch::GFX909: // TBA
return "gfx909";
+ case CudaArch::GFX1010: // TBA
+ return "gfx1010";
+ case CudaArch::GFX1011: // TBA
+ return "gfx1011";
+ case CudaArch::GFX1012: // TBA
+ return "gfx1012";
}
llvm_unreachable("invalid enum");
}
@@ -132,7 +155,11 @@ CudaArch StringToCudaArch(llvm::StringRef S) {
.Case("gfx902", CudaArch::GFX902)
.Case("gfx904", CudaArch::GFX904)
.Case("gfx906", CudaArch::GFX906)
+ .Case("gfx908", CudaArch::GFX908)
.Case("gfx909", CudaArch::GFX909)
+ .Case("gfx1010", CudaArch::GFX1010)
+ .Case("gfx1011", CudaArch::GFX1011)
+ .Case("gfx1012", CudaArch::GFX1012)
.Default(CudaArch::UNKNOWN);
}
@@ -244,7 +271,11 @@ CudaVirtualArch VirtualArchForCudaArch(CudaArch A) {
case CudaArch::GFX902:
case CudaArch::GFX904:
case CudaArch::GFX906:
+ case CudaArch::GFX908:
case CudaArch::GFX909:
+ case CudaArch::GFX1010:
+ case CudaArch::GFX1011:
+ case CudaArch::GFX1012:
return CudaVirtualArch::COMPUTE_AMDGCN;
}
llvm_unreachable("invalid enum");
@@ -291,7 +322,11 @@ CudaVersion MinVersionForCudaArch(CudaArch A) {
case CudaArch::GFX902:
case CudaArch::GFX904:
case CudaArch::GFX906:
+ case CudaArch::GFX908:
case CudaArch::GFX909:
+ case CudaArch::GFX1010:
+ case CudaArch::GFX1011:
+ case CudaArch::GFX1012:
return CudaVersion::CUDA_70;
}
llvm_unreachable("invalid enum");
@@ -316,10 +351,51 @@ CudaVersion MaxVersionForCudaArch(CudaArch A) {
case CudaArch::GFX810:
case CudaArch::GFX900:
case CudaArch::GFX902:
+ case CudaArch::GFX1010:
+ case CudaArch::GFX1011:
+ case CudaArch::GFX1012:
return CudaVersion::CUDA_80;
default:
return CudaVersion::LATEST;
}
}
+static CudaVersion ToCudaVersion(llvm::VersionTuple Version) {
+ int IVer =
+ Version.getMajor() * 10 + Version.getMinor().getValueOr(0);
+ switch(IVer) {
+ case 70:
+ return CudaVersion::CUDA_70;
+ case 75:
+ return CudaVersion::CUDA_75;
+ case 80:
+ return CudaVersion::CUDA_80;
+ case 90:
+ return CudaVersion::CUDA_90;
+ case 91:
+ return CudaVersion::CUDA_91;
+ case 92:
+ return CudaVersion::CUDA_92;
+ case 100:
+ return CudaVersion::CUDA_100;
+ case 101:
+ return CudaVersion::CUDA_101;
+ default:
+ return CudaVersion::UNKNOWN;
+ }
+}
+
+bool CudaFeatureEnabled(llvm::VersionTuple Version, CudaFeature Feature) {
+ return CudaFeatureEnabled(ToCudaVersion(Version), Feature);
+}
+
+bool CudaFeatureEnabled(CudaVersion Version, CudaFeature Feature) {
+ switch (Feature) {
+ case CudaFeature::CUDA_USES_NEW_LAUNCH:
+ return Version >= CudaVersion::CUDA_92;
+ case CudaFeature::CUDA_USES_FATBIN_REGISTER_END:
+ return Version >= CudaVersion::CUDA_101;
+ }
+ llvm_unreachable("Unknown CUDA feature.");
+}
} // namespace clang
diff --git a/lib/Basic/Diagnostic.cpp b/lib/Basic/Diagnostic.cpp
index 56c54cb9070c..c82f74413ec1 100644
--- a/lib/Basic/Diagnostic.cpp
+++ b/lib/Basic/Diagnostic.cpp
@@ -1,9 +1,8 @@
//===- Diagnostic.cpp - C Language Family Diagnostic Handling -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -206,10 +205,9 @@ DiagnosticsEngine::DiagStateMap::lookup(SourceManager &SrcMgr,
DiagnosticsEngine::DiagState *
DiagnosticsEngine::DiagStateMap::File::lookup(unsigned Offset) const {
- auto OnePastIt = std::upper_bound(
- StateTransitions.begin(), StateTransitions.end(), Offset,
- [](unsigned Offset, const DiagStatePoint &P) {
- return Offset < P.Offset;
+ auto OnePastIt =
+ llvm::partition_point(StateTransitions, [=](const DiagStatePoint &P) {
+ return P.Offset <= Offset;
});
assert(OnePastIt != StateTransitions.begin() && "missing initial state");
return OnePastIt[-1].State;
diff --git a/lib/Basic/DiagnosticIDs.cpp b/lib/Basic/DiagnosticIDs.cpp
index 8f2c3d06a504..e30e3753d193 100644
--- a/lib/Basic/DiagnosticIDs.cpp
+++ b/lib/Basic/DiagnosticIDs.cpp
@@ -1,9 +1,8 @@
//===--- DiagnosticIDs.cpp - Diagnostic IDs Handling ----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -312,11 +311,9 @@ namespace clang {
// Common Diagnostic implementation
//===----------------------------------------------------------------------===//
-DiagnosticIDs::DiagnosticIDs() { CustomDiagInfo = nullptr; }
+DiagnosticIDs::DiagnosticIDs() {}
-DiagnosticIDs::~DiagnosticIDs() {
- delete CustomDiagInfo;
-}
+DiagnosticIDs::~DiagnosticIDs() {}
/// getCustomDiagID - Return an ID for a diagnostic with the specified message
/// and level. If this is the first request for this diagnostic, it is
@@ -326,7 +323,7 @@ DiagnosticIDs::~DiagnosticIDs() {
/// mapped to a unique DiagID.
unsigned DiagnosticIDs::getCustomDiagID(Level L, StringRef FormatString) {
if (!CustomDiagInfo)
- CustomDiagInfo = new diag::CustomDiagInfo();
+ CustomDiagInfo.reset(new diag::CustomDiagInfo());
return CustomDiagInfo->getOrCreateDiagID(L, FormatString, *this);
}
@@ -457,12 +454,17 @@ DiagnosticIDs::getDiagnosticSeverity(unsigned DiagID, SourceLocation Loc,
if (Result == diag::Severity::Ignored)
return Result;
- // Honor -w, which is lower in priority than pedantic-errors, but higher than
- // -Werror.
- // FIXME: Under GCC, this also suppresses warnings that have been mapped to
- // errors by -W flags and #pragma diagnostic.
- if (Result == diag::Severity::Warning && State->IgnoreAllWarnings)
- return diag::Severity::Ignored;
+ // Honor -w: this disables all messages which are not Error/Fatal by
+ // default (disregarding attempts to upgrade severity from Warning to Error),
+ // as well as disabling all messages which are currently mapped to Warning
+ // (whether by default or downgraded from Error via e.g. -Wno-error or #pragma
+ // diagnostic.)
+ if (State->IgnoreAllWarnings) {
+ if (Result == diag::Severity::Warning ||
+ (Result >= diag::Severity::Error &&
+ !isDefaultMappingAsError((diag::kind)DiagID)))
+ return diag::Severity::Ignored;
+ }
// If -Werror is enabled, map warnings to errors unless explicitly disabled.
if (Result == diag::Severity::Warning) {
@@ -477,6 +479,11 @@ DiagnosticIDs::getDiagnosticSeverity(unsigned DiagID, SourceLocation Loc,
Result = diag::Severity::Fatal;
}
+ // If explicitly requested, map fatal errors to errors.
+ if (Result == diag::Severity::Fatal &&
+ Diag.CurDiagID != diag::fatal_too_many_errors && Diag.FatalsAsError)
+ Result = diag::Severity::Error;
+
// Custom diagnostics always are emitted in system headers.
bool ShowInSystemHeader =
!GetDiagInfo(DiagID) || GetDiagInfo(DiagID)->WarnShowInSystemHeader;
@@ -571,11 +578,8 @@ static bool getDiagnosticsInGroup(diag::Flavor Flavor,
bool
DiagnosticIDs::getDiagnosticsInGroup(diag::Flavor Flavor, StringRef Group,
SmallVectorImpl<diag::kind> &Diags) const {
- auto Found = std::lower_bound(std::begin(OptionTable), std::end(OptionTable),
- Group,
- [](const WarningOption &LHS, StringRef RHS) {
- return LHS.getName() < RHS;
- });
+ auto Found = llvm::partition_point(
+ OptionTable, [=](const WarningOption &O) { return O.getName() < Group; });
if (Found == std::end(OptionTable) || Found->getName() != Group)
return true; // Option not found.
@@ -656,7 +660,7 @@ bool DiagnosticIDs::ProcessDiag(DiagnosticsEngine &Diag) const {
// If a fatal error has already been emitted, silence all subsequent
// diagnostics.
- if (Diag.FatalErrorOccurred && Diag.SuppressAfterFatalError) {
+ if (Diag.FatalErrorOccurred) {
if (DiagLevel >= DiagnosticIDs::Error &&
Diag.Client->IncludeInDiagnosticCounts()) {
++Diag.NumErrors;
diff --git a/lib/Basic/DiagnosticOptions.cpp b/lib/Basic/DiagnosticOptions.cpp
index ebd9bb45f380..68571f2cf94f 100644
--- a/lib/Basic/DiagnosticOptions.cpp
+++ b/lib/Basic/DiagnosticOptions.cpp
@@ -1,9 +1,8 @@
//===- DiagnosticOptions.cpp - C Language Family Diagnostic Handling ------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/FileManager.cpp b/lib/Basic/FileManager.cpp
index f5a2d4894c13..b6a7fde09f35 100644
--- a/lib/Basic/FileManager.cpp
+++ b/lib/Basic/FileManager.cpp
@@ -1,9 +1,8 @@
//===--- FileManager.cpp - File System Probing and Caching ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -36,14 +35,6 @@
using namespace clang;
-/// NON_EXISTENT_DIR - A special value distinct from null that is used to
-/// represent a dir name that doesn't exist on the disk.
-#define NON_EXISTENT_DIR reinterpret_cast<DirectoryEntry*>((intptr_t)-1)
-
-/// NON_EXISTENT_FILE - A special value distinct from null that is used to
-/// represent a filename that doesn't exist on the disk.
-#define NON_EXISTENT_FILE reinterpret_cast<FileEntry*>((intptr_t)-1)
-
//===----------------------------------------------------------------------===//
// Common logic.
//===----------------------------------------------------------------------===//
@@ -96,14 +87,13 @@ void FileManager::addAncestorsAsVirtualDirs(StringRef Path) {
if (DirName.empty())
DirName = ".";
- auto &NamedDirEnt =
- *SeenDirEntries.insert(std::make_pair(DirName, nullptr)).first;
+ auto &NamedDirEnt = *SeenDirEntries.insert({DirName, nullptr}).first;
// When caching a virtual directory, we always cache its ancestors
// at the same time. Therefore, if DirName is already in the cache,
// we don't need to recurse as its ancestors must also already be in
- // the cache.
- if (NamedDirEnt.second && NamedDirEnt.second != NON_EXISTENT_DIR)
+ // the cache (or it's a known non-virtual directory).
+ if (NamedDirEnt.second)
return;
// Add the virtual directory to the cache.
@@ -137,27 +127,25 @@ const DirectoryEntry *FileManager::getDirectory(StringRef DirName,
#endif
++NumDirLookups;
- auto &NamedDirEnt =
- *SeenDirEntries.insert(std::make_pair(DirName, nullptr)).first;
// See if there was already an entry in the map. Note that the map
// contains both virtual and real directories.
- if (NamedDirEnt.second)
- return NamedDirEnt.second == NON_EXISTENT_DIR ? nullptr
- : NamedDirEnt.second;
+ auto SeenDirInsertResult = SeenDirEntries.insert({DirName, nullptr});
+ if (!SeenDirInsertResult.second)
+ return SeenDirInsertResult.first->second;
+ // We've not seen this before. Fill it in.
++NumDirCacheMisses;
-
- // By default, initialize it to invalid.
- NamedDirEnt.second = NON_EXISTENT_DIR;
+ auto &NamedDirEnt = *SeenDirInsertResult.first;
+ assert(!NamedDirEnt.second && "should be newly-created");
// Get the null-terminated directory name as stored as the key of the
// SeenDirEntries map.
StringRef InterndDirName = NamedDirEnt.first();
// Check to see if the directory exists.
- FileData Data;
- if (getStatValue(InterndDirName, Data, false, nullptr /*directory lookup*/)) {
+ llvm::vfs::Status Status;
+ if (getStatValue(InterndDirName, Status, false, nullptr /*directory lookup*/)) {
// There's no real directory at the given path.
if (!CacheFailure)
SeenDirEntries.erase(DirName);
@@ -168,7 +156,7 @@ const DirectoryEntry *FileManager::getDirectory(StringRef DirName,
// same inode (this occurs on Unix-like systems when one dir is
// symlinked to another, for example) or the same path (on
// Windows).
- DirectoryEntry &UDE = UniqueRealDirs[Data.UniqueID];
+ DirectoryEntry &UDE = UniqueRealDirs[Status.getUniqueID()];
NamedDirEnt.second = &UDE;
if (UDE.getName().empty()) {
@@ -185,24 +173,14 @@ const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
++NumFileLookups;
// See if there is already an entry in the map.
- auto &NamedFileEnt =
- *SeenFileEntries.insert(std::make_pair(Filename, nullptr)).first;
-
- // See if there is already an entry in the map.
- if (NamedFileEnt.second) {
- if (NamedFileEnt.second == NON_EXISTENT_FILE)
- return nullptr;
- // Entry exists: return it *unless* it wasn't opened and open is requested.
- if (!(NamedFileEnt.second->DeferredOpen && openFile))
- return NamedFileEnt.second;
- // We previously stat()ed the file, but didn't open it: do that below.
- // FIXME: the below does other redundant work too (stats the dir and file).
- } else {
- // By default, initialize it to invalid.
- NamedFileEnt.second = NON_EXISTENT_FILE;
- }
+ auto SeenFileInsertResult = SeenFileEntries.insert({Filename, nullptr});
+ if (!SeenFileInsertResult.second)
+ return SeenFileInsertResult.first->second;
+ // We've not seen this before. Fill it in.
++NumFileCacheMisses;
+ auto &NamedFileEnt = *SeenFileInsertResult.first;
+ assert(!NamedFileEnt.second && "should be newly-created");
// Get the null-terminated file name as stored as the key of the
// SeenFileEntries map.
@@ -225,10 +203,10 @@ const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
// FIXME: Use the directory info to prune this, before doing the stat syscall.
// FIXME: This will reduce the # syscalls.
- // Nope, there isn't. Check to see if the file exists.
+ // Check to see if the file exists.
std::unique_ptr<llvm::vfs::File> F;
- FileData Data;
- if (getStatValue(InterndFileName, Data, true, openFile ? &F : nullptr)) {
+ llvm::vfs::Status Status;
+ if (getStatValue(InterndFileName, Status, true, openFile ? &F : nullptr)) {
// There's no real file at the given path.
if (!CacheFailure)
SeenFileEntries.erase(Filename);
@@ -240,33 +218,20 @@ const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
// It exists. See if we have already opened a file with the same inode.
// This occurs when one dir is symlinked to another, for example.
- FileEntry &UFE = UniqueRealFiles[Data.UniqueID];
- UFE.DeferredOpen = !openFile;
+ FileEntry &UFE = UniqueRealFiles[Status.getUniqueID()];
NamedFileEnt.second = &UFE;
// If the name returned by getStatValue is different than Filename, re-intern
// the name.
- if (Data.Name != Filename) {
+ if (Status.getName() != Filename) {
auto &NamedFileEnt =
- *SeenFileEntries.insert(std::make_pair(Data.Name, nullptr)).first;
- if (!NamedFileEnt.second)
- NamedFileEnt.second = &UFE;
- else
- assert(NamedFileEnt.second == &UFE &&
- "filename from getStatValue() refers to wrong file");
+ *SeenFileEntries.insert({Status.getName(), &UFE}).first;
+ assert(NamedFileEnt.second == &UFE &&
+ "filename from getStatValue() refers to wrong file");
InterndFileName = NamedFileEnt.first().data();
}
- // If we opened the file for the first time, record the resulting info.
- // Do this even if the cache entry was valid, maybe we didn't previously open.
- if (F && !UFE.File) {
- if (auto PathName = F->getName())
- fillRealPathName(&UFE, *PathName);
- UFE.File = std::move(F);
- assert(!UFE.DeferredOpen && "we just opened it!");
- }
-
if (UFE.isValid()) { // Already have an entry with this inode, return it.
// FIXME: this hack ensures that if we look up a file by a virtual path in
@@ -275,7 +240,7 @@ const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
// module's structure when its headers/module map are mapped in the VFS.
// We should remove this as soon as we can properly support a file having
// multiple names.
- if (DirInfo != UFE.Dir && Data.IsVFSMapped)
+ if (DirInfo != UFE.Dir && Status.IsVFSMapped)
UFE.Dir = DirInfo;
// Always update the name to use the last name by which a file was accessed.
@@ -290,16 +255,22 @@ const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
// Otherwise, we don't have this file yet, add it.
UFE.Name = InterndFileName;
- UFE.Size = Data.Size;
- UFE.ModTime = Data.ModTime;
+ UFE.Size = Status.getSize();
+ UFE.ModTime = llvm::sys::toTimeT(Status.getLastModificationTime());
UFE.Dir = DirInfo;
UFE.UID = NextFileUID++;
- UFE.UniqueID = Data.UniqueID;
- UFE.IsNamedPipe = Data.IsNamedPipe;
- UFE.InPCH = Data.InPCH;
+ UFE.UniqueID = Status.getUniqueID();
+ UFE.IsNamedPipe = Status.getType() == llvm::sys::fs::file_type::fifo_file;
+ UFE.File = std::move(F);
UFE.IsValid = true;
- // Note File and DeferredOpen were initialized above.
+ if (UFE.File) {
+ if (auto PathName = UFE.File->getName())
+ fillRealPathName(&UFE, *PathName);
+ } else if (!openFile) {
+ // We should still fill the path even if we aren't opening the file.
+ fillRealPathName(&UFE, InterndFileName);
+ }
return &UFE;
}
@@ -308,19 +279,13 @@ FileManager::getVirtualFile(StringRef Filename, off_t Size,
time_t ModificationTime) {
++NumFileLookups;
- // See if there is already an entry in the map.
- auto &NamedFileEnt =
- *SeenFileEntries.insert(std::make_pair(Filename, nullptr)).first;
-
- // See if there is already an entry in the map.
- if (NamedFileEnt.second && NamedFileEnt.second != NON_EXISTENT_FILE)
+ // See if there is already an entry in the map for an existing file.
+ auto &NamedFileEnt = *SeenFileEntries.insert({Filename, nullptr}).first;
+ if (NamedFileEnt.second)
return NamedFileEnt.second;
+ // We've not seen this before, or the file is cached as non-existent.
++NumFileCacheMisses;
-
- // By default, initialize it to invalid.
- NamedFileEnt.second = NON_EXISTENT_FILE;
-
addAncestorsAsVirtualDirs(Filename);
FileEntry *UFE = nullptr;
@@ -333,12 +298,15 @@ FileManager::getVirtualFile(StringRef Filename, off_t Size,
"The directory of a virtual file should already be in the cache.");
// Check to see if the file exists. If so, drop the virtual file
- FileData Data;
+ llvm::vfs::Status Status;
const char *InterndFileName = NamedFileEnt.first().data();
- if (getStatValue(InterndFileName, Data, true, nullptr) == 0) {
- Data.Size = Size;
- Data.ModTime = ModificationTime;
- UFE = &UniqueRealFiles[Data.UniqueID];
+ if (getStatValue(InterndFileName, Status, true, nullptr) == 0) {
+ UFE = &UniqueRealFiles[Status.getUniqueID()];
+ Status = llvm::vfs::Status(
+ Status.getName(), Status.getUniqueID(),
+ llvm::sys::toTimePoint(ModificationTime),
+ Status.getUser(), Status.getGroup(), Size,
+ Status.getType(), Status.getPermissions());
NamedFileEnt.second = UFE;
@@ -352,13 +320,10 @@ FileManager::getVirtualFile(StringRef Filename, off_t Size,
if (UFE->isValid())
return UFE;
- UFE->UniqueID = Data.UniqueID;
- UFE->IsNamedPipe = Data.IsNamedPipe;
- UFE->InPCH = Data.InPCH;
- fillRealPathName(UFE, Data.Name);
- }
-
- if (!UFE) {
+ UFE->UniqueID = Status.getUniqueID();
+ UFE->IsNamedPipe = Status.getType() == llvm::sys::fs::file_type::fifo_file;
+ fillRealPathName(UFE, Status.getName());
+ } else {
VirtualFileEntries.push_back(llvm::make_unique<FileEntry>());
UFE = VirtualFileEntries.back().get();
NamedFileEnt.second = UFE;
@@ -371,7 +336,6 @@ FileManager::getVirtualFile(StringRef Filename, off_t Size,
UFE->UID = NextFileUID++;
UFE->IsValid = true;
UFE->File.reset();
- UFE->DeferredOpen = false;
return UFE;
}
@@ -459,18 +423,20 @@ FileManager::getBufferForFile(StringRef Filename, bool isVolatile) {
/// if the path points to a virtual file or does not exist, or returns
/// false if it's an existent real file. If FileDescriptor is NULL,
/// do directory look-up instead of file look-up.
-bool FileManager::getStatValue(StringRef Path, FileData &Data, bool isFile,
+bool FileManager::getStatValue(StringRef Path, llvm::vfs::Status &Status,
+ bool isFile,
std::unique_ptr<llvm::vfs::File> *F) {
// FIXME: FileSystemOpts shouldn't be passed in here, all paths should be
// absolute!
if (FileSystemOpts.WorkingDir.empty())
- return FileSystemStatCache::get(Path, Data, isFile, F,StatCache.get(), *FS);
+ return bool(FileSystemStatCache::get(Path, Status, isFile, F,
+ StatCache.get(), *FS));
SmallString<128> FilePath(Path);
FixupRelativePath(FilePath);
- return FileSystemStatCache::get(FilePath.c_str(), Data, isFile, F,
- StatCache.get(), *FS);
+ return bool(FileSystemStatCache::get(FilePath.c_str(), Status, isFile, F,
+ StatCache.get(), *FS));
}
bool FileManager::getNoncachedStatValue(StringRef Path,
@@ -493,6 +459,9 @@ void FileManager::invalidateCache(const FileEntry *Entry) {
// FileEntry invalidation should not block future optimizations in the file
// caches. Possible alternatives are cache truncation (invalidate last N) or
// invalidation of the whole cache.
+ //
+ // FIXME: This is broken. We sometimes have the same FileEntry* shared
+ // between multiple SeenFileEntries, so this can leave dangling pointers.
UniqueRealFiles.erase(Entry->getUniqueID());
}
@@ -505,13 +474,12 @@ void FileManager::GetUniqueIDMapping(
for (llvm::StringMap<FileEntry*, llvm::BumpPtrAllocator>::const_iterator
FE = SeenFileEntries.begin(), FEEnd = SeenFileEntries.end();
FE != FEEnd; ++FE)
- if (FE->getValue() && FE->getValue() != NON_EXISTENT_FILE)
+ if (FE->getValue())
UIDToFiles[FE->getValue()->getUID()] = FE->getValue();
// Map virtual file entries
for (const auto &VFE : VirtualFileEntries)
- if (VFE && VFE.get() != NON_EXISTENT_FILE)
- UIDToFiles[VFE->getUID()] = VFE.get();
+ UIDToFiles[VFE->getUID()] = VFE.get();
}
void FileManager::modifyFileEntry(FileEntry *File,
@@ -533,7 +501,7 @@ StringRef FileManager::getCanonicalName(const DirectoryEntry *Dir) {
if (!FS->getRealPath(Dir->getName(), CanonicalNameBuf))
CanonicalName = StringRef(CanonicalNameBuf).copy(CanonicalNameStorage);
- CanonicalDirNames.insert(std::make_pair(Dir, CanonicalName));
+ CanonicalDirNames.insert({Dir, CanonicalName});
return CanonicalName;
}
diff --git a/lib/Basic/FileSystemStatCache.cpp b/lib/Basic/FileSystemStatCache.cpp
index 6f2eef4e2062..415a4e2025df 100644
--- a/lib/Basic/FileSystemStatCache.cpp
+++ b/lib/Basic/FileSystemStatCache.cpp
@@ -1,9 +1,8 @@
//===- FileSystemStatCache.cpp - Caching for 'stat' calls -----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -22,18 +21,6 @@ using namespace clang;
void FileSystemStatCache::anchor() {}
-static void copyStatusToFileData(const llvm::vfs::Status &Status,
- FileData &Data) {
- Data.Name = Status.getName();
- Data.Size = Status.getSize();
- Data.ModTime = llvm::sys::toTimeT(Status.getLastModificationTime());
- Data.UniqueID = Status.getUniqueID();
- Data.IsDirectory = Status.isDirectory();
- Data.IsNamedPipe = Status.getType() == llvm::sys::fs::file_type::fifo_file;
- Data.InPCH = false;
- Data.IsVFSMapped = Status.IsVFSMapped;
-}
-
/// FileSystemStatCache::get - Get the 'stat' information for the specified
/// path, using the cache to accelerate it if possible. This returns true if
/// the path does not exist or false if it exists.
@@ -43,25 +30,25 @@ static void copyStatusToFileData(const llvm::vfs::Status &Status,
/// success for directories (not files). On a successful file lookup, the
/// implementation can optionally fill in FileDescriptor with a valid
/// descriptor and the client guarantees that it will close it.
-bool FileSystemStatCache::get(StringRef Path, FileData &Data, bool isFile,
- std::unique_ptr<llvm::vfs::File> *F,
- FileSystemStatCache *Cache,
- llvm::vfs::FileSystem &FS) {
- LookupResult R;
+std::error_code
+FileSystemStatCache::get(StringRef Path, llvm::vfs::Status &Status,
+ bool isFile, std::unique_ptr<llvm::vfs::File> *F,
+ FileSystemStatCache *Cache,
+ llvm::vfs::FileSystem &FS) {
bool isForDir = !isFile;
+ std::error_code RetCode;
// If we have a cache, use it to resolve the stat query.
if (Cache)
- R = Cache->getStat(Path, Data, isFile, F, FS);
+ RetCode = Cache->getStat(Path, Status, isFile, F, FS);
else if (isForDir || !F) {
// If this is a directory or a file descriptor is not needed and we have
// no cache, just go to the file system.
- llvm::ErrorOr<llvm::vfs::Status> Status = FS.status(Path);
- if (!Status) {
- R = CacheMissing;
+ llvm::ErrorOr<llvm::vfs::Status> StatusOrErr = FS.status(Path);
+ if (!StatusOrErr) {
+ RetCode = StatusOrErr.getError();
} else {
- R = CacheExists;
- copyStatusToFileData(*Status, Data);
+ Status = *StatusOrErr;
}
} else {
// Otherwise, we have to go to the filesystem. We can always just use
@@ -75,56 +62,59 @@ bool FileSystemStatCache::get(StringRef Path, FileData &Data, bool isFile,
if (!OwnedFile) {
// If the open fails, our "stat" fails.
- R = CacheMissing;
+ RetCode = OwnedFile.getError();
} else {
// Otherwise, the open succeeded. Do an fstat to get the information
// about the file. We'll end up returning the open file descriptor to the
// client to do what they please with it.
- llvm::ErrorOr<llvm::vfs::Status> Status = (*OwnedFile)->status();
- if (Status) {
- R = CacheExists;
- copyStatusToFileData(*Status, Data);
+ llvm::ErrorOr<llvm::vfs::Status> StatusOrErr = (*OwnedFile)->status();
+ if (StatusOrErr) {
+ Status = *StatusOrErr;
*F = std::move(*OwnedFile);
} else {
// fstat rarely fails. If it does, claim the initial open didn't
// succeed.
- R = CacheMissing;
*F = nullptr;
+ RetCode = StatusOrErr.getError();
}
}
}
// If the path doesn't exist, return failure.
- if (R == CacheMissing) return true;
+ if (RetCode)
+ return RetCode;
// If the path exists, make sure that its "directoryness" matches the clients
// demands.
- if (Data.IsDirectory != isForDir) {
+ if (Status.isDirectory() != isForDir) {
// If not, close the file if opened.
if (F)
*F = nullptr;
-
- return true;
+ return std::make_error_code(
+ Status.isDirectory() ?
+ std::errc::is_a_directory : std::errc::not_a_directory);
}
- return false;
+ return std::error_code();
}
-MemorizeStatCalls::LookupResult
-MemorizeStatCalls::getStat(StringRef Path, FileData &Data, bool isFile,
+std::error_code
+MemorizeStatCalls::getStat(StringRef Path, llvm::vfs::Status &Status,
+ bool isFile,
std::unique_ptr<llvm::vfs::File> *F,
llvm::vfs::FileSystem &FS) {
- if (get(Path, Data, isFile, F, nullptr, FS)) {
+ auto err = get(Path, Status, isFile, F, nullptr, FS);
+ if (err) {
// Do not cache failed stats, it is easy to construct common inconsistent
// situations if we do, and they are not important for PCH performance
// (which currently only needs the stats to construct the initial
// FileManager entries).
- return CacheMissing;
+ return err;
}
// Cache file 'stat' results and directories with absolute paths.
- if (!Data.IsDirectory || llvm::sys::path::is_absolute(Path))
- StatCalls[Path] = Data;
+ if (!Status.isDirectory() || llvm::sys::path::is_absolute(Path))
+ StatCalls[Path] = Status;
- return CacheExists;
+ return std::error_code();
}
diff --git a/lib/Basic/FixedPoint.cpp b/lib/Basic/FixedPoint.cpp
index bfff0fc212e0..05600dfc6d21 100644
--- a/lib/Basic/FixedPoint.cpp
+++ b/lib/Basic/FixedPoint.cpp
@@ -1,9 +1,8 @@
//===- FixedPoint.cpp - Fixed point constant handling -----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -16,11 +15,14 @@
namespace clang {
-APFixedPoint APFixedPoint::convert(const FixedPointSemantics &DstSema) const {
+APFixedPoint APFixedPoint::convert(const FixedPointSemantics &DstSema,
+ bool *Overflow) const {
llvm::APSInt NewVal = Val;
unsigned DstWidth = DstSema.getWidth();
unsigned DstScale = DstSema.getScale();
bool Upscaling = DstScale > getScale();
+ if (Overflow)
+ *Overflow = false;
if (Upscaling) {
NewVal = NewVal.extend(NewVal.getBitWidth() + DstScale - getScale());
@@ -29,18 +31,28 @@ APFixedPoint APFixedPoint::convert(const FixedPointSemantics &DstSema) const {
NewVal >>= (getScale() - DstScale);
}
- if (DstSema.isSaturated()) {
- auto Mask = llvm::APInt::getBitsSetFrom(
- NewVal.getBitWidth(),
- std::min(DstScale + DstSema.getIntegralBits(), NewVal.getBitWidth()));
- llvm::APInt Masked(NewVal & Mask);
+ auto Mask = llvm::APInt::getBitsSetFrom(
+ NewVal.getBitWidth(),
+ std::min(DstScale + DstSema.getIntegralBits(), NewVal.getBitWidth()));
+ llvm::APInt Masked(NewVal & Mask);
- // Change in the bits above the sign
- if (!(Masked == Mask || Masked == 0))
+ // Change in the bits above the sign
+ if (!(Masked == Mask || Masked == 0)) {
+ // Found overflow in the bits above the sign
+ if (DstSema.isSaturated())
NewVal = NewVal.isNegative() ? Mask : ~Mask;
+ else if (Overflow)
+ *Overflow = true;
+ }
- if (!DstSema.isSigned() && NewVal.isNegative())
+ // If the dst semantics are unsigned, but our value is signed and negative, we
+ // clamp to zero.
+ if (!DstSema.isSigned() && NewVal.isSigned() && NewVal.isNegative()) {
+ // Found negative overflow for unsigned result
+ if (DstSema.isSaturated())
NewVal = 0;
+ else if (Overflow)
+ *Overflow = true;
}
NewVal = NewVal.extOrTrunc(DstWidth);
@@ -112,4 +124,135 @@ APFixedPoint APFixedPoint::getMin(const FixedPointSemantics &Sema) {
return APFixedPoint(Val, Sema);
}
+FixedPointSemantics FixedPointSemantics::getCommonSemantics(
+ const FixedPointSemantics &Other) const {
+ unsigned CommonScale = std::max(getScale(), Other.getScale());
+ unsigned CommonWidth =
+ std::max(getIntegralBits(), Other.getIntegralBits()) + CommonScale;
+
+ bool ResultIsSigned = isSigned() || Other.isSigned();
+ bool ResultIsSaturated = isSaturated() || Other.isSaturated();
+ bool ResultHasUnsignedPadding = false;
+ if (!ResultIsSigned) {
+ // Both are unsigned.
+ ResultHasUnsignedPadding = hasUnsignedPadding() &&
+ Other.hasUnsignedPadding() && !ResultIsSaturated;
+ }
+
+ // If the result is signed, add an extra bit for the sign. Otherwise, if it is
+ // unsigned and has unsigned padding, we only need to add the extra padding
+ // bit back if we are not saturating.
+ if (ResultIsSigned || ResultHasUnsignedPadding)
+ CommonWidth++;
+
+ return FixedPointSemantics(CommonWidth, CommonScale, ResultIsSigned,
+ ResultIsSaturated, ResultHasUnsignedPadding);
+}
+
+APFixedPoint APFixedPoint::add(const APFixedPoint &Other,
+ bool *Overflow) const {
+ auto CommonFXSema = Sema.getCommonSemantics(Other.getSemantics());
+ APFixedPoint ConvertedThis = convert(CommonFXSema);
+ APFixedPoint ConvertedOther = Other.convert(CommonFXSema);
+ llvm::APSInt ThisVal = ConvertedThis.getValue();
+ llvm::APSInt OtherVal = ConvertedOther.getValue();
+ bool Overflowed = false;
+
+ llvm::APSInt Result;
+ if (CommonFXSema.isSaturated()) {
+ Result = CommonFXSema.isSigned() ? ThisVal.sadd_sat(OtherVal)
+ : ThisVal.uadd_sat(OtherVal);
+ } else {
+ Result = ThisVal.isSigned() ? ThisVal.sadd_ov(OtherVal, Overflowed)
+ : ThisVal.uadd_ov(OtherVal, Overflowed);
+ }
+
+ if (Overflow)
+ *Overflow = Overflowed;
+
+ return APFixedPoint(Result, CommonFXSema);
+}
+
+void APFixedPoint::toString(llvm::SmallVectorImpl<char> &Str) const {
+ llvm::APSInt Val = getValue();
+ unsigned Scale = getScale();
+
+ if (Val.isSigned() && Val.isNegative() && Val != -Val) {
+ Val = -Val;
+ Str.push_back('-');
+ }
+
+ llvm::APSInt IntPart = Val >> Scale;
+
+ // Add 4 digits to hold the value after multiplying by 10 (the radix)
+ unsigned Width = Val.getBitWidth() + 4;
+ llvm::APInt FractPart = Val.zextOrTrunc(Scale).zext(Width);
+ llvm::APInt FractPartMask = llvm::APInt::getAllOnesValue(Scale).zext(Width);
+ llvm::APInt RadixInt = llvm::APInt(Width, 10);
+
+ IntPart.toString(Str, /*Radix=*/10);
+ Str.push_back('.');
+ do {
+ (FractPart * RadixInt)
+ .lshr(Scale)
+ .toString(Str, /*Radix=*/10, Val.isSigned());
+ FractPart = (FractPart * RadixInt) & FractPartMask;
+ } while (FractPart != 0);
+}
+
+APFixedPoint APFixedPoint::negate(bool *Overflow) const {
+ if (!isSaturated()) {
+ if (Overflow)
+ *Overflow =
+ (!isSigned() && Val != 0) || (isSigned() && Val.isMinSignedValue());
+ return APFixedPoint(-Val, Sema);
+ }
+
+ // We never overflow for saturation
+ if (Overflow)
+ *Overflow = false;
+
+ if (isSigned())
+ return Val.isMinSignedValue() ? getMax(Sema) : APFixedPoint(-Val, Sema);
+ else
+ return APFixedPoint(Sema);
+}
+
+llvm::APSInt APFixedPoint::convertToInt(unsigned DstWidth, bool DstSign,
+ bool *Overflow) const {
+ llvm::APSInt Result = getIntPart();
+ unsigned SrcWidth = getWidth();
+
+ llvm::APSInt DstMin = llvm::APSInt::getMinValue(DstWidth, !DstSign);
+ llvm::APSInt DstMax = llvm::APSInt::getMaxValue(DstWidth, !DstSign);
+
+ if (SrcWidth < DstWidth) {
+ Result = Result.extend(DstWidth);
+ } else if (SrcWidth > DstWidth) {
+ DstMin = DstMin.extend(SrcWidth);
+ DstMax = DstMax.extend(SrcWidth);
+ }
+
+ if (Overflow) {
+ if (Result.isSigned() && !DstSign) {
+ *Overflow = Result.isNegative() || Result.ugt(DstMax);
+ } else if (Result.isUnsigned() && DstSign) {
+ *Overflow = Result.ugt(DstMax);
+ } else {
+ *Overflow = Result < DstMin || Result > DstMax;
+ }
+ }
+
+ Result.setIsSigned(DstSign);
+ return Result.extOrTrunc(DstWidth);
+}
+
+APFixedPoint APFixedPoint::getFromIntValue(const llvm::APSInt &Value,
+ const FixedPointSemantics &DstFXSema,
+ bool *Overflow) {
+ FixedPointSemantics IntFXSema = FixedPointSemantics::GetIntegerSemantics(
+ Value.getBitWidth(), Value.isSigned());
+ return APFixedPoint(Value, IntFXSema).convert(DstFXSema, Overflow);
+}
+
} // namespace clang
diff --git a/lib/Basic/IdentifierTable.cpp b/lib/Basic/IdentifierTable.cpp
index b961c8333bd7..ca9c71287ab7 100644
--- a/lib/Basic/IdentifierTable.cpp
+++ b/lib/Basic/IdentifierTable.cpp
@@ -1,9 +1,8 @@
//===- IdentifierTable.cpp - Hash table for identifier lookup -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -100,6 +99,7 @@ namespace {
KEYMODULES = 0x100000,
KEYCXX2A = 0x200000,
KEYOPENCLCXX = 0x400000,
+ KEYMSCOMPAT = 0x800000,
KEYALLCXX = KEYCXX | KEYCXX11 | KEYCXX2A,
KEYALL = (0xffffff & ~KEYNOMS18 &
~KEYNOOPENCL) // KEYNOMS18 and KEYNOOPENCL are used to exclude.
@@ -126,6 +126,7 @@ static KeywordStatus getKeywordStatus(const LangOptions &LangOpts,
if (LangOpts.C99 && (Flags & KEYC99)) return KS_Enabled;
if (LangOpts.GNUKeywords && (Flags & KEYGNU)) return KS_Extension;
if (LangOpts.MicrosoftExt && (Flags & KEYMS)) return KS_Extension;
+ if (LangOpts.MSVCCompat && (Flags & KEYMSCOMPAT)) return KS_Enabled;
if (LangOpts.Borland && (Flags & KEYBORLAND)) return KS_Extension;
if (LangOpts.Bool && (Flags & BOOLSUPPORT)) return KS_Enabled;
if (LangOpts.Half && (Flags & HALFSUPPORT)) return KS_Enabled;
@@ -142,7 +143,7 @@ static KeywordStatus getKeywordStatus(const LangOptions &LangOpts,
// in non-arc mode.
if (LangOpts.ObjC && (Flags & KEYOBJC)) return KS_Enabled;
if (LangOpts.ConceptsTS && (Flags & KEYCONCEPTS)) return KS_Enabled;
- if (LangOpts.CoroutinesTS && (Flags & KEYCOROUTINES)) return KS_Enabled;
+ if (LangOpts.Coroutines && (Flags & KEYCOROUTINES)) return KS_Enabled;
if (LangOpts.ModulesTS && (Flags & KEYMODULES)) return KS_Enabled;
if (LangOpts.CPlusPlus && (Flags & KEYALLCXX)) return KS_Future;
return KS_Disabled;
@@ -217,7 +218,7 @@ void IdentifierTable::AddKeywords(const LangOptions &LangOpts) {
if (LangOpts.DeclSpecKeyword)
AddKeyword("__declspec", tok::kw___declspec, KEYALL, LangOpts, *this);
- // Add the '_experimental_modules_import' contextual keyword.
+ // Add the 'import' contextual keyword.
get("import").setModulesImport(true);
}
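
For context on the new KEYMSCOMPAT bit above: each keyword carries a flag mask, and getKeywordStatus turns a flag bit into enabled/extension/disabled based on the language options. A simplified, self-contained sketch with hypothetical flag values (the real table lives in TokenKinds.def):

    enum : unsigned { KEYMS = 0x1, KEYMSCOMPAT = 0x2 };

    struct Options { bool MicrosoftExt; bool MSVCCompat; };

    enum KeywordStatus { KS_Disabled, KS_Extension, KS_Enabled };

    KeywordStatus status(const Options &LO, unsigned Flags) {
      if (LO.MicrosoftExt && (Flags & KEYMS))
        return KS_Extension;   // -fms-extensions: accepted, diagnosed as an extension
      if (LO.MSVCCompat && (Flags & KEYMSCOMPAT))
        return KS_Enabled;     // -fms-compatibility: treated as a real keyword
      return KS_Disabled;
    }
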
diff --git a/lib/Basic/LangOptions.cpp b/lib/Basic/LangOptions.cpp
index 763ba33683bc..516b1ff1b7e2 100644
--- a/lib/Basic/LangOptions.cpp
+++ b/lib/Basic/LangOptions.cpp
@@ -1,9 +1,8 @@
//===- LangOptions.cpp - C Language Family Language Options ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/MemoryBufferCache.cpp b/lib/Basic/MemoryBufferCache.cpp
deleted file mode 100644
index c1fc571ec9b3..000000000000
--- a/lib/Basic/MemoryBufferCache.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-//===- MemoryBufferCache.cpp - Cache for loaded memory buffers ------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Basic/MemoryBufferCache.h"
-#include "llvm/Support/MemoryBuffer.h"
-
-using namespace clang;
-
-llvm::MemoryBuffer &
-MemoryBufferCache::addBuffer(llvm::StringRef Filename,
- std::unique_ptr<llvm::MemoryBuffer> Buffer) {
- auto Insertion =
- Buffers.insert({Filename, BufferEntry{std::move(Buffer), NextIndex++}});
- assert(Insertion.second && "Already has a buffer");
- return *Insertion.first->second.Buffer;
-}
-
-llvm::MemoryBuffer *MemoryBufferCache::lookupBuffer(llvm::StringRef Filename) {
- auto I = Buffers.find(Filename);
- if (I == Buffers.end())
- return nullptr;
- return I->second.Buffer.get();
-}
-
-bool MemoryBufferCache::isBufferFinal(llvm::StringRef Filename) {
- auto I = Buffers.find(Filename);
- if (I == Buffers.end())
- return false;
- return I->second.Index < FirstRemovableIndex;
-}
-
-bool MemoryBufferCache::tryToRemoveBuffer(llvm::StringRef Filename) {
- auto I = Buffers.find(Filename);
- assert(I != Buffers.end() && "No buffer to remove...");
- if (I->second.Index < FirstRemovableIndex)
- return true;
-
- Buffers.erase(I);
- return false;
-}
-
-void MemoryBufferCache::finalizeCurrentBuffers() { FirstRemovableIndex = NextIndex; }
diff --git a/lib/Basic/Module.cpp b/lib/Basic/Module.cpp
index fd552f2baaca..f394f26e550c 100644
--- a/lib/Basic/Module.cpp
+++ b/lib/Basic/Module.cpp
@@ -1,9 +1,8 @@
//===- Module.cpp - Describe a module -------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -109,7 +108,7 @@ static bool hasFeature(StringRef Feature, const LangOptions &LangOpts,
bool HasFeature = llvm::StringSwitch<bool>(Feature)
.Case("altivec", LangOpts.AltiVec)
.Case("blocks", LangOpts.Blocks)
- .Case("coroutines", LangOpts.CoroutinesTS)
+ .Case("coroutines", LangOpts.Coroutines)
.Case("cplusplus", LangOpts.CPlusPlus)
.Case("cplusplus11", LangOpts.CPlusPlus11)
.Case("cplusplus14", LangOpts.CPlusPlus14)
@@ -322,6 +321,21 @@ Module *Module::findSubmodule(StringRef Name) const {
return SubModules[Pos->getValue()];
}
+Module *Module::findOrInferSubmodule(StringRef Name) {
+ llvm::StringMap<unsigned>::const_iterator Pos = SubModuleIndex.find(Name);
+ if (Pos != SubModuleIndex.end())
+ return SubModules[Pos->getValue()];
+ if (!InferSubmodules)
+ return nullptr;
+ Module *Result = new Module(Name, SourceLocation(), this, false, InferExplicitSubmodules, 0);
+ Result->InferExplicitSubmodules = InferExplicitSubmodules;
+ Result->InferSubmodules = InferSubmodules;
+ Result->InferExportWildcard = InferExportWildcard;
+ if (Result->InferExportWildcard)
+ Result->Exports.push_back(Module::ExportDecl(nullptr, true));
+ return Result;
+}
+
void Module::getExportedModules(SmallVectorImpl<Module *> &Exported) const {
// All non-explicit submodules are exported.
for (std::vector<Module *>::const_iterator I = SubModules.begin(),
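
The new Module::findOrInferSubmodule follows a find-or-lazily-create pattern. A plain-C++ sketch of that pattern with generic names (not the clang Module API): look the child up by name, and only when lookup fails and inference is allowed, create the child and propagate the inference flags to it.

    #include <map>
    #include <memory>
    #include <string>

    struct Node {
      bool InferChildren = false;
      std::map<std::string, std::unique_ptr<Node>> Children;

      Node *findOrInfer(const std::string &Name) {
        auto It = Children.find(Name);
        if (It != Children.end())
          return It->second.get();      // explicit child wins
        if (!InferChildren)
          return nullptr;               // nothing explicit and inference is off
        auto &Slot = Children[Name];
        Slot = std::make_unique<Node>();
        Slot->InferChildren = InferChildren; // inferred children inherit the flag
        return Slot.get();
      }
    };
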
diff --git a/lib/Basic/ObjCRuntime.cpp b/lib/Basic/ObjCRuntime.cpp
index 311bd067261a..cfc437409b5d 100644
--- a/lib/Basic/ObjCRuntime.cpp
+++ b/lib/Basic/ObjCRuntime.cpp
@@ -1,9 +1,8 @@
//===- ObjCRuntime.cpp - Objective-C Runtime Handling ---------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/OpenMPKinds.cpp b/lib/Basic/OpenMPKinds.cpp
index a5bfac86e610..82e193efef32 100644
--- a/lib/Basic/OpenMPKinds.cpp
+++ b/lib/Basic/OpenMPKinds.cpp
@@ -1,9 +1,8 @@
//===--- OpenMPKinds.cpp - Token Kinds Support ----------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
@@ -115,6 +114,18 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
.Case(#Name, static_cast<unsigned>(OMPC_MAP_MODIFIER_##Name))
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_MAP_unknown);
+ case OMPC_to:
+ return llvm::StringSwitch<unsigned>(Str)
+#define OPENMP_TO_MODIFIER_KIND(Name) \
+ .Case(#Name, static_cast<unsigned>(OMPC_TO_MODIFIER_##Name))
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_TO_MODIFIER_unknown);
+ case OMPC_from:
+ return llvm::StringSwitch<unsigned>(Str)
+#define OPENMP_FROM_MODIFIER_KIND(Name) \
+ .Case(#Name, static_cast<unsigned>(OMPC_FROM_MODIFIER_##Name))
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_FROM_MODIFIER_unknown);
case OMPC_dist_schedule:
return llvm::StringSwitch<OpenMPDistScheduleClauseKind>(Str)
#define OPENMP_DIST_SCHEDULE_KIND(Name) .Case(#Name, OMPC_DIST_SCHEDULE_##Name)
@@ -141,6 +152,8 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
case OMPC_num_threads:
case OMPC_safelen:
case OMPC_simdlen:
+ case OMPC_allocator:
+ case OMPC_allocate:
case OMPC_collapse:
case OMPC_private:
case OMPC_firstprivate:
@@ -173,8 +186,6 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
case OMPC_num_tasks:
case OMPC_hint:
case OMPC_uniform:
- case OMPC_to:
- case OMPC_from:
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
@@ -259,6 +270,30 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
break;
}
llvm_unreachable("Invalid OpenMP 'map' clause type");
+ case OMPC_to:
+ switch (Type) {
+ case OMPC_TO_MODIFIER_unknown:
+ return "unknown";
+#define OPENMP_TO_MODIFIER_KIND(Name) \
+ case OMPC_TO_MODIFIER_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ llvm_unreachable("Invalid OpenMP 'to' clause type");
+ case OMPC_from:
+ switch (Type) {
+ case OMPC_FROM_MODIFIER_unknown:
+ return "unknown";
+#define OPENMP_FROM_MODIFIER_KIND(Name) \
+ case OMPC_FROM_MODIFIER_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ llvm_unreachable("Invalid OpenMP 'from' clause type");
case OMPC_dist_schedule:
switch (Type) {
case OMPC_DIST_SCHEDULE_unknown:
@@ -300,6 +335,8 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_num_threads:
case OMPC_safelen:
case OMPC_simdlen:
+ case OMPC_allocator:
+ case OMPC_allocate:
case OMPC_collapse:
case OMPC_private:
case OMPC_firstprivate:
@@ -332,8 +369,6 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_num_tasks:
case OMPC_hint:
case OMPC_uniform:
- case OMPC_to:
- case OMPC_from:
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
@@ -765,6 +800,26 @@ bool clang::isAllowedClauseForDirective(OpenMPDirectiveKind DKind,
break;
}
break;
+ case OMPD_declare_mapper:
+ switch (CKind) {
+#define OPENMP_DECLARE_MAPPER_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_allocate:
+ switch (CKind) {
+#define OPENMP_ALLOCATE_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_unknown:
@@ -992,6 +1047,7 @@ void clang::getOpenMPCaptureRegions(
CaptureRegions.push_back(OMPD_unknown);
break;
case OMPD_threadprivate:
+ case OMPD_allocate:
case OMPD_taskyield:
case OMPD_barrier:
case OMPD_taskwait:
@@ -999,6 +1055,7 @@ void clang::getOpenMPCaptureRegions(
case OMPD_cancel:
case OMPD_flush:
case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_target:
case OMPD_end_declare_target:
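
The to/from modifier plumbing above uses the usual OpenMPKinds.def X-macro pattern: one macro list drives both the enum and the string parser, so adding a modifier is a single added line. A self-contained illustration with hypothetical modifier names (not the real .def contents):

    #include <string>

    #define TO_MODIFIER_LIST(X) X(always) X(mapper)

    enum ToModifier {
    #define X(Name) TO_MODIFIER_##Name,
      TO_MODIFIER_LIST(X)
    #undef X
      TO_MODIFIER_unknown
    };

    ToModifier parseToModifier(const std::string &Str) {
    #define X(Name) if (Str == #Name) return TO_MODIFIER_##Name;
      TO_MODIFIER_LIST(X)
    #undef X
      return TO_MODIFIER_unknown;
    }
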
diff --git a/lib/Basic/OperatorPrecedence.cpp b/lib/Basic/OperatorPrecedence.cpp
index bf805fc7deb1..02876f14291d 100644
--- a/lib/Basic/OperatorPrecedence.cpp
+++ b/lib/Basic/OperatorPrecedence.cpp
@@ -1,9 +1,8 @@
//===--- OperatorPrecedence.cpp ---------------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/Basic/SanitizerBlacklist.cpp b/lib/Basic/SanitizerBlacklist.cpp
index 199ded1f317a..aec35c7d9864 100644
--- a/lib/Basic/SanitizerBlacklist.cpp
+++ b/lib/Basic/SanitizerBlacklist.cpp
@@ -1,9 +1,8 @@
//===--- SanitizerBlacklist.cpp - Blacklist for sanitizers ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/SanitizerSpecialCaseList.cpp b/lib/Basic/SanitizerSpecialCaseList.cpp
index ee8feecbce65..5fb0f9660b15 100644
--- a/lib/Basic/SanitizerSpecialCaseList.cpp
+++ b/lib/Basic/SanitizerSpecialCaseList.cpp
@@ -1,9 +1,8 @@
//===--- SanitizerSpecialCaseList.cpp - SCL for sanitizers ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -37,7 +36,7 @@ SanitizerSpecialCaseList::createOrDie(const std::vector<std::string> &Paths) {
void SanitizerSpecialCaseList::createSanitizerSections() {
for (auto &S : Sections) {
- SanitizerMask Mask = 0;
+ SanitizerMask Mask;
#define SANITIZER(NAME, ID) \
if (S.SectionMatcher->match(NAME)) \
diff --git a/lib/Basic/Sanitizers.cpp b/lib/Basic/Sanitizers.cpp
index 8faf17b8f22e..f5f81b5fb3e5 100644
--- a/lib/Basic/Sanitizers.cpp
+++ b/lib/Basic/Sanitizers.cpp
@@ -1,9 +1,8 @@
//===- Sanitizers.cpp - C Language Family Language Options ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -12,17 +11,26 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/Sanitizers.h"
+#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
+// Once LLVM switches to C++17, the constexpr variables can be inline and we
+// won't need this.
+#define SANITIZER(NAME, ID) constexpr SanitizerMask SanitizerKind::ID;
+#define SANITIZER_GROUP(NAME, ID, ALIAS) \
+ constexpr SanitizerMask SanitizerKind::ID; \
+ constexpr SanitizerMask SanitizerKind::ID##Group;
+#include "clang/Basic/Sanitizers.def"
+
SanitizerMask clang::parseSanitizerValue(StringRef Value, bool AllowGroups) {
SanitizerMask ParsedKind = llvm::StringSwitch<SanitizerMask>(Value)
#define SANITIZER(NAME, ID) .Case(NAME, SanitizerKind::ID)
#define SANITIZER_GROUP(NAME, ID, ALIAS) \
- .Case(NAME, AllowGroups ? SanitizerKind::ID##Group : 0)
+ .Case(NAME, AllowGroups ? SanitizerKind::ID##Group : SanitizerMask())
#include "clang/Basic/Sanitizers.def"
- .Default(0);
+ .Default(SanitizerMask());
return ParsedKind;
}
@@ -34,3 +42,13 @@ SanitizerMask clang::expandSanitizerGroups(SanitizerMask Kinds) {
#include "clang/Basic/Sanitizers.def"
return Kinds;
}
+
+llvm::hash_code SanitizerMask::hash_value() const {
+ return llvm::hash_combine_range(&maskLoToHigh[0], &maskLoToHigh[kNumElem]);
+}
+
+namespace clang {
+llvm::hash_code hash_value(const clang::SanitizerMask &Arg) {
+ return Arg.hash_value();
+}
+} // namespace clang
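
SanitizerMask is now a multi-word bitset, so hash_value above combines every underlying word. A rough plain-C++ equivalent using std::hash instead of llvm::hash_combine_range (member names mirror the patch; the mixing constant is the usual boost-style combine, an assumption rather than what LLVM actually does):

    #include <cstddef>
    #include <cstdint>
    #include <functional>

    struct WideMask {
      static constexpr unsigned kNumElem = 2;
      uint64_t maskLoToHigh[kNumElem] = {};

      std::size_t hash() const {
        std::size_t Seed = 0;
        for (uint64_t Word : maskLoToHigh) // equal masks hash equally, word by word
          Seed ^= std::hash<uint64_t>{}(Word) + 0x9e3779b9u + (Seed << 6) + (Seed >> 2);
        return Seed;
      }
    };
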
diff --git a/lib/Basic/SourceLocation.cpp b/lib/Basic/SourceLocation.cpp
index aa844f2cd26c..c1fa406909fe 100644
--- a/lib/Basic/SourceLocation.cpp
+++ b/lib/Basic/SourceLocation.cpp
@@ -1,9 +1,8 @@
//===- SourceLocation.cpp - Compact identifier for Source Files -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/SourceManager.cpp b/lib/Basic/SourceManager.cpp
index ce8aa5d112b3..12b0305e707c 100644
--- a/lib/Basic/SourceManager.cpp
+++ b/lib/Basic/SourceManager.cpp
@@ -1,9 +1,8 @@
//===- SourceManager.cpp - Track and cache source files -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -70,7 +69,7 @@ llvm::MemoryBuffer::BufferKind ContentCache::getMemoryBufferKind() const {
if (!Buffer.getPointer())
return llvm::MemoryBuffer::MemoryBuffer_Malloc;
- llvm::MemoryBuffer *buf = Buffer.getPointer();
+ const llvm::MemoryBuffer *buf = Buffer.getPointer();
return buf->getBufferKind();
}
@@ -83,7 +82,7 @@ unsigned ContentCache::getSize() const {
: (unsigned) ContentsEntry->getSize();
}
-void ContentCache::replaceBuffer(llvm::MemoryBuffer *B, bool DoNotFree) {
+void ContentCache::replaceBuffer(const llvm::MemoryBuffer *B, bool DoNotFree) {
if (B && B == Buffer.getPointer()) {
assert(0 && "Replacing with the same buffer");
Buffer.setInt(DoNotFree? DoNotFreeFlag : 0);
@@ -96,10 +95,10 @@ void ContentCache::replaceBuffer(llvm::MemoryBuffer *B, bool DoNotFree) {
Buffer.setInt((B && DoNotFree) ? DoNotFreeFlag : 0);
}
-llvm::MemoryBuffer *ContentCache::getBuffer(DiagnosticsEngine &Diag,
- const SourceManager &SM,
- SourceLocation Loc,
- bool *Invalid) const {
+const llvm::MemoryBuffer *ContentCache::getBuffer(DiagnosticsEngine &Diag,
+ const SourceManager &SM,
+ SourceLocation Loc,
+ bool *Invalid) const {
// Lazily create the Buffer for ContentCaches that wrap files. If we already
// computed it, just return what we have.
if (Buffer.getPointer() || !ContentsEntry) {
@@ -109,6 +108,32 @@ llvm::MemoryBuffer *ContentCache::getBuffer(DiagnosticsEngine &Diag,
return Buffer.getPointer();
}
+ // Check that the file's size fits in an 'unsigned' (with room for a
+ // past-the-end value). This is deeply regrettable, but various parts of
+ // Clang (including elsewhere in this file!) use 'unsigned' to represent file
+ // offsets, line numbers, string literal lengths, and so on, and fail
+ // miserably on large source files.
+ if ((uint64_t)ContentsEntry->getSize() >=
+ std::numeric_limits<unsigned>::max()) {
+ // We can't make a memory buffer of the required size, so just make a small
+ // one. We should never hit a situation where we've already parsed to a
+ // later offset of the file, so it shouldn't matter that the buffer is
+ // smaller than the file.
+ Buffer.setPointer(
+ llvm::MemoryBuffer::getMemBuffer("", ContentsEntry->getName())
+ .release());
+ if (Diag.isDiagnosticInFlight())
+ Diag.SetDelayedDiagnostic(diag::err_file_too_large,
+ ContentsEntry->getName());
+ else
+ Diag.Report(Loc, diag::err_file_too_large)
+ << ContentsEntry->getName();
+
+ Buffer.setInt(Buffer.getInt() | InvalidFlag);
+ if (Invalid) *Invalid = true;
+ return Buffer.getPointer();
+ }
+
bool isVolatile = SM.userFilesAreVolatile() && !IsSystemFile;
auto BufferOrError =
SM.getFileManager().getBufferForFile(ContentsEntry, isVolatile);
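
The file-too-large guard added above reduces to a single comparison. A minimal standalone check (plain C++, not the clang API) showing why it uses '>=' rather than '>': one offset value stays reserved for a past-the-end position.

    #include <cstdint>
    #include <limits>

    // True if every byte offset in the file, plus one past-the-end value,
    // fits in an 'unsigned'.
    bool fileSizeFitsInUnsigned(uint64_t SizeInBytes) {
      return SizeInBytes < std::numeric_limits<unsigned>::max();
    }
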
@@ -168,16 +193,16 @@ llvm::MemoryBuffer *ContentCache::getBuffer(DiagnosticsEngine &Diag,
// http://en.wikipedia.org/wiki/Byte_order_mark for more information.
StringRef BufStr = Buffer.getPointer()->getBuffer();
const char *InvalidBOM = llvm::StringSwitch<const char *>(BufStr)
- .StartsWith("\xFE\xFF", "UTF-16 (BE)")
- .StartsWith("\xFF\xFE", "UTF-16 (LE)")
.StartsWith(llvm::StringLiteral::withInnerNUL("\x00\x00\xFE\xFF"),
"UTF-32 (BE)")
.StartsWith(llvm::StringLiteral::withInnerNUL("\xFF\xFE\x00\x00"),
"UTF-32 (LE)")
+ .StartsWith("\xFE\xFF", "UTF-16 (BE)")
+ .StartsWith("\xFF\xFE", "UTF-16 (LE)")
.StartsWith("\x2B\x2F\x76", "UTF-7")
.StartsWith("\xF7\x64\x4C", "UTF-1")
.StartsWith("\xDD\x73\x66\x73", "UTF-EBCDIC")
- .StartsWith("\x0E\xFE\xFF", "SDSU")
+ .StartsWith("\x0E\xFE\xFF", "SCSU")
.StartsWith("\xFB\xEE\x28", "BOCU-1")
.StartsWith("\x84\x31\x95\x33", "GB-18030")
.Default(nullptr);
@@ -253,9 +278,9 @@ const LineEntry *LineTableInfo::FindNearestLineEntry(FileID FID,
return &Entries.back();
// Do a binary search to find the maximal element that is still before Offset.
- std::vector<LineEntry>::const_iterator I =
- std::upper_bound(Entries.begin(), Entries.end(), Offset);
- if (I == Entries.begin()) return nullptr;
+ std::vector<LineEntry>::const_iterator I = llvm::upper_bound(Entries, Offset);
+ if (I == Entries.begin())
+ return nullptr;
return &*--I;
}
@@ -304,7 +329,7 @@ void SourceManager::AddLineNote(SourceLocation Loc, unsigned LineNo,
LineTableInfo &SourceManager::getLineTable() {
if (!LineTable)
- LineTable = new LineTableInfo();
+ LineTable.reset(new LineTableInfo());
return *LineTable;
}
@@ -320,8 +345,6 @@ SourceManager::SourceManager(DiagnosticsEngine &Diag, FileManager &FileMgr,
}
SourceManager::~SourceManager() {
- delete LineTable;
-
// Delete FileEntry objects corresponding to content caches. Since the actual
// content cache objects are bump pointer allocated, we just have to run the
// dtors, but we call the deallocate method for completeness.
@@ -424,7 +447,7 @@ SourceManager::getOrCreateContentCache(const FileEntry *FileEnt,
/// Create a new ContentCache for the specified memory buffer.
/// This does no caching.
const ContentCache *
-SourceManager::createMemBufferContentCache(llvm::MemoryBuffer *Buffer,
+SourceManager::createMemBufferContentCache(const llvm::MemoryBuffer *Buffer,
bool DoNotFree) {
// Add a new ContentCache to the MemBufferInfos list and return it.
ContentCache *Entry = ContentCacheAlloc.Allocate<ContentCache>();
@@ -619,8 +642,8 @@ SourceManager::createExpansionLocImpl(const ExpansionInfo &Info,
return SourceLocation::getMacroLoc(NextLocalOffset - (TokLength + 1));
}
-llvm::MemoryBuffer *SourceManager::getMemoryBufferForFile(const FileEntry *File,
- bool *Invalid) {
+const llvm::MemoryBuffer *
+SourceManager::getMemoryBufferForFile(const FileEntry *File, bool *Invalid) {
const SrcMgr::ContentCache *IR = getOrCreateContentCache(File);
assert(IR && "getOrCreateContentCache() cannot return NULL");
return IR->getBuffer(Diag, *this, SourceLocation(), Invalid);
@@ -676,7 +699,7 @@ StringRef SourceManager::getBufferData(FileID FID, bool *Invalid) const {
return "<<<<<INVALID SOURCE LOCATION>>>>>";
}
- llvm::MemoryBuffer *Buf = SLoc.getFile().getContentCache()->getBuffer(
+ const llvm::MemoryBuffer *Buf = SLoc.getFile().getContentCache()->getBuffer(
Diag, *this, SourceLocation(), &MyInvalid);
if (Invalid)
*Invalid = MyInvalid;
@@ -1106,8 +1129,9 @@ const char *SourceManager::getCharacterData(SourceLocation SL,
return "<<<<INVALID BUFFER>>>>";
}
- llvm::MemoryBuffer *Buffer = Entry.getFile().getContentCache()->getBuffer(
- Diag, *this, SourceLocation(), &CharDataInvalid);
+ const llvm::MemoryBuffer *Buffer =
+ Entry.getFile().getContentCache()->getBuffer(
+ Diag, *this, SourceLocation(), &CharDataInvalid);
if (Invalid)
*Invalid = CharDataInvalid;
return Buffer->getBufferStart() + (CharDataInvalid? 0 : LocInfo.second);
@@ -1118,7 +1142,7 @@ const char *SourceManager::getCharacterData(SourceLocation SL,
unsigned SourceManager::getColumnNumber(FileID FID, unsigned FilePos,
bool *Invalid) const {
bool MyInvalid = false;
- llvm::MemoryBuffer *MemBuf = getBuffer(FID, &MyInvalid);
+ const llvm::MemoryBuffer *MemBuf = getBuffer(FID, &MyInvalid);
if (Invalid)
*Invalid = MyInvalid;
@@ -1203,7 +1227,8 @@ static void ComputeLineNumbers(DiagnosticsEngine &Diag, ContentCache *FI,
llvm::BumpPtrAllocator &Alloc,
const SourceManager &SM, bool &Invalid) {
// Note that calling 'getBuffer()' may lazily page in the file.
- MemoryBuffer *Buffer = FI->getBuffer(Diag, SM, SourceLocation(), &Invalid);
+ const MemoryBuffer *Buffer =
+ FI->getBuffer(Diag, SM, SourceLocation(), &Invalid);
if (Invalid)
return;
@@ -1429,6 +1454,7 @@ PresumedLoc SourceManager::getPresumedLoc(SourceLocation Loc,
// To get the source name, first consult the FileEntry (if one exists)
// before the MemBuffer as this will avoid unnecessarily paging in the
// MemBuffer.
+ FileID FID = LocInfo.first;
StringRef Filename;
if (C->OrigEntry)
Filename = C->OrigEntry->getName();
@@ -1452,8 +1478,12 @@ PresumedLoc SourceManager::getPresumedLoc(SourceLocation Loc,
if (const LineEntry *Entry =
LineTable->FindNearestLineEntry(LocInfo.first, LocInfo.second)) {
// If the LineEntry indicates a filename, use it.
- if (Entry->FilenameID != -1)
+ if (Entry->FilenameID != -1) {
Filename = LineTable->getFilename(Entry->FilenameID);
+ // The contents of files referenced by #line are not in the
+ // SourceManager
+ FID = FileID::get(0);
+ }
// Use the line number specified by the LineEntry. This line number may
// be multiple lines down from the line entry. Add the difference in
@@ -1472,7 +1502,7 @@ PresumedLoc SourceManager::getPresumedLoc(SourceLocation Loc,
}
}
- return PresumedLoc(Filename.data(), LineNo, ColNo, IncludeLoc);
+ return PresumedLoc(Filename.data(), FID, LineNo, ColNo, IncludeLoc);
}
/// Returns whether the PresumedLoc for a given SourceLocation is
@@ -1581,7 +1611,7 @@ FileID SourceManager::translateFile(const FileEntry *SourceFile) const {
if (MainSLoc.isFile()) {
const ContentCache *MainContentCache
= MainSLoc.getFile().getContentCache();
- if (!MainContentCache) {
+ if (!MainContentCache || !MainContentCache->OrigEntry) {
// Can't do anything
} else if (MainContentCache->OrigEntry == SourceFile) {
FirstFID = MainFileID;
@@ -1721,7 +1751,7 @@ SourceLocation SourceManager::translateLineCol(FileID FID,
return FileLoc.getLocWithOffset(Size);
}
- llvm::MemoryBuffer *Buffer = Content->getBuffer(Diag, *this);
+ const llvm::MemoryBuffer *Buffer = Content->getBuffer(Diag, *this);
unsigned FilePos = Content->SourceLineCache[Line - 1];
const char *Buf = Buffer->getBufferStart() + FilePos;
unsigned BufLength = Buffer->getBufferSize() - FilePos;
diff --git a/lib/Basic/TargetInfo.cpp b/lib/Basic/TargetInfo.cpp
index 269fad38b8d5..a9dfe69b90c5 100644
--- a/lib/Basic/TargetInfo.cpp
+++ b/lib/Basic/TargetInfo.cpp
@@ -1,9 +1,8 @@
//===--- TargetInfo.cpp - Information about Target machine ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -35,6 +34,7 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
NoAsmVariants = false;
HasLegalHalfType = false;
HasFloat128 = false;
+ HasFloat16 = false;
PointerWidth = PointerAlign = 32;
BoolWidth = BoolAlign = 8;
IntWidth = IntAlign = 32;
@@ -373,6 +373,17 @@ void TargetInfo::adjust(LangOptions &Opts) {
LongDoubleFormat = &llvm::APFloat::IEEEquad();
}
+ if (Opts.LongDoubleSize) {
+ if (Opts.LongDoubleSize == DoubleWidth) {
+ LongDoubleWidth = DoubleWidth;
+ LongDoubleAlign = DoubleAlign;
+ LongDoubleFormat = DoubleFormat;
+ } else if (Opts.LongDoubleSize == 128) {
+ LongDoubleWidth = LongDoubleAlign = 128;
+ LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ }
+ }
+
if (Opts.NewAlignOverride)
NewAlign = Opts.NewAlignOverride * getCharWidth();
@@ -456,7 +467,7 @@ bool TargetInfo::isValidGCCRegisterName(StringRef Name) const {
}
// Check register names.
- if (std::find(Names.begin(), Names.end(), Name) != Names.end())
+ if (llvm::is_contained(Names, Name))
return true;
// Check any additional names that we have.
@@ -796,3 +807,9 @@ void TargetInfo::CheckFixedPointBits() const {
assert(getAccumIBits() >= getUnsignedAccumIBits());
assert(getLongAccumIBits() >= getUnsignedLongAccumIBits());
}
+
+void TargetInfo::copyAuxTarget(const TargetInfo *Aux) {
+ auto *Target = static_cast<TransferrableTargetInfo*>(this);
+ auto *Src = static_cast<const TransferrableTargetInfo*>(Aux);
+ *Target = *Src;
+}
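
copyAuxTarget above copies only the TransferrableTargetInfo subobject by assigning through base-class pointers, so fields declared in the derived class are left alone. A small sketch of that idiom with hypothetical types:

    #include <cassert>
    #include <string>

    struct TransferrableBits { int PointerWidth = 32; };

    struct Target : TransferrableBits {
      std::string Name; // derived-only state that must survive the copy

      void copyBitsFrom(const Target &Aux) {
        auto *Dst = static_cast<TransferrableBits *>(this);
        auto *Src = static_cast<const TransferrableBits *>(&Aux);
        *Dst = *Src; // copies PointerWidth only
      }
    };

    int main() {
      Target Host, Device;
      Host.Name = "host";
      Host.PointerWidth = 64;
      Device.Name = "device";
      Device.copyBitsFrom(Host);
      assert(Device.PointerWidth == 64 && Device.Name == "device");
    }
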
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index cf87bc484621..a08e399e7270 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -1,9 +1,8 @@
//===--- Targets.cpp - Implement target feature support -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -99,19 +98,6 @@ void addCygMingDefines(const LangOptions &Opts, MacroBuilder &Builder) {
}
}
-void addMinGWDefines(const llvm::Triple &Triple, const LangOptions &Opts,
- MacroBuilder &Builder) {
- DefineStd(Builder, "WIN32", Opts);
- DefineStd(Builder, "WINNT", Opts);
- if (Triple.isArch64Bit()) {
- DefineStd(Builder, "WIN64", Opts);
- Builder.defineMacro("__MINGW64__");
- }
- Builder.defineMacro("__MSVCRT__");
- Builder.defineMacro("__MINGW32__");
- addCygMingDefines(Opts, Builder);
-}
-
//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//
@@ -333,6 +319,8 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new OpenBSDTargetInfo<PPC32TargetInfo>(Triple, Opts);
case llvm::Triple::RTEMS:
return new RTEMSTargetInfo<PPC32TargetInfo>(Triple, Opts);
+ case llvm::Triple::AIX:
+ return new AIXPPC32TargetInfo(Triple, Opts);
default:
return new PPC32TargetInfo(Triple, Opts);
}
@@ -349,6 +337,8 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new FreeBSDTargetInfo<PPC64TargetInfo>(Triple, Opts);
case llvm::Triple::NetBSD:
return new NetBSDTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ case llvm::Triple::AIX:
+ return new AIXPPC64TargetInfo(Triple, Opts);
default:
return new PPC64TargetInfo(Triple, Opts);
}
@@ -570,19 +560,31 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
Triple.getVendor() != llvm::Triple::UnknownVendor ||
!Triple.isOSBinFormatWasm())
return nullptr;
- if (Triple.getOS() != llvm::Triple::UnknownOS &&
- Triple.getOS() != llvm::Triple::WASI)
- return nullptr;
- return new WebAssemblyOSTargetInfo<WebAssembly32TargetInfo>(Triple, Opts);
+ switch (Triple.getOS()) {
+ case llvm::Triple::WASI:
+ return new WASITargetInfo<WebAssembly32TargetInfo>(Triple, Opts);
+ case llvm::Triple::Emscripten:
+ return new EmscriptenTargetInfo<WebAssembly32TargetInfo>(Triple, Opts);
+ case llvm::Triple::UnknownOS:
+ return new WebAssemblyOSTargetInfo<WebAssembly32TargetInfo>(Triple, Opts);
+ default:
+ return nullptr;
+ }
case llvm::Triple::wasm64:
if (Triple.getSubArch() != llvm::Triple::NoSubArch ||
Triple.getVendor() != llvm::Triple::UnknownVendor ||
!Triple.isOSBinFormatWasm())
return nullptr;
- if (Triple.getOS() != llvm::Triple::UnknownOS &&
- Triple.getOS() != llvm::Triple::WASI)
- return nullptr;
- return new WebAssemblyOSTargetInfo<WebAssembly64TargetInfo>(Triple, Opts);
+ switch (Triple.getOS()) {
+ case llvm::Triple::WASI:
+ return new WASITargetInfo<WebAssembly64TargetInfo>(Triple, Opts);
+ case llvm::Triple::Emscripten:
+ return new EmscriptenTargetInfo<WebAssembly64TargetInfo>(Triple, Opts);
+ case llvm::Triple::UnknownOS:
+ return new WebAssemblyOSTargetInfo<WebAssembly64TargetInfo>(Triple, Opts);
+ default:
+ return nullptr;
+ }
case llvm::Triple::renderscript32:
return new LinuxTargetInfo<RenderScript32TargetInfo>(Triple, Opts);
diff --git a/lib/Basic/Targets.h b/lib/Basic/Targets.h
index d450aa3f37ed..a063204e69e6 100644
--- a/lib/Basic/Targets.h
+++ b/lib/Basic/Targets.h
@@ -1,9 +1,8 @@
//===------- Targets.h - Declare target feature support ---------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -40,10 +39,6 @@ void defineCPUMacros(clang::MacroBuilder &Builder, llvm::StringRef CPUName,
bool Tuning = true);
LLVM_LIBRARY_VISIBILITY
-void addMinGWDefines(const llvm::Triple &Triple, const clang::LangOptions &Opts,
- clang::MacroBuilder &Builder);
-
-LLVM_LIBRARY_VISIBILITY
void addCygMingDefines(const clang::LangOptions &Opts,
clang::MacroBuilder &Builder);
} // namespace targets
diff --git a/lib/Basic/Targets/AArch64.cpp b/lib/Basic/Targets/AArch64.cpp
index 62919a02dcb9..74ac69ab8946 100644
--- a/lib/Basic/Targets/AArch64.cpp
+++ b/lib/Basic/Targets/AArch64.cpp
@@ -1,9 +1,8 @@
//===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -50,6 +49,7 @@ AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
// All AArch64 implementations support ARMv8 FP, which makes half a legal type.
HasLegalHalfType = true;
+ HasFloat16 = true;
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
MaxVectorAlign = 128;
@@ -118,6 +118,28 @@ void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
getTargetDefinesARMV81A(Opts, Builder);
}
+void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
+ // Also include the Armv8.2 defines
+ getTargetDefinesARMV82A(Opts, Builder);
+}
+
+void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Also include the Armv8.3 defines
+ // FIXME: Armv8.4 makes some extensions mandatory. Handle them here.
+ getTargetDefinesARMV83A(Opts, Builder);
+}
+
+void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Also include the Armv8.4 defines
+ // FIXME: Armv8.5 makes some extensions mandatory. Handle them here.
+ getTargetDefinesARMV84A(Opts, Builder);
+}
+
+
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Target identification.
@@ -177,13 +199,13 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (FPU & SveMode)
Builder.defineMacro("__ARM_FEATURE_SVE", "1");
- if (CRC)
+ if (HasCRC)
Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
- if (Crypto)
+ if (HasCrypto)
Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");
- if (Unaligned)
+ if (HasUnaligned)
Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
if ((FPU & NeonMode) && HasFullFP16)
@@ -194,6 +216,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasDotProd)
Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");
+ if (HasMTE)
+ Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");
+
if ((FPU & NeonMode) && HasFP16FML)
Builder.defineMacro("__ARM_FEATURE_FP16FML", "1");
@@ -206,6 +231,15 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
case llvm::AArch64::ArchKind::ARMV8_2A:
getTargetDefinesARMV82A(Opts, Builder);
break;
+ case llvm::AArch64::ArchKind::ARMV8_3A:
+ getTargetDefinesARMV83A(Opts, Builder);
+ break;
+ case llvm::AArch64::ArchKind::ARMV8_4A:
+ getTargetDefinesARMV84A(Opts, Builder);
+ break;
+ case llvm::AArch64::ArchKind::ARMV8_5A:
+ getTargetDefinesARMV85A(Opts, Builder);
+ break;
}
// All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
@@ -229,12 +263,13 @@ bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) {
FPU = FPUMode;
- CRC = 0;
- Crypto = 0;
- Unaligned = 1;
- HasFullFP16 = 0;
- HasDotProd = 0;
- HasFP16FML = 0;
+ HasCRC = false;
+ HasCrypto = false;
+ HasUnaligned = true;
+ HasFullFP16 = false;
+ HasDotProd = false;
+ HasFP16FML = false;
+ HasMTE = false;
ArchKind = llvm::AArch64::ArchKind::ARMV8A;
for (const auto &Feature : Features) {
@@ -243,21 +278,29 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
if (Feature == "+sve")
FPU |= SveMode;
if (Feature == "+crc")
- CRC = 1;
+ HasCRC = true;
if (Feature == "+crypto")
- Crypto = 1;
+ HasCrypto = true;
if (Feature == "+strict-align")
- Unaligned = 0;
+ HasUnaligned = false;
if (Feature == "+v8.1a")
ArchKind = llvm::AArch64::ArchKind::ARMV8_1A;
if (Feature == "+v8.2a")
ArchKind = llvm::AArch64::ArchKind::ARMV8_2A;
+ if (Feature == "+v8.3a")
+ ArchKind = llvm::AArch64::ArchKind::ARMV8_3A;
+ if (Feature == "+v8.4a")
+ ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
+ if (Feature == "+v8.5a")
+ ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
if (Feature == "+fullfp16")
- HasFullFP16 = 1;
+ HasFullFP16 = true;
if (Feature == "+dotprod")
- HasDotProd = 1;
+ HasDotProd = true;
if (Feature == "+fp16fml")
- HasFP16FML = 1;
+ HasFP16FML = true;
+ if (Feature == "+mte")
+ HasMTE = true;
}
setDataLayout();
@@ -528,16 +571,10 @@ MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
TheCXXABI.set(TargetCXXABI::Microsoft);
}
-void MicrosoftARM64TargetInfo::getVisualStudioDefines(
- const LangOptions &Opts, MacroBuilder &Builder) const {
- WindowsTargetInfo<AArch64leTargetInfo>::getVisualStudioDefines(Opts, Builder);
- Builder.defineMacro("_M_ARM64", "1");
-}
-
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
- WindowsTargetInfo::getTargetDefines(Opts, Builder);
- getVisualStudioDefines(Opts, Builder);
+ WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
+ Builder.defineMacro("_M_ARM64", "1");
}
TargetInfo::CallingConvKind
@@ -545,6 +582,23 @@ MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
return CCK_MicrosoftWin64;
}
+unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
+ unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);
+
+ // MSVC does size based alignment for arm64 based on alignment section in
+ // below document, replicate that to keep alignment consistent with object
+ // files compiled by MSVC.
+ // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
+ if (TypeSize >= 512) { // TypeSize >= 64 bytes
+ Align = std::max(Align, 128u); // align type at least 16 bytes
+ } else if (TypeSize >= 64) { // TypeSize >= 8 bytes
+ Align = std::max(Align, 64u); // align type at least 8 bytes
+ } else if (TypeSize >= 16) { // TypeSize >= 2 bytes
+ Align = std::max(Align, 32u); // align type at least 4 bytes
+ }
+ return Align;
+}
+
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: WindowsARM64TargetInfo(Triple, Opts) {
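
The MSVC-compatible size-based minimum global alignment added above can be exercised in isolation. A minimal sketch mirroring the thresholds in the patch (sizes and alignments in bits, as elsewhere in TargetInfo):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    unsigned minGlobalAlignBits(uint64_t TypeSizeBits, unsigned BaseAlign) {
      unsigned Align = BaseAlign;
      if (TypeSizeBits >= 512)       // >= 64 bytes -> at least 16-byte alignment
        Align = std::max(Align, 128u);
      else if (TypeSizeBits >= 64)   // >= 8 bytes  -> at least 8-byte alignment
        Align = std::max(Align, 64u);
      else if (TypeSizeBits >= 16)   // >= 2 bytes  -> at least 4-byte alignment
        Align = std::max(Align, 32u);
      return Align;
    }

    int main() {
      assert(minGlobalAlignBits(256, 8) == 64);   // 32-byte global -> 8-byte alignment
      assert(minGlobalAlignBits(1024, 8) == 128); // 128-byte global -> 16-byte alignment
    }
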
diff --git a/lib/Basic/Targets/AArch64.h b/lib/Basic/Targets/AArch64.h
index d7f767abd4d1..5833c146003b 100644
--- a/lib/Basic/Targets/AArch64.h
+++ b/lib/Basic/Targets/AArch64.h
@@ -1,9 +1,8 @@
//===--- AArch64.h - Declare AArch64 target feature support -----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -29,12 +28,14 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
enum FPUModeEnum { FPUMode, NeonMode = (1 << 0), SveMode = (1 << 1) };
unsigned FPU;
- unsigned CRC;
- unsigned Crypto;
- unsigned Unaligned;
- unsigned HasFullFP16;
- unsigned HasDotProd;
- unsigned HasFP16FML;
+ bool HasCRC;
+ bool HasCrypto;
+ bool HasUnaligned;
+ bool HasFullFP16;
+ bool HasDotProd;
+ bool HasFP16FML;
+ bool HasMTE;
+
llvm::AArch64::ArchKind ArchKind;
static const Builtin::Info BuiltinInfo[];
@@ -59,6 +60,12 @@ public:
MacroBuilder &Builder) const;
void getTargetDefinesARMV82A(const LangOptions &Opts,
MacroBuilder &Builder) const;
+ void getTargetDefinesARMV83A(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
+ void getTargetDefinesARMV84A(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
+ void getTargetDefinesARMV85A(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
@@ -123,12 +130,12 @@ public:
MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts);
- void getVisualStudioDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const;
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
TargetInfo::CallingConvKind
getCallingConvKind(bool ClangABICompat4) const override;
+
+ unsigned getMinGlobalAlign(uint64_t TypeSize) const override;
};
// ARM64 MinGW target
diff --git a/lib/Basic/Targets/AMDGPU.cpp b/lib/Basic/Targets/AMDGPU.cpp
index 7313a692f46b..b5c82e288570 100644
--- a/lib/Basic/Targets/AMDGPU.cpp
+++ b/lib/Basic/Targets/AMDGPU.cpp
@@ -1,9 +1,8 @@
//===--- AMDGPU.cpp - Implement AMDGPU target feature support -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -35,7 +34,8 @@ static const char *const DataLayoutStringR600 =
static const char *const DataLayoutStringAMDGCN =
"e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
"-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
- "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
+ "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+ "-ni:7";
const LangASMap AMDGPUTargetInfo::AMDGPUDefIsGenMap = {
Generic, // Default
@@ -135,9 +135,33 @@ bool AMDGPUTargetInfo::initFeatureMap(
CPU = "gfx600";
switch (llvm::AMDGPU::parseArchAMDGCN(CPU)) {
+ case GK_GFX1012:
+ case GK_GFX1011:
+ Features["dot1-insts"] = true;
+ Features["dot2-insts"] = true;
+ Features["dot5-insts"] = true;
+ Features["dot6-insts"] = true;
+ LLVM_FALLTHROUGH;
+ case GK_GFX1010:
+ Features["dl-insts"] = true;
+ Features["ci-insts"] = true;
+ Features["16-bit-insts"] = true;
+ Features["dpp"] = true;
+ Features["gfx8-insts"] = true;
+ Features["gfx9-insts"] = true;
+ Features["gfx10-insts"] = true;
+ Features["s-memrealtime"] = true;
+ break;
+ case GK_GFX908:
+ Features["dot3-insts"] = true;
+ Features["dot4-insts"] = true;
+ Features["dot5-insts"] = true;
+ Features["dot6-insts"] = true;
+ LLVM_FALLTHROUGH;
case GK_GFX906:
Features["dl-insts"] = true;
- Features["dot-insts"] = true;
+ Features["dot1-insts"] = true;
+ Features["dot2-insts"] = true;
LLVM_FALLTHROUGH;
case GK_GFX909:
case GK_GFX904:
@@ -149,7 +173,7 @@ bool AMDGPUTargetInfo::initFeatureMap(
case GK_GFX803:
case GK_GFX802:
case GK_GFX801:
- Features["vi-insts"] = true;
+ Features["gfx8-insts"] = true;
Features["16-bit-insts"] = true;
Features["dpp"] = true;
Features["s-memrealtime"] = true;
@@ -251,6 +275,9 @@ AMDGPUTargetInfo::AMDGPUTargetInfo(const llvm::Triple &Triple,
!isAMDGCN(Triple));
UseAddrSpaceMapMangling = true;
+ HasLegalHalfType = true;
+ HasFloat16 = true;
+
// Set pointer width and alignment for target address space 0.
PointerWidth = PointerAlign = DataLayout->getPointerSizeInBits();
if (getMaxPointerWidth() == 64) {
@@ -306,3 +333,18 @@ void AMDGPUTargetInfo::getTargetDefines(const LangOptions &Opts,
if (hasFastFMA())
Builder.defineMacro("FP_FAST_FMA");
}
+
+void AMDGPUTargetInfo::setAuxTarget(const TargetInfo *Aux) {
+ assert(HalfFormat == Aux->HalfFormat);
+ assert(FloatFormat == Aux->FloatFormat);
+ assert(DoubleFormat == Aux->DoubleFormat);
+
+ // On x86_64 long double is 80-bit extended precision format, which is
+ // not supported by AMDGPU. 128-bit floating point format is also not
+ // supported by AMDGPU. Therefore keep its own format for these two types.
+ auto SaveLongDoubleFormat = LongDoubleFormat;
+ auto SaveFloat128Format = Float128Format;
+ copyAuxTarget(Aux);
+ LongDoubleFormat = SaveLongDoubleFormat;
+ Float128Format = SaveFloat128Format;
+}
diff --git a/lib/Basic/Targets/AMDGPU.h b/lib/Basic/Targets/AMDGPU.h
index 926772809aa7..456cb2ebb8b5 100644
--- a/lib/Basic/Targets/AMDGPU.h
+++ b/lib/Basic/Targets/AMDGPU.h
@@ -1,9 +1,8 @@
//===--- AMDGPU.h - Declare AMDGPU target feature support -------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -42,7 +41,6 @@ class LLVM_LIBRARY_VISIBILITY AMDGPUTargetInfo final : public TargetInfo {
llvm::AMDGPU::GPUKind GPUKind;
unsigned GPUFeatures;
-
bool hasFP64() const {
return getTriple().getArch() == llvm::Triple::amdgcn ||
!!(GPUFeatures & llvm::AMDGPU::FEATURE_FP64);
@@ -352,6 +350,8 @@ public:
uint64_t getNullPointerValue(LangAS AS) const override {
return AS == LangAS::opencl_local ? ~0 : 0;
}
+
+ void setAuxTarget(const TargetInfo *Aux) override;
};
} // namespace targets
diff --git a/lib/Basic/Targets/ARC.cpp b/lib/Basic/Targets/ARC.cpp
index 2159ab8e2020..5cc13e2cf728 100644
--- a/lib/Basic/Targets/ARC.cpp
+++ b/lib/Basic/Targets/ARC.cpp
@@ -1,9 +1,8 @@
//===--- ARC.cpp - Implement ARC target feature support -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -22,4 +21,4 @@ using namespace clang::targets;
void ARCTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__arc__");
-}
+}
diff --git a/lib/Basic/Targets/ARC.h b/lib/Basic/Targets/ARC.h
index ee20568f3d5b..c43a39984edb 100644
--- a/lib/Basic/Targets/ARC.h
+++ b/lib/Basic/Targets/ARC.h
@@ -1,9 +1,8 @@
//===--- ARC.h - Declare ARC target feature support -------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/Targets/ARM.cpp b/lib/Basic/Targets/ARM.cpp
index 16644ace108b..c6834b9fac15 100644
--- a/lib/Basic/Targets/ARM.cpp
+++ b/lib/Basic/Targets/ARM.cpp
@@ -1,9 +1,8 @@
//===--- ARM.cpp - Implement ARM target feature support -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -41,13 +40,14 @@ void ARMTargetInfo::setABIAAPCS() {
// so set preferred for small types to 32.
if (T.isOSBinFormatMachO()) {
resetDataLayout(BigEndian
- ? "E-m:o-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
- : "e-m:o-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64");
+ ? "E-m:o-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+ : "e-m:o-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64");
} else if (T.isOSWindows()) {
assert(!BigEndian && "Windows on ARM does not support big endian");
resetDataLayout("e"
"-m:w"
"-p:32:32"
+ "-Fi8"
"-i64:64"
"-v128:64:128"
"-a:0:32"
@@ -55,11 +55,11 @@ void ARMTargetInfo::setABIAAPCS() {
"-S64");
} else if (T.isOSNaCl()) {
assert(!BigEndian && "NaCl on ARM does not support big endian");
- resetDataLayout("e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S128");
+ resetDataLayout("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S128");
} else {
resetDataLayout(BigEndian
- ? "E-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
- : "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64");
+ ? "E-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+ : "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64");
}
// FIXME: Enumerated types are variable width in straight AAPCS.
@@ -88,17 +88,17 @@ void ARMTargetInfo::setABIAPCS(bool IsAAPCS16) {
if (T.isOSBinFormatMachO() && IsAAPCS16) {
assert(!BigEndian && "AAPCS16 does not support big-endian");
- resetDataLayout("e-m:o-p:32:32-i64:64-a:0:32-n32-S128");
+ resetDataLayout("e-m:o-p:32:32-Fi8-i64:64-a:0:32-n32-S128");
} else if (T.isOSBinFormatMachO())
resetDataLayout(
BigEndian
- ? "E-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
- : "e-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32");
+ ? "E-m:o-p:32:32-Fi8-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
+ : "e-m:o-p:32:32-Fi8-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32");
else
resetDataLayout(
BigEndian
- ? "E-m:e-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
- : "e-m:e-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32");
+ ? "E-m:e-p:32:32-Fi8-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
+ : "e-m:e-p:32:32-Fi8-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32");
// FIXME: Override "preferred align" for double and long long.
}
@@ -146,6 +146,14 @@ void ARMTargetInfo::setAtomic() {
}
}
+bool ARMTargetInfo::hasMVE() const {
+ return ArchKind == llvm::ARM::ArchKind::ARMV8_1MMainline && MVE != 0;
+}
+
+bool ARMTargetInfo::hasMVEFloat() const {
+ return hasMVE() && (MVE & MVE_FP);
+}
+
bool ARMTargetInfo::isThumb() const {
return ArchISA == llvm::ARM::ISAKind::THUMB;
}
@@ -197,6 +205,8 @@ StringRef ARMTargetInfo::getCPUAttr() const {
return "8M_MAIN";
case llvm::ARM::ArchKind::ARMV8R:
return "8R";
+ case llvm::ARM::ArchKind::ARMV8_1MMainline:
+ return "8_1M_MAIN";
}
}
@@ -313,6 +323,8 @@ ARMTargetInfo::ARMTargetInfo(const llvm::Triple &Triple,
this->MCountName = Opts.EABIVersion == llvm::EABI::GNU
? "\01__gnu_mcount_nc"
: "\01mcount";
+
+ SoftFloatABI = llvm::is_contained(Opts.FeaturesAsWritten, "+soft-float-abi");
}
StringRef ARMTargetInfo::getABI() const { return ABI; }
@@ -375,12 +387,21 @@ bool ARMTargetInfo::initFeatureMap(
// Convert user-provided arm and thumb GNU target attributes to
// [-|+]thumb-mode target features respectively.
- std::vector<std::string> UpdatedFeaturesVec(FeaturesVec);
- for (auto &Feature : UpdatedFeaturesVec) {
- if (Feature.compare("+arm") == 0)
- Feature = "-thumb-mode";
- else if (Feature.compare("+thumb") == 0)
- Feature = "+thumb-mode";
+ std::vector<std::string> UpdatedFeaturesVec;
+ for (const auto &Feature : FeaturesVec) {
+ // Skip soft-float-abi; it's something we only use to initialize a bit of
+ // class state, and is otherwise unrecognized.
+ if (Feature == "+soft-float-abi")
+ continue;
+
+ StringRef FixedFeature;
+ if (Feature == "+arm")
+ FixedFeature = "-thumb-mode";
+ else if (Feature == "+thumb")
+ FixedFeature = "+thumb-mode";
+ else
+ FixedFeature = Feature;
+ UpdatedFeaturesVec.push_back(FixedFeature.str());
}
return TargetInfo::initFeatureMap(Features, Diags, CPU, UpdatedFeaturesVec);
@@ -390,37 +411,49 @@ bool ARMTargetInfo::initFeatureMap(
bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) {
FPU = 0;
+ MVE = 0;
CRC = 0;
Crypto = 0;
DSP = 0;
Unaligned = 1;
- SoftFloat = SoftFloatABI = false;
+ SoftFloat = false;
+ // Note that SoftFloatABI is initialized in our constructor.
HWDiv = 0;
DotProd = 0;
+ HasFloat16 = true;
// This does not diagnose illegal cases like having both
- // "+vfpv2" and "+vfpv3" or having "+neon" and "+fp-only-sp".
- uint32_t HW_FP_remove = 0;
+ // "+vfpv2" and "+vfpv3" or having "+neon" and "-fp64".
for (const auto &Feature : Features) {
if (Feature == "+soft-float") {
SoftFloat = true;
- } else if (Feature == "+soft-float-abi") {
- SoftFloatABI = true;
- } else if (Feature == "+vfp2") {
+ } else if (Feature == "+vfp2sp" || Feature == "+vfp2d16sp" ||
+ Feature == "+vfp2" || Feature == "+vfp2d16") {
FPU |= VFP2FPU;
- HW_FP |= HW_FP_SP | HW_FP_DP;
- } else if (Feature == "+vfp3") {
+ HW_FP |= HW_FP_SP;
+ if (Feature == "+vfp2" || Feature == "+vfp2d16")
+ HW_FP |= HW_FP_DP;
+ } else if (Feature == "+vfp3sp" || Feature == "+vfp3d16sp" ||
+ Feature == "+vfp3" || Feature == "+vfp3d16") {
FPU |= VFP3FPU;
- HW_FP |= HW_FP_SP | HW_FP_DP;
- } else if (Feature == "+vfp4") {
+ HW_FP |= HW_FP_SP;
+ if (Feature == "+vfp3" || Feature == "+vfp3d16")
+ HW_FP |= HW_FP_DP;
+ } else if (Feature == "+vfp4sp" || Feature == "+vfp4d16sp" ||
+ Feature == "+vfp4" || Feature == "+vfp4d16") {
FPU |= VFP4FPU;
- HW_FP |= HW_FP_SP | HW_FP_DP | HW_FP_HP;
- } else if (Feature == "+fp-armv8") {
+ HW_FP |= HW_FP_SP | HW_FP_HP;
+ if (Feature == "+vfp4" || Feature == "+vfp4d16")
+ HW_FP |= HW_FP_DP;
+ } else if (Feature == "+fp-armv8sp" || Feature == "+fp-armv8d16sp" ||
+ Feature == "+fp-armv8" || Feature == "+fp-armv8d16") {
FPU |= FPARMV8;
- HW_FP |= HW_FP_SP | HW_FP_DP | HW_FP_HP;
+ HW_FP |= HW_FP_SP | HW_FP_HP;
+ if (Feature == "+fp-armv8" || Feature == "+fp-armv8d16")
+ HW_FP |= HW_FP_DP;
} else if (Feature == "+neon") {
FPU |= NeonFPU;
- HW_FP |= HW_FP_SP | HW_FP_DP;
+ HW_FP |= HW_FP_SP;
} else if (Feature == "+hwdiv") {
HWDiv |= HWDivThumb;
} else if (Feature == "+hwdiv-arm") {
@@ -431,8 +464,13 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
Crypto = 1;
} else if (Feature == "+dsp") {
DSP = 1;
- } else if (Feature == "+fp-only-sp") {
- HW_FP_remove |= HW_FP_DP;
+ } else if (Feature == "+fp64") {
+ HW_FP |= HW_FP_DP;
+ } else if (Feature == "+8msecext") {
+ if (CPUProfile != "M" || ArchVersion != 8) {
+ Diags.Report(diag::err_target_unsupported_mcmse) << CPU;
+ return false;
+ }
} else if (Feature == "+strict-align") {
Unaligned = 0;
} else if (Feature == "+fp16") {
@@ -441,9 +479,17 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasLegalHalfType = true;
} else if (Feature == "+dotprod") {
DotProd = true;
+ } else if (Feature == "+mve") {
+ DSP = 1;
+ MVE |= MVE_INT;
+ } else if (Feature == "+mve.fp") {
+ DSP = 1;
+ HasLegalHalfType = true;
+ FPU |= FPARMV8;
+ MVE |= MVE_INT | MVE_FP;
+ HW_FP |= HW_FP_SP | HW_FP_HP;
}
}
- HW_FP &= ~HW_FP_remove;
switch (ArchVersion) {
case 6:
@@ -474,11 +520,6 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
else if (FPMath == FP_VFP)
Features.push_back("-neonfp");
- // Remove front-end specific options which the backend handles differently.
- auto Feature = std::find(Features.begin(), Features.end(), "+soft-float-abi");
- if (Feature != Features.end())
- Features.erase(Feature);
-
return true;
}
@@ -492,6 +533,7 @@ bool ARMTargetInfo::hasFeature(StringRef Feature) const {
.Case("vfp", FPU && !SoftFloat)
.Case("hwdiv", HWDiv & HWDivThumb)
.Case("hwdiv-arm", HWDiv & HWDivARM)
+ .Case("mve", hasMVE())
.Default(false);
}
@@ -652,6 +694,12 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
if (SoftFloat)
Builder.defineMacro("__SOFTFP__");
+ // ACLE position independent code macros.
+ if (Opts.ROPI)
+ Builder.defineMacro("__ARM_ROPI", "1");
+ if (Opts.RWPI)
+ Builder.defineMacro("__ARM_RWPI", "1");
+
if (ArchKind == llvm::ARM::ArchKind::XSCALE)
Builder.defineMacro("__XSCALE__");
@@ -701,11 +749,19 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
"0x" + Twine::utohexstr(HW_FP & ~HW_FP_DP));
}
+ if (hasMVE()) {
+ Builder.defineMacro("__ARM_FEATURE_MVE", hasMVEFloat() ? "3" : "1");
+ }
+
Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
Twine(Opts.WCharSize ? Opts.WCharSize : 4));
Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");
+ // CMSE
+ if (ArchVersion == 8 && ArchProfile == llvm::ARM::ProfileKind::M)
+ Builder.defineMacro("__ARM_FEATURE_CMSE", Opts.Cmse ? "3" : "1");
+
if (ArchVersion >= 6 && CPUAttr != "6M" && CPUAttr != "8M_BASE") {
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
@@ -844,6 +900,17 @@ bool ARMTargetInfo::validateAsmConstraint(
case 'Q': // A memory address that is a single base register.
Info.setAllowsMemory();
return true;
+ case 'T':
+ switch (Name[1]) {
+ default:
+ break;
+ case 'e': // Even general-purpose register
+ case 'o': // Odd general-purpose register
+ Info.setAllowsRegister();
+ Name++;
+ return true;
+ }
+ break;
case 'U': // a memory reference...
switch (Name[1]) {
case 'q': // ...ARMV4 ldrsb
@@ -859,6 +926,7 @@ bool ARMTargetInfo::validateAsmConstraint(
Name++;
return true;
}
+ break;
}
return false;
}
@@ -867,6 +935,7 @@ std::string ARMTargetInfo::convertConstraint(const char *&Constraint) const {
std::string R;
switch (*Constraint) {
case 'U': // Two-character constraint; add "^" hint for later parsing.
+ case 'T':
R = std::string("^") + std::string(Constraint, 2);
Constraint++;
break;
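A hypothetical use of the new 'T' constraints (the empty asm only demonstrates the register-class request; instructions such as MVE's VADDLV, which split a 64-bit result across an even/odd register pair, are the kind of consumer these are meant for):

    void demo(unsigned lo, unsigned hi) {
      // "Te" requests an even-numbered GPR, "To" an odd-numbered one.
      __asm__ volatile("" : : "Te"(lo), "To"(hi));
    }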
@@ -961,8 +1030,6 @@ WindowsARMTargetInfo::WindowsARMTargetInfo(const llvm::Triple &Triple,
void WindowsARMTargetInfo::getVisualStudioDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
- WindowsTargetInfo<ARMleTargetInfo>::getVisualStudioDefines(Opts, Builder);
-
// FIXME: this is invalid for WindowsCE
Builder.defineMacro("_M_ARM_NT", "1");
Builder.defineMacro("_M_ARMT", "_M_ARM");
@@ -1049,7 +1116,7 @@ CygwinARMTargetInfo::CygwinARMTargetInfo(const llvm::Triple &Triple,
this->WCharType = TargetInfo::UnsignedShort;
TLSSupported = false;
DoubleAlign = LongLongAlign = 64;
- resetDataLayout("e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64");
+ resetDataLayout("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64");
}
void CygwinARMTargetInfo::getTargetDefines(const LangOptions &Opts,
diff --git a/lib/Basic/Targets/ARM.h b/lib/Basic/Targets/ARM.h
index 9c72c3387f7a..ce87a6265934 100644
--- a/lib/Basic/Targets/ARM.h
+++ b/lib/Basic/Targets/ARM.h
@@ -1,9 +1,8 @@
//===--- ARM.h - Declare ARM target feature support -------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -34,6 +33,11 @@ class LLVM_LIBRARY_VISIBILITY ARMTargetInfo : public TargetInfo {
FPARMV8 = (1 << 4)
};
+ enum MVEMode {
+ MVE_INT = (1 << 0),
+ MVE_FP = (1 << 1)
+ };
+
// Possible HWDiv features.
enum HWDivMode { HWDivThumb = (1 << 0), HWDivARM = (1 << 1) };
@@ -57,6 +61,7 @@ class LLVM_LIBRARY_VISIBILITY ARMTargetInfo : public TargetInfo {
unsigned ArchVersion;
unsigned FPU : 5;
+ unsigned MVE : 2;
unsigned IsAAPCS : 1;
unsigned HWDiv : 2;
@@ -101,6 +106,8 @@ class LLVM_LIBRARY_VISIBILITY ARMTargetInfo : public TargetInfo {
bool isThumb() const;
bool supportsThumb() const;
bool supportsThumb2() const;
+ bool hasMVE() const;
+ bool hasMVEFloat() const;
StringRef getCPUAttr() const;
StringRef getCPUProfile() const;
@@ -117,6 +124,12 @@ public:
StringRef CPU,
const std::vector<std::string> &FeaturesVec) const override;
+ bool isValidFeatureName(StringRef Feature) const override {
+ // We pass soft-float-abi in as a -target-feature, but the backend figures
+ // this out through other means.
+ return Feature != "soft-float-abi";
+ }
+
bool handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) override;
diff --git a/lib/Basic/Targets/AVR.cpp b/lib/Basic/Targets/AVR.cpp
index 9b66449cbca6..d865676700b5 100644
--- a/lib/Basic/Targets/AVR.cpp
+++ b/lib/Basic/Targets/AVR.cpp
@@ -1,9 +1,8 @@
//===--- AVR.cpp - Implement AVR target feature support -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/Targets/AVR.h b/lib/Basic/Targets/AVR.h
index d595f48e8ef7..94f006ee1b8a 100644
--- a/lib/Basic/Targets/AVR.h
+++ b/lib/Basic/Targets/AVR.h
@@ -1,9 +1,8 @@
//===--- AVR.h - Declare AVR target feature support -------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/Targets/BPF.cpp b/lib/Basic/Targets/BPF.cpp
index cf41a09d76f5..0cf55a58a951 100644
--- a/lib/Basic/Targets/BPF.cpp
+++ b/lib/Basic/Targets/BPF.cpp
@@ -1,9 +1,8 @@
//===--- BPF.cpp - Implement BPF target feature support -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -21,12 +20,12 @@ using namespace clang::targets;
void BPFTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
- DefineStd(Builder, "bpf", Opts);
+ Builder.defineMacro("__bpf__");
Builder.defineMacro("__BPF__");
}
static constexpr llvm::StringLiteral ValidCPUNames[] = {"generic", "v1", "v2",
- "probe"};
+ "v3", "probe"};
bool BPFTargetInfo::isValidCPUName(StringRef Name) const {
return llvm::find(ValidCPUNames, Name) != std::end(ValidCPUNames);
diff --git a/lib/Basic/Targets/BPF.h b/lib/Basic/Targets/BPF.h
index 7f97f8189145..79abd8828a2c 100644
--- a/lib/Basic/Targets/BPF.h
+++ b/lib/Basic/Targets/BPF.h
@@ -1,9 +1,8 @@
//===--- BPF.h - Declare BPF target feature support -------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/Targets/Hexagon.cpp b/lib/Basic/Targets/Hexagon.cpp
index 94e1388e381e..be23fd2536e0 100644
--- a/lib/Basic/Targets/Hexagon.cpp
+++ b/lib/Basic/Targets/Hexagon.cpp
@@ -1,9 +1,8 @@
//===--- Hexagon.cpp - Implement Hexagon target feature support -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/Targets/Hexagon.h b/lib/Basic/Targets/Hexagon.h
index fb4956a9e53d..25a78c181580 100644
--- a/lib/Basic/Targets/Hexagon.h
+++ b/lib/Basic/Targets/Hexagon.h
@@ -1,9 +1,8 @@
//===--- Hexagon.h - Declare Hexagon target feature support -----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/Targets/Lanai.cpp b/lib/Basic/Targets/Lanai.cpp
index 0e8030c04e5c..bb1872083c09 100644
--- a/lib/Basic/Targets/Lanai.cpp
+++ b/lib/Basic/Targets/Lanai.cpp
@@ -1,9 +1,8 @@
//===--- Lanai.cpp - Implement Lanai target feature support ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/Targets/Lanai.h b/lib/Basic/Targets/Lanai.h
index b9e6dbe04433..e119606384c7 100644
--- a/lib/Basic/Targets/Lanai.h
+++ b/lib/Basic/Targets/Lanai.h
@@ -1,9 +1,8 @@
//===--- Lanai.h - Declare Lanai target feature support ---------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/Targets/Le64.cpp b/lib/Basic/Targets/Le64.cpp
index 5a1c1c88e7e3..cacd10dc8936 100644
--- a/lib/Basic/Targets/Le64.cpp
+++ b/lib/Basic/Targets/Le64.cpp
@@ -1,9 +1,8 @@
//===--- Le64.cpp - Implement Le64 target feature support -----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/Targets/Le64.h b/lib/Basic/Targets/Le64.h
index 5e18d0498641..253d5681abc2 100644
--- a/lib/Basic/Targets/Le64.h
+++ b/lib/Basic/Targets/Le64.h
@@ -1,9 +1,8 @@
//===--- Le64.h - Declare Le64 target feature support -----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/Targets/MSP430.cpp b/lib/Basic/Targets/MSP430.cpp
index 86f85a398f14..ef53ee352c32 100644
--- a/lib/Basic/Targets/MSP430.cpp
+++ b/lib/Basic/Targets/MSP430.cpp
@@ -1,9 +1,8 @@
//===--- MSP430.cpp - Implement MSP430 target feature support -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/Targets/MSP430.h b/lib/Basic/Targets/MSP430.h
index 72aafb9459bd..620f12d2b8e3 100644
--- a/lib/Basic/Targets/MSP430.h
+++ b/lib/Basic/Targets/MSP430.h
@@ -1,9 +1,8 @@
//===--- MSP430.h - Declare MSP430 target feature support -------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -34,6 +33,10 @@ public:
LongWidth = 32;
LongLongWidth = 64;
LongAlign = LongLongAlign = 16;
+ FloatWidth = 32;
+ FloatAlign = 16;
+ DoubleWidth = LongDoubleWidth = 64;
+ DoubleAlign = LongDoubleAlign = 16;
PointerWidth = 16;
PointerAlign = 16;
SuitableAlign = 16;
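The layout the new fields above describe, as a compile-time sketch (assuming an msp430-elf target):

    static_assert(sizeof(float) == 4 && alignof(float) == 2,
                  "32-bit float, 16-bit alignment");
    static_assert(sizeof(double) == 8 && alignof(double) == 2,
                  "64-bit double, 16-bit alignment");
    static_assert(sizeof(long double) == sizeof(double),
                  "long double shares the 64-bit layout");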
@@ -52,6 +55,8 @@ public:
return None;
}
+ bool allowsLargerPreferedTypeAlignment() const override { return false; }
+
bool hasFeature(StringRef Feature) const override {
return Feature == "msp430";
}
diff --git a/lib/Basic/Targets/Mips.cpp b/lib/Basic/Targets/Mips.cpp
index d43edeae608f..2cafbe87a996 100644
--- a/lib/Basic/Targets/Mips.cpp
+++ b/lib/Basic/Targets/Mips.cpp
@@ -1,9 +1,8 @@
//===--- Mips.cpp - Implement Mips target feature support -----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -216,6 +215,14 @@ ArrayRef<Builtin::Info> MipsTargetInfo::getTargetBuiltins() const {
Builtin::FirstTSBuiltin);
}
+unsigned MipsTargetInfo::getUnwindWordWidth() const {
+ return llvm::StringSwitch<unsigned>(ABI)
+ .Case("o32", 32)
+ .Case("n32", 64)
+ .Case("n64", 64)
+ .Default(getPointerWidth(0));
+}
+
bool MipsTargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
// microMIPS64R6 backend was removed.
if (getTriple().isMIPS64() && IsMicromips && (ABI == "n32" || ABI == "n64")) {
diff --git a/lib/Basic/Targets/Mips.h b/lib/Basic/Targets/Mips.h
index d49f49888b0c..474cda84a40e 100644
--- a/lib/Basic/Targets/Mips.h
+++ b/lib/Basic/Targets/Mips.h
@@ -1,9 +1,8 @@
//===--- Mips.h - Declare Mips target feature support -----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -402,6 +401,8 @@ public:
return (ABI == "n32" || ABI == "n64") || getTargetOpts().ForceEnableInt128;
}
+ unsigned getUnwindWordWidth() const override;
+
bool validateTarget(DiagnosticsEngine &Diags) const override;
};
} // namespace targets
diff --git a/lib/Basic/Targets/NVPTX.cpp b/lib/Basic/Targets/NVPTX.cpp
index ca41c4d14ca3..f69e9d84c701 100644
--- a/lib/Basic/Targets/NVPTX.cpp
+++ b/lib/Basic/Targets/NVPTX.cpp
@@ -1,9 +1,8 @@
//===--- NVPTX.cpp - Implement NVPTX target feature support ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -45,6 +44,8 @@ NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
if (!Feature.startswith("+ptx"))
continue;
PTXVersion = llvm::StringSwitch<unsigned>(Feature)
+ .Case("+ptx64", 64)
+ .Case("+ptx63", 63)
.Case("+ptx61", 61)
.Case("+ptx60", 60)
.Case("+ptx50", 50)
@@ -118,7 +119,7 @@ NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
LongAlign = HostTarget->getLongAlign();
LongLongWidth = HostTarget->getLongLongWidth();
LongLongAlign = HostTarget->getLongLongAlign();
- MinGlobalAlign = HostTarget->getMinGlobalAlign();
+ MinGlobalAlign = HostTarget->getMinGlobalAlign(/* TypeSize = */ 0);
NewAlign = HostTarget->getNewAlign();
DefaultAlignForAttributeAligned =
HostTarget->getDefaultAlignForAttributeAligned();
@@ -190,7 +191,11 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
case CudaArch::GFX902:
case CudaArch::GFX904:
case CudaArch::GFX906:
+ case CudaArch::GFX908:
case CudaArch::GFX909:
+ case CudaArch::GFX1010:
+ case CudaArch::GFX1011:
+ case CudaArch::GFX1012:
case CudaArch::LAST:
break;
case CudaArch::UNKNOWN:
diff --git a/lib/Basic/Targets/NVPTX.h b/lib/Basic/Targets/NVPTX.h
index 84d466d2f49f..2cdd37ca1b07 100644
--- a/lib/Basic/Targets/NVPTX.h
+++ b/lib/Basic/Targets/NVPTX.h
@@ -1,9 +1,8 @@
//===--- NVPTX.h - Declare NVPTX target feature support ---------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -36,6 +35,16 @@ static const unsigned NVPTXAddrSpaceMap[] = {
3, // cuda_shared
};
+/// The DWARF address class. Taken from
+/// https://docs.nvidia.com/cuda/archive/10.0/ptx-writers-guide-to-interoperability/index.html#cuda-specific-dwarf
+static const int NVPTXDWARFAddrSpaceMap[] = {
+ -1, // Default, opencl_private or opencl_generic - not defined
+ 5, // opencl_global
+ -1,
+ 8, // opencl_local or cuda_shared
+ 4, // opencl_constant or cuda_constant
+};
+
class LLVM_LIBRARY_VISIBILITY NVPTXTargetInfo : public TargetInfo {
static const char *const GCCRegNames[];
static const Builtin::Info BuiltinInfo[];
@@ -125,6 +134,20 @@ public:
Opts.support("cl_khr_local_int32_extended_atomics");
}
+ /// \returns If a target requires an address within a target specific address
+ /// space \p AddressSpace to be converted in order to be used, then return the
+ /// corresponding target specific DWARF address space.
+ ///
+ /// \returns Otherwise return None and no conversion will be emitted in the
+ /// DWARF.
+ Optional<unsigned>
+ getDWARFAddressSpace(unsigned AddressSpace) const override {
+ if (AddressSpace >= llvm::array_lengthof(NVPTXDWARFAddrSpaceMap) ||
+ NVPTXDWARFAddrSpaceMap[AddressSpace] < 0)
+ return llvm::None;
+ return NVPTXDWARFAddrSpaceMap[AddressSpace];
+ }
+
CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
// CUDA compilations support all of the host's calling conventions.
//
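Reading the table above (illustrative only): target address space 3, CUDA shared / OpenCL local, maps to DWARF address class 8; address space 4, constant, maps to class 4; the generic address space yields None, so no conversion is emitted for it.

    // getDWARFAddressSpace(3) -> 8     (shared / local)
    // getDWARFAddressSpace(4) -> 4     (constant)
    // getDWARFAddressSpace(0) -> None  (generic, no conversion)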
diff --git a/lib/Basic/Targets/OSTargets.cpp b/lib/Basic/Targets/OSTargets.cpp
index 6252a51ef710..72fdb0e7dde8 100644
--- a/lib/Basic/Targets/OSTargets.cpp
+++ b/lib/Basic/Targets/OSTargets.cpp
@@ -1,9 +1,8 @@
//===--- OSTargets.cpp - Implement OS target feature support --------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -135,5 +134,84 @@ void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
PlatformMinVersion = VersionTuple(Maj, Min, Rev);
}
+
+static void addMinGWDefines(const llvm::Triple &Triple, const LangOptions &Opts,
+ MacroBuilder &Builder) {
+ DefineStd(Builder, "WIN32", Opts);
+ DefineStd(Builder, "WINNT", Opts);
+ if (Triple.isArch64Bit()) {
+ DefineStd(Builder, "WIN64", Opts);
+ Builder.defineMacro("__MINGW64__");
+ }
+ Builder.defineMacro("__MSVCRT__");
+ Builder.defineMacro("__MINGW32__");
+ addCygMingDefines(Opts, Builder);
+}
+
+static void addVisualCDefines(const LangOptions &Opts, MacroBuilder &Builder) {
+ if (Opts.CPlusPlus) {
+ if (Opts.RTTIData)
+ Builder.defineMacro("_CPPRTTI");
+
+ if (Opts.CXXExceptions)
+ Builder.defineMacro("_CPPUNWIND");
+ }
+
+ if (Opts.Bool)
+ Builder.defineMacro("__BOOL_DEFINED");
+
+ if (!Opts.CharIsSigned)
+ Builder.defineMacro("_CHAR_UNSIGNED");
+
+ // FIXME: POSIXThreads isn't exactly the option this should be defined for,
+ // but it works for now.
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_MT");
+
+ if (Opts.MSCompatibilityVersion) {
+ Builder.defineMacro("_MSC_VER",
+ Twine(Opts.MSCompatibilityVersion / 100000));
+ Builder.defineMacro("_MSC_FULL_VER", Twine(Opts.MSCompatibilityVersion));
+ // FIXME We cannot encode the revision information into 32-bits
+ Builder.defineMacro("_MSC_BUILD", Twine(1));
+
+ if (Opts.CPlusPlus11 && Opts.isCompatibleWithMSVC(LangOptions::MSVC2015))
+ Builder.defineMacro("_HAS_CHAR16_T_LANGUAGE_SUPPORT", Twine(1));
+
+ if (Opts.isCompatibleWithMSVC(LangOptions::MSVC2015)) {
+ if (Opts.CPlusPlus2a)
+ Builder.defineMacro("_MSVC_LANG", "201704L");
+ else if (Opts.CPlusPlus17)
+ Builder.defineMacro("_MSVC_LANG", "201703L");
+ else if (Opts.CPlusPlus14)
+ Builder.defineMacro("_MSVC_LANG", "201402L");
+ }
+ }
+
+ if (Opts.MicrosoftExt) {
+ Builder.defineMacro("_MSC_EXTENSIONS");
+
+ if (Opts.CPlusPlus11) {
+ Builder.defineMacro("_RVALUE_REFERENCES_V2_SUPPORTED");
+ Builder.defineMacro("_RVALUE_REFERENCES_SUPPORTED");
+ Builder.defineMacro("_NATIVE_NULLPTR_SUPPORTED");
+ }
+ }
+
+ Builder.defineMacro("_INTEGRAL_MAX_BITS", "64");
+}
+
+void addWindowsDefines(const llvm::Triple &Triple, const LangOptions &Opts,
+ MacroBuilder &Builder) {
+ Builder.defineMacro("_WIN32");
+ if (Triple.isArch64Bit())
+ Builder.defineMacro("_WIN64");
+ if (Triple.isWindowsGNUEnvironment())
+ addMinGWDefines(Triple, Opts, Builder);
+ else if (Triple.isKnownWindowsMSVCEnvironment() ||
+ (Triple.isWindowsItaniumEnvironment() && Opts.MSVCCompat))
+ addVisualCDefines(Opts, Builder);
+}
+
} // namespace targets
} // namespace clang
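A worked example of the _MSC_VER arithmetic in addVisualCDefines above; the flag spelling and the major*10000000 + minor*100000 + build encoding of MSCompatibilityVersion are assumptions about the driver, not part of this hunk.

    // -fms-compatibility-version=19.11.25547
    //   MSCompatibilityVersion = 19*10000000 + 11*100000 + 25547 = 191125547
    //   _MSC_VER      = 191125547 / 100000 = 1911
    //   _MSC_FULL_VER = 191125547
    //   _MSC_BUILD    = 1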
diff --git a/lib/Basic/Targets/OSTargets.h b/lib/Basic/Targets/OSTargets.h
index 085efa02cc5f..8542311ffa41 100644
--- a/lib/Basic/Targets/OSTargets.h
+++ b/lib/Basic/Targets/OSTargets.h
@@ -1,9 +1,8 @@
//===--- OSTargets.h - Declare OS target feature support --------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -134,6 +133,37 @@ public:
/// attribute on declarations that can be dynamically replaced.
bool hasProtectedVisibility() const override { return false; }
+ unsigned getExnObjectAlignment() const override {
+ // Older versions of libc++abi guarantee an alignment of only 8-bytes for
+ // exception objects because of a bug in __cxa_exception that was
+ // eventually fixed in r319123.
+ llvm::VersionTuple MinVersion;
+ const llvm::Triple &T = this->getTriple();
+
+ // Compute the earliest OS versions that have the fix to libc++abi.
+ switch (T.getOS()) {
+ case llvm::Triple::Darwin:
+ case llvm::Triple::MacOSX: // Earliest supporting version is 10.14.
+ MinVersion = llvm::VersionTuple(10U, 14U);
+ break;
+ case llvm::Triple::IOS:
+ case llvm::Triple::TvOS: // Earliest supporting version is 12.0.0.
+ MinVersion = llvm::VersionTuple(12U);
+ break;
+ case llvm::Triple::WatchOS: // Earliest supporting version is 5.0.0.
+ MinVersion = llvm::VersionTuple(5U);
+ break;
+ default:
+ llvm_unreachable("Unexpected OS");
+ }
+
+ unsigned Major, Minor, Micro;
+ T.getOSVersion(Major, Minor, Micro);
+ if (llvm::VersionTuple(Major, Minor, Micro) < MinVersion)
+ return 64;
+ return OSTargetInfo<Target>::getExnObjectAlignment();
+ }
+
TargetInfo::IntType getLeastIntTypeByWidth(unsigned BitWidth,
bool IsSigned) const final {
// Darwin uses `long long` for `int_least64_t` and `int_fast64_t`.
@@ -602,7 +632,11 @@ protected:
public:
SolarisTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: OSTargetInfo<Target>(Triple, Opts) {
- // FIXME: WIntType should be SignedLong
+ if (this->PointerWidth == 64) {
+ this->WCharType = this->WIntType = this->SignedInt;
+ } else {
+ this->WCharType = this->WIntType = this->SignedLong;
+ }
switch (Triple.getArch()) {
default:
break;
@@ -614,71 +648,79 @@ public:
}
};
-// Windows target
+// AIX Target
template <typename Target>
-class LLVM_LIBRARY_VISIBILITY WindowsTargetInfo : public OSTargetInfo<Target> {
+class AIXTargetInfo : public OSTargetInfo<Target> {
protected:
void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
MacroBuilder &Builder) const override {
- Builder.defineMacro("_WIN32");
- if (Triple.isArch64Bit())
- Builder.defineMacro("_WIN64");
- if (Triple.isWindowsGNUEnvironment())
- addMinGWDefines(Triple, Opts, Builder);
-
- }
- void getVisualStudioDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const {
- if (Opts.CPlusPlus) {
- if (Opts.RTTIData)
- Builder.defineMacro("_CPPRTTI");
-
- if (Opts.CXXExceptions)
- Builder.defineMacro("_CPPUNWIND");
+ DefineStd(Builder, "unix", Opts);
+ Builder.defineMacro("_IBMR2");
+ Builder.defineMacro("_POWER");
+
+ Builder.defineMacro("_AIX");
+
+ unsigned Major, Minor, Micro;
+ Triple.getOSVersion(Major, Minor, Micro);
+
+ // Define AIX OS-Version Macros.
+ // Includes logic for legacy versions of AIX; no specific intent to support.
+ std::pair<int, int> OsVersion = {Major, Minor};
+ if (OsVersion >= std::make_pair(3, 2)) Builder.defineMacro("_AIX32");
+ if (OsVersion >= std::make_pair(4, 1)) Builder.defineMacro("_AIX41");
+ if (OsVersion >= std::make_pair(4, 3)) Builder.defineMacro("_AIX43");
+ if (OsVersion >= std::make_pair(5, 0)) Builder.defineMacro("_AIX50");
+ if (OsVersion >= std::make_pair(5, 1)) Builder.defineMacro("_AIX51");
+ if (OsVersion >= std::make_pair(5, 2)) Builder.defineMacro("_AIX52");
+ if (OsVersion >= std::make_pair(5, 3)) Builder.defineMacro("_AIX53");
+ if (OsVersion >= std::make_pair(6, 1)) Builder.defineMacro("_AIX61");
+ if (OsVersion >= std::make_pair(7, 1)) Builder.defineMacro("_AIX71");
+ if (OsVersion >= std::make_pair(7, 2)) Builder.defineMacro("_AIX72");
+
+ // FIXME: Do not define _LONG_LONG when -fno-long-long is specified.
+ Builder.defineMacro("_LONG_LONG");
+
+ if (Opts.POSIXThreads) {
+ Builder.defineMacro("_THREAD_SAFE");
}
- if (Opts.Bool)
- Builder.defineMacro("__BOOL_DEFINED");
+ if (this->PointerWidth == 64) {
+ Builder.defineMacro("__64BIT__");
+ }
- if (!Opts.CharIsSigned)
- Builder.defineMacro("_CHAR_UNSIGNED");
+ // Define _WCHAR_T when it is a fundamental type
+ // (i.e., for C++ without -fno-wchar).
+ if (Opts.CPlusPlus && Opts.WChar) {
+ Builder.defineMacro("_WCHAR_T");
+ }
+ }
- // FIXME: POSIXThreads isn't exactly the option this should be defined for,
- // but it works for now.
- if (Opts.POSIXThreads)
- Builder.defineMacro("_MT");
-
- if (Opts.MSCompatibilityVersion) {
- Builder.defineMacro("_MSC_VER",
- Twine(Opts.MSCompatibilityVersion / 100000));
- Builder.defineMacro("_MSC_FULL_VER", Twine(Opts.MSCompatibilityVersion));
- // FIXME We cannot encode the revision information into 32-bits
- Builder.defineMacro("_MSC_BUILD", Twine(1));
-
- if (Opts.CPlusPlus11 && Opts.isCompatibleWithMSVC(LangOptions::MSVC2015))
- Builder.defineMacro("_HAS_CHAR16_T_LANGUAGE_SUPPORT", Twine(1));
-
- if (Opts.isCompatibleWithMSVC(LangOptions::MSVC2015)) {
- if (Opts.CPlusPlus2a)
- Builder.defineMacro("_MSVC_LANG", "201704L");
- else if (Opts.CPlusPlus17)
- Builder.defineMacro("_MSVC_LANG", "201703L");
- else if (Opts.CPlusPlus14)
- Builder.defineMacro("_MSVC_LANG", "201402L");
- }
+public:
+ AIXTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : OSTargetInfo<Target>(Triple, Opts) {
+ if (this->PointerWidth == 64) {
+ this->WCharType = this->UnsignedInt;
+ } else {
+ this->WCharType = this->UnsignedShort;
}
+ this->UseZeroLengthBitfieldAlignment = true;
+ }
- if (Opts.MicrosoftExt) {
- Builder.defineMacro("_MSC_EXTENSIONS");
+ // AIX sets FLT_EVAL_METHOD to be 1.
+ unsigned getFloatEvalMethod() const override { return 1; }
+ bool hasInt128Type() const override { return false; }
+};
- if (Opts.CPlusPlus11) {
- Builder.defineMacro("_RVALUE_REFERENCES_V2_SUPPORTED");
- Builder.defineMacro("_RVALUE_REFERENCES_SUPPORTED");
- Builder.defineMacro("_NATIVE_NULLPTR_SUPPORTED");
- }
- }
+void addWindowsDefines(const llvm::Triple &Triple, const LangOptions &Opts,
+ MacroBuilder &Builder);
- Builder.defineMacro("_INTEGRAL_MAX_BITS", "64");
+// Windows target
+template <typename Target>
+class LLVM_LIBRARY_VISIBILITY WindowsTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ addWindowsDefines(Triple, Opts, Builder);
}
public:
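How the cumulative AIX version macros above behave for one concrete target (the triple spelling is hypothetical):

    // Targeting e.g. powerpc-ibm-aix7.2.0.0, getOSDefines predefines _AIX and
    // every _AIXnm up to _AIX72, plus __64BIT__ when pointers are 64-bit, so
    // cumulative checks keep working:
    #if defined(_AIX) && defined(_AIX61)
      /* AIX 6.1 or newer (also true on 7.1 and 7.2) */
    #endif
    #if defined(_AIX72)
      /* AIX 7.2 or newer */
    #endif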
@@ -764,14 +806,17 @@ public:
template <typename Target>
class LLVM_LIBRARY_VISIBILITY WebAssemblyOSTargetInfo
: public OSTargetInfo<Target> {
+protected:
void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
- MacroBuilder &Builder) const final {
+ MacroBuilder &Builder) const {
// A common platform macro.
if (Opts.POSIXThreads)
Builder.defineMacro("_REENTRANT");
// Follow g++ convention and predefine _GNU_SOURCE for C++.
if (Opts.CPlusPlus)
Builder.defineMacro("_GNU_SOURCE");
+ // Indicate that we have __float128.
+ Builder.defineMacro("__FLOAT128__");
}
public:
@@ -780,7 +825,38 @@ public:
: OSTargetInfo<Target>(Triple, Opts) {
this->MCountName = "__mcount";
this->TheCXXABI.set(TargetCXXABI::WebAssembly);
+ this->HasFloat128 = true;
+ }
+};
+
+// WASI target
+template <typename Target>
+class LLVM_LIBRARY_VISIBILITY WASITargetInfo
+ : public WebAssemblyOSTargetInfo<Target> {
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const final {
+ WebAssemblyOSTargetInfo<Target>::getOSDefines(Opts, Triple, Builder);
+ Builder.defineMacro("__wasi__");
}
+
+public:
+ explicit WASITargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : WebAssemblyOSTargetInfo<Target>(Triple, Opts) {}
+};
+
+// Emscripten target
+template <typename Target>
+class LLVM_LIBRARY_VISIBILITY EmscriptenTargetInfo
+ : public WebAssemblyOSTargetInfo<Target> {
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const final {
+ WebAssemblyOSTargetInfo<Target>::getOSDefines(Opts, Triple, Builder);
+ Builder.defineMacro("__EMSCRIPTEN__");
+ }
+
+public:
+ explicit EmscriptenTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : WebAssemblyOSTargetInfo<Target>(Triple, Opts) {}
};
} // namespace targets
diff --git a/lib/Basic/Targets/PNaCl.cpp b/lib/Basic/Targets/PNaCl.cpp
index b9128c2716e8..60e9467193a8 100644
--- a/lib/Basic/Targets/PNaCl.cpp
+++ b/lib/Basic/Targets/PNaCl.cpp
@@ -1,9 +1,8 @@
//===--- PNaCl.cpp - Implement PNaCl target feature support ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/Targets/PNaCl.h b/lib/Basic/Targets/PNaCl.h
index 922944e85ca2..ab4abf9fc567 100644
--- a/lib/Basic/Targets/PNaCl.h
+++ b/lib/Basic/Targets/PNaCl.h
@@ -1,9 +1,8 @@
//===--- PNaCl.h - Declare PNaCl target feature support ---------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/Targets/PPC.cpp b/lib/Basic/Targets/PPC.cpp
index 6cfbed1713e1..2a773d999286 100644
--- a/lib/Basic/Targets/PPC.cpp
+++ b/lib/Basic/Targets/PPC.cpp
@@ -1,9 +1,8 @@
//===--- PPC.cpp - Implement PPC target feature support -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -31,6 +30,7 @@ const Builtin::Info PPCTargetInfo::BuiltinInfo[] = {
/// configured set of features.
bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) {
+ FloatABI = HardFloat;
for (const auto &Feature : Features) {
if (Feature == "+altivec") {
HasAltivec = true;
@@ -54,6 +54,8 @@ bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasFloat128 = true;
} else if (Feature == "+power9-vector") {
HasP9Vector = true;
+ } else if (Feature == "-hard-float") {
+ FloatABI = SoftFloat;
}
// TODO: Finish this list and add an assert that we've handled them
// all.
@@ -101,7 +103,9 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("_CALL_LINUX", "1");
// Subtarget options.
- Builder.defineMacro("__NATURAL_ALIGNMENT__");
+ if (!getTriple().isOSAIX()){
+ Builder.defineMacro("__NATURAL_ALIGNMENT__");
+ }
Builder.defineMacro("__REGISTER_PREFIX__", "");
// FIXME: Should be controlled by command line option.
@@ -213,31 +217,26 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
static bool ppcUserFeaturesCheck(DiagnosticsEngine &Diags,
const std::vector<std::string> &FeaturesVec) {
- if (std::find(FeaturesVec.begin(), FeaturesVec.end(), "-vsx") !=
- FeaturesVec.end()) {
- if (std::find(FeaturesVec.begin(), FeaturesVec.end(), "+power8-vector") !=
- FeaturesVec.end()) {
+ if (llvm::find(FeaturesVec, "-vsx") != FeaturesVec.end()) {
+ if (llvm::find(FeaturesVec, "+power8-vector") != FeaturesVec.end()) {
Diags.Report(diag::err_opt_not_valid_with_opt) << "-mpower8-vector"
<< "-mno-vsx";
return false;
}
- if (std::find(FeaturesVec.begin(), FeaturesVec.end(), "+direct-move") !=
- FeaturesVec.end()) {
+ if (llvm::find(FeaturesVec, "+direct-move") != FeaturesVec.end()) {
Diags.Report(diag::err_opt_not_valid_with_opt) << "-mdirect-move"
<< "-mno-vsx";
return false;
}
- if (std::find(FeaturesVec.begin(), FeaturesVec.end(), "+float128") !=
- FeaturesVec.end()) {
+ if (llvm::find(FeaturesVec, "+float128") != FeaturesVec.end()) {
Diags.Report(diag::err_opt_not_valid_with_opt) << "-mfloat128"
<< "-mno-vsx";
return false;
}
- if (std::find(FeaturesVec.begin(), FeaturesVec.end(), "+power9-vector") !=
- FeaturesVec.end()) {
+ if (llvm::find(FeaturesVec, "+power9-vector") != FeaturesVec.end()) {
Diags.Report(diag::err_opt_not_valid_with_opt) << "-mpower9-vector"
<< "-mno-vsx";
return false;
@@ -310,8 +309,7 @@ bool PPCTargetInfo::initFeatureMap(
return false;
if (!(ArchDefs & ArchDefinePwr9) && (ArchDefs & ArchDefinePpcgr) &&
- std::find(FeaturesVec.begin(), FeaturesVec.end(), "+float128") !=
- FeaturesVec.end()) {
+ llvm::find(FeaturesVec, "+float128") != FeaturesVec.end()) {
// We have __float128 on PPC but not power 9 and above.
Diags.Report(diag::err_opt_not_valid_with_opt) << "-mfloat128" << CPU;
return false;
@@ -467,6 +465,10 @@ void PPCTargetInfo::adjust(LangOptions &Opts) {
if (HasAltivec)
Opts.AltiVec = 1;
TargetInfo::adjust(Opts);
+ if (LongDoubleFormat != &llvm::APFloat::IEEEdouble())
+ LongDoubleFormat = Opts.PPCIEEELongDouble
+ ? &llvm::APFloat::IEEEquad()
+ : &llvm::APFloat::PPCDoubleDouble();
}
ArrayRef<Builtin::Info> PPCTargetInfo::getTargetBuiltins() const {
diff --git a/lib/Basic/Targets/PPC.h b/lib/Basic/Targets/PPC.h
index 058970a0e098..6e5df097921b 100644
--- a/lib/Basic/Targets/PPC.h
+++ b/lib/Basic/Targets/PPC.h
@@ -1,9 +1,8 @@
//===--- PPC.h - Declare PPC target feature support -------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -54,6 +53,7 @@ class LLVM_LIBRARY_VISIBILITY PPCTargetInfo : public TargetInfo {
static const char *const GCCRegNames[];
static const TargetInfo::GCCRegAlias GCCRegAliases[];
std::string CPU;
+ enum PPCFloatABI { HardFloat, SoftFloat } FloatABI;
// Target cpu features.
bool HasAltivec = false;
@@ -132,19 +132,18 @@ public:
ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
ArchDefinePpcsq)
.Cases("power7", "pwr7",
- ArchDefinePwr7 | ArchDefinePwr6x | ArchDefinePwr6 |
- ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 |
- ArchDefinePpcgr | ArchDefinePpcsq)
+ ArchDefinePwr7 | ArchDefinePwr6 | ArchDefinePwr5x |
+ ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
// powerpc64le automatically defaults to at least power8.
.Cases("power8", "pwr8", "ppc64le",
- ArchDefinePwr8 | ArchDefinePwr7 | ArchDefinePwr6x |
- ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 |
- ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
+ ArchDefinePwr8 | ArchDefinePwr7 | ArchDefinePwr6 |
+ ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 |
+ ArchDefinePpcgr | ArchDefinePpcsq)
.Cases("power9", "pwr9",
- ArchDefinePwr9 | ArchDefinePwr8 | ArchDefinePwr7 |
- ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x |
- ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
- ArchDefinePpcsq)
+ ArchDefinePwr9 | ArchDefinePwr8 | ArchDefinePwr7 |
+ ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 |
+ ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
.Default(ArchDefineNone);
}
return CPUKnown;
@@ -185,8 +184,12 @@ public:
return false;
case 'O': // Zero
break;
- case 'b': // Base register
case 'f': // Floating point register
+ // Don't use floating point registers on soft float ABI.
+ if (FloatABI == SoftFloat)
+ return false;
+ LLVM_FALLTHROUGH;
+ case 'b': // Base register
Info.setAllowsRegister();
break;
// FIXME: The following are added to allow parsing.
@@ -194,13 +197,18 @@ public:
// Also, is more specific checking needed? I.e. specific registers?
case 'd': // Floating point register (containing 64-bit value)
case 'v': // Altivec vector register
+ // Don't use floating point and altivec vector registers
+ // on soft float ABI
+ if (FloatABI == SoftFloat)
+ return false;
Info.setAllowsRegister();
break;
case 'w':
switch (Name[1]) {
case 'd': // VSX vector register to hold vector double data
case 'f': // VSX vector register to hold vector float data
- case 's': // VSX vector register to hold scalar float data
+ case 's': // VSX vector register to hold scalar double data
+ case 'w': // VSX vector register to hold scalar double data
case 'a': // Any VSX register
case 'c': // An individual CR bit
case 'i': // FP or VSX register to hold 64-bit integers data
@@ -306,11 +314,14 @@ public:
bool hasSjLjLowering() const override { return true; }
- bool useFloat128ManglingForLongDouble() const override {
- return LongDoubleWidth == 128 &&
- LongDoubleFormat == &llvm::APFloat::PPCDoubleDouble() &&
- getTriple().isOSBinFormatELF();
+ const char *getLongDoubleMangling() const override {
+ if (LongDoubleWidth == 64)
+ return "e";
+ return LongDoubleFormat == &llvm::APFloat::PPCDoubleDouble()
+ ? "g"
+ : "u9__ieee128";
}
+ const char *getFloat128Mangling() const override { return "u9__ieee128"; }
};
class LLVM_LIBRARY_VISIBILITY PPC32TargetInfo : public PPCTargetInfo {
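Illustrative effect of the mangling codes returned above under the Itanium scheme; the -mabi spelling is an assumption, not taken from this patch.

    void f(long double);
    // 128-bit IBM double-double long double:                _Z1fg
    // 128-bit IEEE long double (e.g. -mabi=ieeelongdouble): _Z1fu9__ieee128
    // targets where long double is 64-bit (next hunk):      _Z1fe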
@@ -327,11 +338,18 @@ public:
PtrDiffType = SignedInt;
IntPtrType = SignedInt;
break;
+ case llvm::Triple::AIX:
+ SizeType = UnsignedLong;
+ PtrDiffType = SignedLong;
+ IntPtrType = SignedLong;
+ SuitableAlign = 64;
+ break;
default:
break;
}
- if (getTriple().isOSFreeBSD()) {
+ if (Triple.isOSFreeBSD() || Triple.isOSNetBSD() || Triple.isOSOpenBSD() ||
+ Triple.getOS() == llvm::Triple::AIX || Triple.isMusl()) {
LongDoubleWidth = LongDoubleAlign = 64;
LongDoubleFormat = &llvm::APFloat::IEEEdouble();
}
@@ -361,16 +379,16 @@ public:
ABI = "elfv2";
} else {
resetDataLayout("E-m:e-i64:64-n32:64");
- ABI = "elfv1";
+ ABI = Triple.getEnvironment() == llvm::Triple::ELFv2 ? "elfv2" : "elfv1";
}
- switch (getTriple().getOS()) {
- case llvm::Triple::FreeBSD:
+ if (Triple.getOS() == llvm::Triple::AIX)
+ SuitableAlign = 64;
+
+ if (Triple.isOSFreeBSD() || Triple.getOS() == llvm::Triple::AIX ||
+ Triple.isMusl()) {
LongDoubleWidth = LongDoubleAlign = 64;
LongDoubleFormat = &llvm::APFloat::IEEEdouble();
- break;
- default:
- break;
}
// PPC64 supports atomics up to 8 bytes.
@@ -427,6 +445,21 @@ public:
}
};
+class LLVM_LIBRARY_VISIBILITY AIXPPC32TargetInfo :
+ public AIXTargetInfo<PPC32TargetInfo> {
+public:
+ using AIXTargetInfo::AIXTargetInfo;
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::CharPtrBuiltinVaList;
+ }
+};
+
+class LLVM_LIBRARY_VISIBILITY AIXPPC64TargetInfo :
+ public AIXTargetInfo<PPC64TargetInfo> {
+public:
+ using AIXTargetInfo::AIXTargetInfo;
+};
+
} // namespace targets
} // namespace clang
#endif // LLVM_CLANG_LIB_BASIC_TARGETS_PPC_H
diff --git a/lib/Basic/Targets/RISCV.cpp b/lib/Basic/Targets/RISCV.cpp
index 7eb5e6a686a9..f800bb0b25da 100644
--- a/lib/Basic/Targets/RISCV.cpp
+++ b/lib/Basic/Targets/RISCV.cpp
@@ -1,9 +1,8 @@
//===--- RISCV.cpp - Implement RISCV target feature support ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -32,7 +31,7 @@ ArrayRef<TargetInfo::GCCRegAlias> RISCVTargetInfo::getGCCRegAliases() const {
{{"zero"}, "x0"}, {{"ra"}, "x1"}, {{"sp"}, "x2"}, {{"gp"}, "x3"},
{{"tp"}, "x4"}, {{"t0"}, "x5"}, {{"t1"}, "x6"}, {{"t2"}, "x7"},
{{"s0"}, "x8"}, {{"s1"}, "x9"}, {{"a0"}, "x10"}, {{"a1"}, "x11"},
- {{"a2"}, "x12"}, {{"a3"}, "x13"}, {{"a4"}, "x15"}, {{"a5"}, "x15"},
+ {{"a2"}, "x12"}, {{"a3"}, "x13"}, {{"a4"}, "x14"}, {{"a5"}, "x15"},
{{"a6"}, "x16"}, {{"a7"}, "x17"}, {{"s2"}, "x18"}, {{"s3"}, "x19"},
{{"s4"}, "x20"}, {{"s5"}, "x21"}, {{"s6"}, "x22"}, {{"s7"}, "x23"},
{{"s8"}, "x24"}, {{"s9"}, "x25"}, {{"s10"}, "x26"}, {{"s11"}, "x27"},
@@ -40,6 +39,26 @@ ArrayRef<TargetInfo::GCCRegAlias> RISCVTargetInfo::getGCCRegAliases() const {
return llvm::makeArrayRef(GCCRegAliases);
}
+bool RISCVTargetInfo::validateAsmConstraint(
+ const char *&Name, TargetInfo::ConstraintInfo &Info) const {
+ switch (*Name) {
+ default:
+ return false;
+ case 'I':
+ // A 12-bit signed immediate.
+ Info.setRequiresImmediate(-2048, 2047);
+ return true;
+ case 'J':
+ // Integer zero.
+ Info.setRequiresImmediate(0);
+ return true;
+ case 'K':
+ // A 5-bit unsigned immediate for CSR access instructions.
+ Info.setRequiresImmediate(0, 31);
+ return true;
+ }
+}
+
void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__ELF__");
diff --git a/lib/Basic/Targets/RISCV.h b/lib/Basic/Targets/RISCV.h
index f83aae539391..bc814b79ce51 100644
--- a/lib/Basic/Targets/RISCV.h
+++ b/lib/Basic/Targets/RISCV.h
@@ -1,9 +1,8 @@
//===--- RISCV.h - Declare RISCV target feature support ---------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -36,7 +35,6 @@ public:
RISCVTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
: TargetInfo(Triple), HasM(false), HasA(false), HasF(false),
HasD(false), HasC(false) {
- TLSSupported = false;
LongDoubleWidth = 128;
LongDoubleAlign = 128;
LongDoubleFormat = &llvm::APFloat::IEEEquad();
@@ -59,12 +57,19 @@ public:
ArrayRef<const char *> getGCCRegNames() const override;
+ int getEHDataRegisterNumber(unsigned RegNo) const override {
+ if (RegNo == 0)
+ return 10;
+ else if (RegNo == 1)
+ return 11;
+ else
+ return -1;
+ }
+
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
bool validateAsmConstraint(const char *&Name,
- TargetInfo::ConstraintInfo &Info) const override {
- return false;
- }
+ TargetInfo::ConstraintInfo &Info) const override;
bool hasFeature(StringRef Feature) const override;
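For reference, the register numbers 10 and 11 returned by getEHDataRegisterNumber above are x10/x11, i.e. the a0/a1 aliases in the register table earlier in this diff; assuming the usual plumbing, __builtin_eh_return_data_regno should now report them:

    // __builtin_eh_return_data_regno(0) -> 10   (a0 / x10)
    // __builtin_eh_return_data_regno(1) -> 11   (a1 / x11)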
diff --git a/lib/Basic/Targets/SPIR.cpp b/lib/Basic/Targets/SPIR.cpp
index 304d904368ca..a9b815d13bc1 100644
--- a/lib/Basic/Targets/SPIR.cpp
+++ b/lib/Basic/Targets/SPIR.cpp
@@ -1,9 +1,8 @@
//===--- SPIR.cpp - Implement SPIR target feature support -----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/Targets/SPIR.h b/lib/Basic/Targets/SPIR.h
index 9815292fc276..6023c868dbdc 100644
--- a/lib/Basic/Targets/SPIR.h
+++ b/lib/Basic/Targets/SPIR.h
@@ -1,9 +1,8 @@
//===--- SPIR.h - Declare SPIR target feature support -----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -48,6 +47,7 @@ public:
AddrSpaceMap = &SPIRAddrSpaceMap;
UseAddrSpaceMapMangling = true;
HasLegalHalfType = true;
+ HasFloat16 = true;
// Define available target features
// These must be defined in sorted order!
NoAsmVariants = true;
diff --git a/lib/Basic/Targets/Sparc.cpp b/lib/Basic/Targets/Sparc.cpp
index ee4f309363af..13aa964d4716 100644
--- a/lib/Basic/Targets/Sparc.cpp
+++ b/lib/Basic/Targets/Sparc.cpp
@@ -1,9 +1,8 @@
//===--- Sparc.cpp - Implement Sparc target feature support ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/Targets/Sparc.h b/lib/Basic/Targets/Sparc.h
index 5ae305bffb43..963192a4634f 100644
--- a/lib/Basic/Targets/Sparc.h
+++ b/lib/Basic/Targets/Sparc.h
@@ -1,9 +1,8 @@
//===--- Sparc.h - declare sparc target feature support ---------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -40,7 +39,7 @@ public:
bool handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) override {
// Check if software floating point is enabled
- auto Feature = std::find(Features.begin(), Features.end(), "+soft-float");
+ auto Feature = llvm::find(Features, "+soft-float");
if (Feature != Features.end()) {
SoftFloat = true;
}
diff --git a/lib/Basic/Targets/SystemZ.cpp b/lib/Basic/Targets/SystemZ.cpp
index 6f06f1fc760c..d86928a6333b 100644
--- a/lib/Basic/Targets/SystemZ.cpp
+++ b/lib/Basic/Targets/SystemZ.cpp
@@ -1,9 +1,8 @@
//===--- SystemZ.cpp - Implement SystemZ target feature support -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -92,7 +91,8 @@ static constexpr ISANameRevision ISARevisions[] = {
{{"arch9"}, 9}, {{"z196"}, 9},
{{"arch10"}, 10}, {{"zEC12"}, 10},
{{"arch11"}, 11}, {{"z13"}, 11},
- {{"arch12"}, 12}, {{"z14"}, 12}
+ {{"arch12"}, 12}, {{"z14"}, 12},
+ {{"arch13"}, 13},
};
int SystemZTargetInfo::getISARevision(StringRef Name) const {
@@ -119,6 +119,7 @@ bool SystemZTargetInfo::hasFeature(StringRef Feature) const {
.Case("arch10", ISARevision >= 10)
.Case("arch11", ISARevision >= 11)
.Case("arch12", ISARevision >= 12)
+ .Case("arch13", ISARevision >= 13)
.Case("htm", HasTransactionalExecution)
.Case("vx", HasVector)
.Default(false);
@@ -143,7 +144,7 @@ void SystemZTargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasVector)
Builder.defineMacro("__VX__");
if (Opts.ZVector)
- Builder.defineMacro("__VEC__", "10302");
+ Builder.defineMacro("__VEC__", "10303");
}
ArrayRef<Builtin::Info> SystemZTargetInfo::getTargetBuiltins() const {
diff --git a/lib/Basic/Targets/SystemZ.h b/lib/Basic/Targets/SystemZ.h
index 842316005ed9..e751806f4747 100644
--- a/lib/Basic/Targets/SystemZ.h
+++ b/lib/Basic/Targets/SystemZ.h
@@ -1,9 +1,8 @@
//===--- SystemZ.h - Declare SystemZ target feature support -----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -101,6 +100,8 @@ public:
Features["vector"] = true;
if (ISARevision >= 12)
Features["vector-enhancements-1"] = true;
+ if (ISARevision >= 13)
+ Features["vector-enhancements-2"] = true;
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
@@ -142,7 +143,7 @@ public:
return "";
}
- bool useFloat128ManglingForLongDouble() const override { return true; }
+ const char *getLongDoubleMangling() const override { return "g"; }
};
} // namespace targets
} // namespace clang
diff --git a/lib/Basic/Targets/TCE.cpp b/lib/Basic/Targets/TCE.cpp
index bf89c1dc549e..91194b568a09 100644
--- a/lib/Basic/Targets/TCE.cpp
+++ b/lib/Basic/Targets/TCE.cpp
@@ -1,9 +1,8 @@
//===--- TCE.cpp - Implement TCE target feature support -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/Targets/TCE.h b/lib/Basic/Targets/TCE.h
index be43bed98d80..967ef5c59ee5 100644
--- a/lib/Basic/Targets/TCE.h
+++ b/lib/Basic/Targets/TCE.h
@@ -1,9 +1,8 @@
//===--- TCE.h - Declare TCE target feature support -------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/Targets/WebAssembly.cpp b/lib/Basic/Targets/WebAssembly.cpp
index 2fdc84bb8cc8..b16442b99b62 100644
--- a/lib/Basic/Targets/WebAssembly.cpp
+++ b/lib/Basic/Targets/WebAssembly.cpp
@@ -1,9 +1,8 @@
//===--- WebAssembly.cpp - Implement WebAssembly target feature support ---===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -41,6 +40,11 @@ bool WebAssemblyTargetInfo::hasFeature(StringRef Feature) const {
.Case("nontrapping-fptoint", HasNontrappingFPToInt)
.Case("sign-ext", HasSignExt)
.Case("exception-handling", HasExceptionHandling)
+ .Case("bulk-memory", HasBulkMemory)
+ .Case("atomics", HasAtomics)
+ .Case("mutable-globals", HasMutableGlobals)
+ .Case("multivalue", HasMultivalue)
+ .Case("tail-call", HasTailCall)
.Default(false);
}
@@ -60,6 +64,22 @@ void WebAssemblyTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__wasm_simd128__");
if (SIMDLevel >= UnimplementedSIMD128)
Builder.defineMacro("__wasm_unimplemented_simd128__");
+ if (HasNontrappingFPToInt)
+ Builder.defineMacro("__wasm_nontrapping_fptoint__");
+ if (HasSignExt)
+ Builder.defineMacro("__wasm_sign_ext__");
+ if (HasExceptionHandling)
+ Builder.defineMacro("__wasm_exception_handling__");
+ if (HasBulkMemory)
+ Builder.defineMacro("__wasm_bulk_memory__");
+ if (HasAtomics)
+ Builder.defineMacro("__wasm_atomics__");
+ if (HasMutableGlobals)
+ Builder.defineMacro("__wasm_mutable_globals__");
+ if (HasMultivalue)
+ Builder.defineMacro("__wasm_multivalue__");
+ if (HasTailCall)
+ Builder.defineMacro("__wasm_tail_call__");
}
void WebAssemblyTargetInfo::setSIMDLevel(llvm::StringMap<bool> &Features,
@@ -82,6 +102,8 @@ bool WebAssemblyTargetInfo::initFeatureMap(
if (CPU == "bleeding-edge") {
Features["nontrapping-fptoint"] = true;
Features["sign-ext"] = true;
+ Features["atomics"] = true;
+ Features["mutable-globals"] = true;
setSIMDLevel(Features, SIMD128);
}
// Other targets do not consider user-configured features here, but while we
@@ -94,6 +116,16 @@ bool WebAssemblyTargetInfo::initFeatureMap(
Features["sign-ext"] = true;
if (HasExceptionHandling)
Features["exception-handling"] = true;
+ if (HasBulkMemory)
+ Features["bulk-memory"] = true;
+ if (HasAtomics)
+ Features["atomics"] = true;
+ if (HasMutableGlobals)
+ Features["mutable-globals"] = true;
+ if (HasMultivalue)
+ Features["multivalue"] = true;
+ if (HasTailCall)
+ Features["tail-call"] = true;
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
@@ -141,6 +173,46 @@ bool WebAssemblyTargetInfo::handleTargetFeatures(
HasExceptionHandling = false;
continue;
}
+ if (Feature == "+bulk-memory") {
+ HasBulkMemory = true;
+ continue;
+ }
+ if (Feature == "-bulk-memory") {
+ HasBulkMemory = false;
+ continue;
+ }
+ if (Feature == "+atomics") {
+ HasAtomics = true;
+ continue;
+ }
+ if (Feature == "-atomics") {
+ HasAtomics = false;
+ continue;
+ }
+ if (Feature == "+mutable-globals") {
+ HasMutableGlobals = true;
+ continue;
+ }
+ if (Feature == "-mutable-globals") {
+ HasMutableGlobals = false;
+ continue;
+ }
+ if (Feature == "+multivalue") {
+ HasMultivalue = true;
+ continue;
+ }
+ if (Feature == "-multivalue") {
+ HasMultivalue = false;
+ continue;
+ }
+ if (Feature == "+tail-call") {
+ HasTailCall = true;
+ continue;
+ }
+ if (Feature == "-tail-call") {
+ HasTailCall = false;
+ continue;
+ }
Diags.Report(diag::err_opt_not_valid_with_opt)
<< Feature << "-target-feature";
diff --git a/lib/Basic/Targets/WebAssembly.h b/lib/Basic/Targets/WebAssembly.h
index 3dea9a373cb4..9665156b143f 100644
--- a/lib/Basic/Targets/WebAssembly.h
+++ b/lib/Basic/Targets/WebAssembly.h
@@ -1,9 +1,8 @@
//=== WebAssembly.h - Declare WebAssembly target feature support *- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -31,14 +30,18 @@ class LLVM_LIBRARY_VISIBILITY WebAssemblyTargetInfo : public TargetInfo {
UnimplementedSIMD128,
} SIMDLevel = NoSIMD;
- bool HasNontrappingFPToInt;
- bool HasSignExt;
- bool HasExceptionHandling;
+ bool HasNontrappingFPToInt = false;
+ bool HasSignExt = false;
+ bool HasExceptionHandling = false;
+ bool HasBulkMemory = false;
+ bool HasAtomics = false;
+ bool HasMutableGlobals = false;
+ bool HasMultivalue = false;
+ bool HasTailCall = false;
public:
explicit WebAssemblyTargetInfo(const llvm::Triple &T, const TargetOptions &)
- : TargetInfo(T), SIMDLevel(NoSIMD), HasNontrappingFPToInt(false),
- HasSignExt(false), HasExceptionHandling(false) {
+ : TargetInfo(T) {
NoAsmVariants = true;
SuitableAlign = 128;
LargeArrayMinWidth = 128;
diff --git a/lib/Basic/Targets/X86.cpp b/lib/Basic/Targets/X86.cpp
index 53b4c153e952..d618c90b05c0 100644
--- a/lib/Basic/Targets/X86.cpp
+++ b/lib/Basic/Targets/X86.cpp
@@ -1,9 +1,8 @@
//===--- X86.cpp - Implement X86 target feature support -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -116,6 +115,11 @@ bool X86TargetInfo::initFeatureMap(
if (Kind != CK_Lakemont)
setFeatureEnabledImpl(Features, "x87", true);
+ // Enable cmpxchg8 for i586 and greater CPUs. Include generic for backwards
+ // compatibility.
+ if (Kind >= CK_i586 || Kind == CK_Generic)
+ setFeatureEnabledImpl(Features, "cx8", true);
+
switch (Kind) {
case CK_Generic:
case CK_i386:
@@ -123,6 +127,7 @@ bool X86TargetInfo::initFeatureMap(
case CK_i586:
case CK_Pentium:
case CK_PentiumPro:
+ case CK_i686:
case CK_Lakemont:
break;
@@ -133,6 +138,25 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "mmx", true);
break;
+ case CK_Cooperlake:
+ // CPX inherits all CLX features plus AVX512BF16
+ setFeatureEnabledImpl(Features, "avx512bf16", true);
+ LLVM_FALLTHROUGH;
+ case CK_Cascadelake:
+ // CLX inherits all SKX features plus AVX512VNNI
+ setFeatureEnabledImpl(Features, "avx512vnni", true);
+ LLVM_FALLTHROUGH;
+ case CK_SkylakeServer:
+ setFeatureEnabledImpl(Features, "avx512f", true);
+ setFeatureEnabledImpl(Features, "avx512cd", true);
+ setFeatureEnabledImpl(Features, "avx512dq", true);
+ setFeatureEnabledImpl(Features, "avx512bw", true);
+ setFeatureEnabledImpl(Features, "avx512vl", true);
+ setFeatureEnabledImpl(Features, "clwb", true);
+ setFeatureEnabledImpl(Features, "pku", true);
+    // SkylakeServer cores inherit all SKL features, except SGX
+ goto SkylakeCommon;
+
case CK_IcelakeServer:
setFeatureEnabledImpl(Features, "pconfig", true);
setFeatureEnabledImpl(Features, "wbnoinvd", true);
@@ -143,38 +167,29 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "vpclmulqdq", true);
setFeatureEnabledImpl(Features, "avx512bitalg", true);
setFeatureEnabledImpl(Features, "avx512vbmi2", true);
+ setFeatureEnabledImpl(Features, "avx512vnni", true);
setFeatureEnabledImpl(Features, "avx512vpopcntdq", true);
setFeatureEnabledImpl(Features, "rdpid", true);
+ setFeatureEnabledImpl(Features, "clwb", true);
LLVM_FALLTHROUGH;
case CK_Cannonlake:
- setFeatureEnabledImpl(Features, "avx512ifma", true);
- setFeatureEnabledImpl(Features, "avx512vbmi", true);
- setFeatureEnabledImpl(Features, "sha", true);
- LLVM_FALLTHROUGH;
- case CK_Cascadelake:
- //Cannonlake has no VNNI feature inside while Icelake has
- if (Kind != CK_Cannonlake)
- // CLK inherits all SKX features plus AVX512_VNNI
- setFeatureEnabledImpl(Features, "avx512vnni", true);
- LLVM_FALLTHROUGH;
- case CK_SkylakeServer:
setFeatureEnabledImpl(Features, "avx512f", true);
setFeatureEnabledImpl(Features, "avx512cd", true);
setFeatureEnabledImpl(Features, "avx512dq", true);
setFeatureEnabledImpl(Features, "avx512bw", true);
setFeatureEnabledImpl(Features, "avx512vl", true);
+ setFeatureEnabledImpl(Features, "avx512ifma", true);
+ setFeatureEnabledImpl(Features, "avx512vbmi", true);
setFeatureEnabledImpl(Features, "pku", true);
- if (Kind != CK_Cannonlake) // CNL inherits all SKX features, except CLWB
- setFeatureEnabledImpl(Features, "clwb", true);
+ setFeatureEnabledImpl(Features, "sha", true);
LLVM_FALLTHROUGH;
case CK_SkylakeClient:
+ setFeatureEnabledImpl(Features, "sgx", true);
+  // SkylakeServer cores inherit all SKL features, except SGX
+SkylakeCommon:
setFeatureEnabledImpl(Features, "xsavec", true);
setFeatureEnabledImpl(Features, "xsaves", true);
setFeatureEnabledImpl(Features, "mpx", true);
- if (Kind != CK_SkylakeServer
- && Kind != CK_Cascadelake)
- // SKX/CLX inherits all SKL features, except SGX
- setFeatureEnabledImpl(Features, "sgx", true);
setFeatureEnabledImpl(Features, "clflushopt", true);
setFeatureEnabledImpl(Features, "aes", true);
LLVM_FALLTHROUGH;
@@ -215,11 +230,12 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "ssse3", true);
setFeatureEnabledImpl(Features, "sahf", true);
LLVM_FALLTHROUGH;
+ case CK_Nocona:
+ setFeatureEnabledImpl(Features, "cx16", true);
+ LLVM_FALLTHROUGH;
case CK_Yonah:
case CK_Prescott:
- case CK_Nocona:
setFeatureEnabledImpl(Features, "sse3", true);
- setFeatureEnabledImpl(Features, "cx16", true);
LLVM_FALLTHROUGH;
case CK_PentiumM:
case CK_Pentium4:
@@ -348,6 +364,11 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "sahf", true);
break;
+ case CK_ZNVER2:
+ setFeatureEnabledImpl(Features, "clwb", true);
+ setFeatureEnabledImpl(Features, "rdpid", true);
+ setFeatureEnabledImpl(Features, "wbnoinvd", true);
+ LLVM_FALLTHROUGH;
case CK_ZNVER1:
setFeatureEnabledImpl(Features, "adx", true);
setFeatureEnabledImpl(Features, "aes", true);
@@ -416,23 +437,20 @@ bool X86TargetInfo::initFeatureMap(
// Enable popcnt if sse4.2 is enabled and popcnt is not explicitly disabled.
auto I = Features.find("sse4.2");
if (I != Features.end() && I->getValue() &&
- std::find(FeaturesVec.begin(), FeaturesVec.end(), "-popcnt") ==
- FeaturesVec.end())
+ llvm::find(FeaturesVec, "-popcnt") == FeaturesVec.end())
Features["popcnt"] = true;
// Enable prfchw if 3DNow! is enabled and prfchw is not explicitly disabled.
I = Features.find("3dnow");
if (I != Features.end() && I->getValue() &&
- std::find(FeaturesVec.begin(), FeaturesVec.end(), "-prfchw") ==
- FeaturesVec.end())
+ llvm::find(FeaturesVec, "-prfchw") == FeaturesVec.end())
Features["prfchw"] = true;
// Additionally, if SSE is enabled and mmx is not explicitly disabled,
// then enable MMX.
I = Features.find("sse");
if (I != Features.end() && I->getValue() &&
- std::find(FeaturesVec.begin(), FeaturesVec.end(), "-mmx") ==
- FeaturesVec.end())
+ llvm::find(FeaturesVec, "-mmx") == FeaturesVec.end())
Features["mmx"] = true;
return true;
@@ -443,7 +461,9 @@ void X86TargetInfo::setSSELevel(llvm::StringMap<bool> &Features,
if (Enabled) {
switch (Level) {
case AVX512F:
- Features["avx512f"] = Features["fma"] = Features["f16c"] = true;
+ Features["avx512f"] = true;
+ Features["fma"] = true;
+ Features["f16c"] = true;
LLVM_FALLTHROUGH;
case AVX2:
Features["avx2"] = true;
@@ -482,8 +502,8 @@ void X86TargetInfo::setSSELevel(llvm::StringMap<bool> &Features,
Features["sse"] = false;
LLVM_FALLTHROUGH;
case SSE2:
- Features["sse2"] = Features["pclmul"] = Features["aes"] = Features["sha"] =
- Features["gfni"] = false;
+ Features["sse2"] = Features["pclmul"] = Features["aes"] = false;
+ Features["sha"] = Features["gfni"] = false;
LLVM_FALLTHROUGH;
case SSE3:
Features["sse3"] = false;
@@ -499,20 +519,22 @@ void X86TargetInfo::setSSELevel(llvm::StringMap<bool> &Features,
Features["sse4.2"] = false;
LLVM_FALLTHROUGH;
case AVX:
- Features["fma"] = Features["avx"] = Features["f16c"] = Features["xsave"] =
- Features["xsaveopt"] = Features["vaes"] = Features["vpclmulqdq"] = false;
+ Features["fma"] = Features["avx"] = Features["f16c"] = false;
+ Features["xsave"] = Features["xsaveopt"] = Features["vaes"] = false;
+ Features["vpclmulqdq"] = false;
setXOPLevel(Features, FMA4, false);
LLVM_FALLTHROUGH;
case AVX2:
Features["avx2"] = false;
LLVM_FALLTHROUGH;
case AVX512F:
- Features["avx512f"] = Features["avx512cd"] = Features["avx512er"] =
- Features["avx512pf"] = Features["avx512dq"] = Features["avx512bw"] =
- Features["avx512vl"] = Features["avx512vbmi"] =
- Features["avx512ifma"] = Features["avx512vpopcntdq"] =
- Features["avx512bitalg"] = Features["avx512vnni"] =
- Features["avx512vbmi2"] = false;
+ Features["avx512f"] = Features["avx512cd"] = Features["avx512er"] = false;
+ Features["avx512pf"] = Features["avx512dq"] = Features["avx512bw"] = false;
+ Features["avx512vl"] = Features["avx512vbmi"] = false;
+ Features["avx512ifma"] = Features["avx512vpopcntdq"] = false;
+ Features["avx512bitalg"] = Features["avx512vnni"] = false;
+ Features["avx512vbmi2"] = Features["avx512bf16"] = false;
+ Features["avx512vp2intersect"] = false;
break;
}
}
@@ -640,20 +662,20 @@ void X86TargetInfo::setFeatureEnabledImpl(llvm::StringMap<bool> &Features,
setSSELevel(Features, AVX2, Enabled);
} else if (Name == "avx512f") {
setSSELevel(Features, AVX512F, Enabled);
- } else if (Name == "avx512cd" || Name == "avx512er" || Name == "avx512pf" ||
- Name == "avx512dq" || Name == "avx512bw" || Name == "avx512vl" ||
- Name == "avx512vbmi" || Name == "avx512ifma" ||
- Name == "avx512vpopcntdq" || Name == "avx512bitalg" ||
- Name == "avx512vnni" || Name == "avx512vbmi2") {
+ } else if (Name.startswith("avx512")) {
if (Enabled)
setSSELevel(Features, AVX512F, Enabled);
- // Enable BWI instruction if VBMI/VBMI2/BITALG is being enabled.
- if ((Name.startswith("avx512vbmi") || Name == "avx512bitalg") && Enabled)
+      // Enable BWI instructions if certain features are being enabled.
+ if ((Name == "avx512vbmi" || Name == "avx512vbmi2" ||
+ Name == "avx512bitalg" || Name == "avx512bf16") && Enabled)
Features["avx512bw"] = true;
- // Also disable VBMI/VBMI2/BITALG if BWI is being disabled.
- if (Name == "avx512bw" && !Enabled)
- Features["avx512vbmi"] = Features["avx512vbmi2"] =
+ // Also disable some features if BWI is being disabled.
+ if (Name == "avx512bw" && !Enabled) {
+ Features["avx512vbmi"] = false;
+ Features["avx512vbmi2"] = false;
Features["avx512bitalg"] = false;
+ Features["avx512bf16"] = false;
+ }
} else if (Name == "fma") {
if (Enabled)
setSSELevel(Features, AVX, Enabled);
@@ -743,6 +765,8 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasAVX512VPOPCNTDQ = true;
} else if (Feature == "+avx512vnni") {
HasAVX512VNNI = true;
+ } else if (Feature == "+avx512bf16") {
+ HasAVX512BF16 = true;
} else if (Feature == "+avx512er") {
HasAVX512ER = true;
} else if (Feature == "+avx512pf") {
@@ -761,6 +785,8 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasAVX512VBMI2 = true;
} else if (Feature == "+avx512ifma") {
HasAVX512IFMA = true;
+ } else if (Feature == "+avx512vp2intersect") {
+ HasAVX512VP2INTERSECT = true;
} else if (Feature == "+sha") {
HasSHA = true;
} else if (Feature == "+mpx") {
@@ -771,6 +797,8 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasMOVBE = true;
} else if (Feature == "+sgx") {
HasSGX = true;
+ } else if (Feature == "+cx8") {
+ HasCX8 = true;
} else if (Feature == "+cx16") {
HasCX16 = true;
} else if (Feature == "+fxsr") {
@@ -817,6 +845,8 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasPTWRITE = true;
} else if (Feature == "+invpcid") {
HasINVPCID = true;
+ } else if (Feature == "+enqcmd") {
+ HasENQCMD = true;
}
X86SSEEnum Level = llvm::StringSwitch<X86SSEEnum>(Feature)
@@ -865,6 +895,9 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
/// definitions for this particular subtarget.
void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
+ // Inline assembly supports X86 flag outputs.
+ Builder.defineMacro("__GCC_ASM_FLAG_OUTPUTS__");
+
std::string CodeModel = getTargetOpts().CodeModel;
if (CodeModel == "default")
CodeModel = "small";
@@ -884,6 +917,11 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
DefineStd(Builder, "i386", Opts);
}
+ Builder.defineMacro("__SEG_GS");
+ Builder.defineMacro("__SEG_FS");
+ Builder.defineMacro("__seg_gs", "__attribute__((address_space(256)))");
+ Builder.defineMacro("__seg_fs", "__attribute__((address_space(257)))");
+
// Subtarget options.
// FIXME: We are hard-coding the tune parameters based on the CPU, but they
// truly should be based on -mtune options.
@@ -918,6 +956,7 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__tune_pentium2__");
LLVM_FALLTHROUGH;
case CK_PentiumPro:
+ case CK_i686:
defineCPUMacros(Builder, "i686");
defineCPUMacros(Builder, "pentiumpro");
break;
@@ -957,6 +996,7 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_SkylakeClient:
case CK_SkylakeServer:
case CK_Cascadelake:
+ case CK_Cooperlake:
case CK_Cannonlake:
case CK_IcelakeClient:
case CK_IcelakeServer:
@@ -1028,6 +1068,9 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_ZNVER1:
defineCPUMacros(Builder, "znver1");
break;
+ case CK_ZNVER2:
+ defineCPUMacros(Builder, "znver2");
+ break;
case CK_Geode:
defineCPUMacros(Builder, "geode");
break;
@@ -1124,6 +1167,8 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__AVX512VPOPCNTDQ__");
if (HasAVX512VNNI)
Builder.defineMacro("__AVX512VNNI__");
+ if (HasAVX512BF16)
+ Builder.defineMacro("__AVX512BF16__");
if (HasAVX512ER)
Builder.defineMacro("__AVX512ER__");
if (HasAVX512PF)
@@ -1142,7 +1187,8 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__AVX512VBMI2__");
if (HasAVX512IFMA)
Builder.defineMacro("__AVX512IFMA__");
-
+ if (HasAVX512VP2INTERSECT)
+ Builder.defineMacro("__AVX512VP2INTERSECT__");
if (HasSHA)
Builder.defineMacro("__SHA__");
@@ -1190,6 +1236,8 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__PTWRITE__");
if (HasINVPCID)
Builder.defineMacro("__INVPCID__");
+ if (HasENQCMD)
+ Builder.defineMacro("__ENQCMD__");
// Each case falls through to the previous one here.
switch (SSELevel) {
@@ -1262,14 +1310,14 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
break;
}
- if (CPU >= CK_i486) {
+ if (CPU >= CK_i486 || CPU == CK_Generic) {
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
}
- if (CPU >= CK_i586)
+ if (HasCX8)
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
- if (HasCX16)
+ if (HasCX16 && getTriple().getArch() == llvm::Triple::x86_64)
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
if (HasFloat128)
@@ -1288,6 +1336,7 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("avx512cd", true)
.Case("avx512vpopcntdq", true)
.Case("avx512vnni", true)
+ .Case("avx512bf16", true)
.Case("avx512er", true)
.Case("avx512pf", true)
.Case("avx512dq", true)
@@ -1297,6 +1346,7 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("avx512vbmi", true)
.Case("avx512vbmi2", true)
.Case("avx512ifma", true)
+ .Case("avx512vp2intersect", true)
.Case("bmi", true)
.Case("bmi2", true)
.Case("cldemote", true)
@@ -1304,6 +1354,7 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("clwb", true)
.Case("clzero", true)
.Case("cx16", true)
+ .Case("enqcmd", true)
.Case("f16c", true)
.Case("fma", true)
.Case("fma4", true)
@@ -1366,6 +1417,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("avx512cd", HasAVX512CD)
.Case("avx512vpopcntdq", HasAVX512VPOPCNTDQ)
.Case("avx512vnni", HasAVX512VNNI)
+ .Case("avx512bf16", HasAVX512BF16)
.Case("avx512er", HasAVX512ER)
.Case("avx512pf", HasAVX512PF)
.Case("avx512dq", HasAVX512DQ)
@@ -1375,13 +1427,16 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("avx512vbmi", HasAVX512VBMI)
.Case("avx512vbmi2", HasAVX512VBMI2)
.Case("avx512ifma", HasAVX512IFMA)
+ .Case("avx512vp2intersect", HasAVX512VP2INTERSECT)
.Case("bmi", HasBMI)
.Case("bmi2", HasBMI2)
.Case("cldemote", HasCLDEMOTE)
.Case("clflushopt", HasCLFLUSHOPT)
.Case("clwb", HasCLWB)
.Case("clzero", HasCLZERO)
+ .Case("cx8", HasCX8)
.Case("cx16", HasCX16)
+ .Case("enqcmd", HasENQCMD)
.Case("f16c", HasF16C)
.Case("fma", HasFMA)
.Case("fma4", XOPLevel >= FMA4)
@@ -1527,18 +1582,6 @@ void X86TargetInfo::getCPUSpecificCPUDispatchFeatures(
WholeList.split(Features, ',', /*MaxSplit=*/-1, /*KeepEmpty=*/false);
}
-std::string X86TargetInfo::getCPUKindCanonicalName(CPUKind Kind) const {
- switch (Kind) {
- case CK_Generic:
- return "";
-#define PROC(ENUM, STRING, IS64BIT) \
- case CK_##ENUM: \
- return STRING;
-#include "clang/Basic/X86Target.def"
- }
- llvm_unreachable("Invalid CPUKind");
-}
-
// We can't use a generic validation scheme for the cpus accepted here
// versus subtarget cpus accepted in the target attribute because the
// variables initialized by the runtime only support the below currently
@@ -1554,6 +1597,40 @@ bool X86TargetInfo::validateCpuIs(StringRef FeatureStr) const {
.Default(false);
}
+static unsigned matchAsmCCConstraint(const char *&Name) {
+ auto RV = llvm::StringSwitch<unsigned>(Name)
+ .Case("@cca", 4)
+ .Case("@ccae", 5)
+ .Case("@ccb", 4)
+ .Case("@ccbe", 5)
+ .Case("@ccc", 4)
+ .Case("@cce", 4)
+ .Case("@ccz", 4)
+ .Case("@ccg", 4)
+ .Case("@ccge", 5)
+ .Case("@ccl", 4)
+ .Case("@ccle", 5)
+ .Case("@ccna", 5)
+ .Case("@ccnae", 6)
+ .Case("@ccnb", 5)
+ .Case("@ccnbe", 6)
+ .Case("@ccnc", 5)
+ .Case("@ccne", 5)
+ .Case("@ccnz", 5)
+ .Case("@ccng", 5)
+ .Case("@ccnge", 6)
+ .Case("@ccnl", 5)
+ .Case("@ccnle", 6)
+ .Case("@ccno", 5)
+ .Case("@ccnp", 5)
+ .Case("@ccns", 5)
+ .Case("@cco", 4)
+ .Case("@ccp", 4)
+ .Case("@ccs", 4)
+ .Default(0);
+ return RV;
+}
+
bool X86TargetInfo::validateAsmConstraint(
const char *&Name, TargetInfo::ConstraintInfo &Info) const {
switch (*Name) {
@@ -1636,6 +1713,14 @@ bool X86TargetInfo::validateAsmConstraint(
case 'C': // SSE floating point constant.
case 'G': // x87 floating point constant.
return true;
+ case '@':
+ // CC condition changes.
+ if (auto Len = matchAsmCCConstraint(Name)) {
+ Name += Len - 1;
+ Info.setAllowsRegister();
+ return true;
+ }
+ return false;
}
}
@@ -1707,6 +1792,13 @@ bool X86TargetInfo::validateOperandSize(StringRef Constraint,
std::string X86TargetInfo::convertConstraint(const char *&Constraint) const {
switch (*Constraint) {
+ case '@':
+ if (auto Len = matchAsmCCConstraint(Constraint)) {
+ std::string Converted = "{" + std::string(Constraint, Len) + "}";
+ Constraint += Len - 1;
+ return Converted;
+ }
+ return std::string(1, *Constraint);
case 'a':
return std::string("{ax}");
case 'b':
@@ -1769,10 +1861,9 @@ void X86TargetInfo::fillValidCPUList(SmallVectorImpl<StringRef> &Values) const {
#define PROC(ENUM, STRING, IS64BIT) \
if (IS64BIT || getTriple().getArch() == llvm::Triple::x86) \
Values.emplace_back(STRING);
- // Go through CPUKind checking to ensure that the alias is de-aliased and
- // 64 bit-ness is checked.
+  // For aliases we need to look up the CPUKind to check the 64-bit-ness.
#define PROC_ALIAS(ENUM, ALIAS) \
- if (checkCPUKind(getCPUKind(ALIAS))) \
+ if (checkCPUKind(CK_##ENUM)) \
Values.emplace_back(ALIAS);
#include "clang/Basic/X86Target.def"
}
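Illustrative note (not part of the imported patch): matchAsmCCConstraint() above teaches the x86 target the GCC-style flag-output constraints ("=@cc<cond>"), and __GCC_ASM_FLAG_OUTPUTS__ is now predefined so code can detect the support. A minimal C++ sketch; the condition suffix ("z" for the zero flag here) follows the SETcc condition names:

int equals(int a, int b) {
  int zf;
#ifdef __GCC_ASM_FLAG_OUTPUTS__
  // The zero flag produced by the comparison is written directly into zf.
  __asm__("cmpl %2, %1" : "=@ccz"(zf) : "r"(a), "r"(b));
#else
  zf = (a == b);
#endif
  return zf; // 1 when a == b
}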
diff --git a/lib/Basic/Targets/X86.h b/lib/Basic/Targets/X86.h
index 05930ae9eec0..588b6d3da1d6 100644
--- a/lib/Basic/Targets/X86.h
+++ b/lib/Basic/Targets/X86.h
@@ -1,9 +1,8 @@
//===--- X86.h - Declare X86 target feature support -------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -69,6 +68,7 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasAVX512CD = false;
bool HasAVX512VPOPCNTDQ = false;
bool HasAVX512VNNI = false;
+ bool HasAVX512BF16 = false;
bool HasAVX512ER = false;
bool HasAVX512PF = false;
bool HasAVX512DQ = false;
@@ -78,10 +78,12 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasAVX512VBMI = false;
bool HasAVX512VBMI2 = false;
bool HasAVX512IFMA = false;
+ bool HasAVX512VP2INTERSECT = false;
bool HasSHA = false;
bool HasMPX = false;
bool HasSHSTK = false;
bool HasSGX = false;
+ bool HasCX8 = false;
bool HasCX16 = false;
bool HasFXSR = false;
bool HasXSAVE = false;
@@ -106,6 +108,7 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasMOVDIR64B = false;
bool HasPTWRITE = false;
bool HasINVPCID = false;
+ bool HasENQCMD = false;
protected:
/// Enumeration of all of the X86 CPUs supported by Clang.
@@ -122,8 +125,6 @@ protected:
CPUKind getCPUKind(StringRef CPU) const;
- std::string getCPUKindCanonicalName(CPUKind Kind) const;
-
enum FPMathKind { FP_Default, FP_SSE, FP_387 } FPMath = FP_Default;
public:
@@ -132,6 +133,10 @@ public:
LongDoubleFormat = &llvm::APFloat::x87DoubleExtended();
}
+ const char *getLongDoubleMangling() const override {
+ return LongDoubleFormat == &llvm::APFloat::IEEEquad() ? "g" : "e";
+ }
+
unsigned getFloatEvalMethod() const override {
// X87 evaluates with 80 bits "long double" precision.
return SSELevel == NoSSE ? 2 : 0;
@@ -199,7 +204,7 @@ public:
StringRef Expression) const override {
StringRef::iterator I, E;
for (I = Constraint.begin(), E = Constraint.end(); I != E; ++I) {
- if (isalpha(*I))
+ if (isalpha(*I) || *I == '@')
break;
}
if (I == E)
@@ -347,9 +352,8 @@ public:
(1 << TargetInfo::LongDouble));
// x86-32 has atomics up to 8 bytes
- // FIXME: Check that we actually have cmpxchg8b before setting
- // MaxAtomicInlineWidth. (cmpxchg8b is an i586 instruction.)
- MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ MaxAtomicPromoteWidth = 64;
+ MaxAtomicInlineWidth = 32;
}
BuiltinVaListKind getBuiltinVaListKind() const override {
@@ -385,6 +389,11 @@ public:
return X86TargetInfo::validateOperandSize(Constraint, Size);
}
+ void setMaxAtomicWidth() override {
+ if (hasFeature("cx8"))
+ MaxAtomicInlineWidth = 64;
+ }
+
ArrayRef<Builtin::Info> getTargetBuiltins() const override;
};
@@ -476,7 +485,6 @@ public:
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override {
WindowsX86_32TargetInfo::getTargetDefines(Opts, Builder);
- WindowsX86_32TargetInfo::getVisualStudioDefines(Opts, Builder);
// The value of the following reflects processor type.
// 300=386, 400=486, 500=Pentium, 600=Blend (default)
// We lost the original triple, so we use the default.
@@ -740,7 +748,6 @@ public:
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override {
WindowsX86_64TargetInfo::getTargetDefines(Opts, Builder);
- WindowsX86_64TargetInfo::getVisualStudioDefines(Opts, Builder);
Builder.defineMacro("_M_X64", "100");
Builder.defineMacro("_M_AMD64", "100");
}
@@ -842,8 +849,6 @@ public:
: LinuxTargetInfo<X86_64TargetInfo>(Triple, Opts) {
LongDoubleFormat = &llvm::APFloat::IEEEquad();
}
-
- bool useFloat128ManglingForLongDouble() const override { return true; }
};
} // namespace targets
} // namespace clang
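Illustrative note (not part of the imported patch): with the cx8 plumbing above, 32-bit x86 only inlines 64-bit atomics when cmpxchg8b is available (setMaxAtomicWidth raises MaxAtomicInlineWidth back to 64 when the "cx8" feature is present, and __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 now follows it). A minimal C++ sketch of the user-visible effect; the compiler flags named in the comment are assumptions about typical invocations:

#include <atomic>

std::atomic<long long> counter;

long long bump() {
  // With -m32 -march=i586 (cx8 present) this is expected to inline via
  // lock cmpxchg8b; with -march=i486 it should fall back to a libatomic call.
  return counter.fetch_add(1) + 1;
}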
diff --git a/lib/Basic/Targets/XCore.cpp b/lib/Basic/Targets/XCore.cpp
index 793dca702dae..da614f10e338 100644
--- a/lib/Basic/Targets/XCore.cpp
+++ b/lib/Basic/Targets/XCore.cpp
@@ -1,9 +1,8 @@
//===--- XCore.cpp - Implement XCore target feature support ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/Targets/XCore.h b/lib/Basic/Targets/XCore.h
index 346e0eee15b3..c94f93a99bca 100644
--- a/lib/Basic/Targets/XCore.h
+++ b/lib/Basic/Targets/XCore.h
@@ -1,9 +1,8 @@
//===--- XCore.h - Declare XCore target feature support ---------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/TokenKinds.cpp b/lib/Basic/TokenKinds.cpp
index 3b1f8fe34bef..a71cd72517de 100644
--- a/lib/Basic/TokenKinds.cpp
+++ b/lib/Basic/TokenKinds.cpp
@@ -1,9 +1,8 @@
//===--- TokenKinds.cpp - Token Kinds Support -----------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/Version.cpp b/lib/Basic/Version.cpp
index a1a67c2bc144..3006ca33f213 100644
--- a/lib/Basic/Version.cpp
+++ b/lib/Basic/Version.cpp
@@ -1,9 +1,8 @@
//===- Version.cpp - Clang Version Number -----------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -18,8 +17,8 @@
#include <cstdlib>
#include <cstring>
-#ifdef HAVE_SVN_VERSION_INC
-# include "SVNVersion.inc"
+#ifdef HAVE_VCS_VERSION_INC
+#include "VCSVersion.inc"
#endif
namespace clang {
@@ -28,13 +27,13 @@ std::string getClangRepositoryPath() {
#if defined(CLANG_REPOSITORY_STRING)
return CLANG_REPOSITORY_STRING;
#else
-#ifdef SVN_REPOSITORY
- StringRef URL(SVN_REPOSITORY);
+#ifdef CLANG_REPOSITORY
+ StringRef URL(CLANG_REPOSITORY);
#else
StringRef URL("");
#endif
- // If the SVN_REPOSITORY is empty, try to use the SVN keyword. This helps us
+ // If the CLANG_REPOSITORY is empty, try to use the SVN keyword. This helps us
// pick up a tag in an SVN export, for example.
StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/trunk/lib/Basic/Version.cpp $");
if (URL.empty()) {
@@ -72,8 +71,8 @@ std::string getLLVMRepositoryPath() {
}
std::string getClangRevision() {
-#ifdef SVN_REVISION
- return SVN_REVISION;
+#ifdef CLANG_REVISION
+ return CLANG_REVISION;
#else
return "";
#endif
diff --git a/lib/Basic/Warnings.cpp b/lib/Basic/Warnings.cpp
index a999c45a0c3e..88ef2eaa6589 100644
--- a/lib/Basic/Warnings.cpp
+++ b/lib/Basic/Warnings.cpp
@@ -1,9 +1,8 @@
//===--- Warnings.cpp - C-Language Front-end ------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/XRayInstr.cpp b/lib/Basic/XRayInstr.cpp
index 8cc36df79468..ef2470f67200 100644
--- a/lib/Basic/XRayInstr.cpp
+++ b/lib/Basic/XRayInstr.cpp
@@ -1,9 +1,8 @@
//===--- XRayInstr.cpp ------------------------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Basic/XRayLists.cpp b/lib/Basic/XRayLists.cpp
index ad331899d2e2..eb549436710a 100644
--- a/lib/Basic/XRayLists.cpp
+++ b/lib/Basic/XRayLists.cpp
@@ -1,9 +1,8 @@
-//===--- XRayFunctionFilter.cpp - XRay automatic-attribution --------------===//
+//===-- XRayLists.cpp - XRay automatic-attribution ------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index feed3833f24a..0c3a076da0b5 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -1,9 +1,8 @@
//===----- ABIInfo.h - ABI information access & encapsulation ---*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/CodeGen/Address.h b/lib/CodeGen/Address.h
index 334308081ff3..6a8e57f8db33 100644
--- a/lib/CodeGen/Address.h
+++ b/lib/CodeGen/Address.h
@@ -1,9 +1,8 @@
//===-- Address.h - An aligned address -------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index b927acabac59..497652e85b47 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -1,9 +1,8 @@
//===--- BackendUtil.cpp - LLVM Backend Utilities -------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -37,11 +36,13 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Passes/PassBuilder.h"
+#include "llvm/Passes/PassPlugin.h"
#include "llvm/Support/BuryPointer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
@@ -53,8 +54,11 @@
#include "llvm/Transforms/IPO/ThinLTOBitcodeWriter.h"
#include "llvm/Transforms/InstCombine/InstCombine.h"
#include "llvm/Transforms/Instrumentation.h"
+#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
#include "llvm/Transforms/Instrumentation/BoundsChecking.h"
#include "llvm/Transforms/Instrumentation/GCOVProfiler.h"
+#include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
+#include "llvm/Transforms/Instrumentation/InstrProfiling.h"
#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
#include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
#include "llvm/Transforms/ObjCARC.h"
@@ -62,6 +66,7 @@
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/CanonicalizeAliases.h"
+#include "llvm/Transforms/Utils/EntryExitInstrumenter.h"
#include "llvm/Transforms/Utils/NameAnonGlobals.h"
#include "llvm/Transforms/Utils/SymbolRewriter.h"
#include <memory>
@@ -243,15 +248,15 @@ static void addAddressSanitizerPasses(const PassManagerBuilder &Builder,
bool UseGlobalsGC = asanUseGlobalsGC(T, CGOpts);
PM.add(createAddressSanitizerFunctionPass(/*CompileKernel*/ false, Recover,
UseAfterScope));
- PM.add(createAddressSanitizerModulePass(/*CompileKernel*/ false, Recover,
- UseGlobalsGC, UseOdrIndicator));
+ PM.add(createModuleAddressSanitizerLegacyPassPass(
+ /*CompileKernel*/ false, Recover, UseGlobalsGC, UseOdrIndicator));
}
static void addKernelAddressSanitizerPasses(const PassManagerBuilder &Builder,
legacy::PassManagerBase &PM) {
PM.add(createAddressSanitizerFunctionPass(
/*CompileKernel*/ true, /*Recover*/ true, /*UseAfterScope*/ false));
- PM.add(createAddressSanitizerModulePass(
+ PM.add(createModuleAddressSanitizerLegacyPassPass(
/*CompileKernel*/ true, /*Recover*/ true, /*UseGlobalsGC*/ true,
/*UseOdrIndicator*/ false));
}
@@ -262,12 +267,13 @@ static void addHWAddressSanitizerPasses(const PassManagerBuilder &Builder,
static_cast<const PassManagerBuilderWrapper &>(Builder);
const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::HWAddress);
- PM.add(createHWAddressSanitizerPass(/*CompileKernel*/ false, Recover));
+ PM.add(
+ createHWAddressSanitizerLegacyPassPass(/*CompileKernel*/ false, Recover));
}
static void addKernelHWAddressSanitizerPasses(const PassManagerBuilder &Builder,
legacy::PassManagerBase &PM) {
- PM.add(createHWAddressSanitizerPass(
+ PM.add(createHWAddressSanitizerLegacyPassPass(
/*CompileKernel*/ true, /*Recover*/ true));
}
@@ -279,7 +285,8 @@ static void addGeneralOptsForMemorySanitizer(const PassManagerBuilder &Builder,
const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
int TrackOrigins = CGOpts.SanitizeMemoryTrackOrigins;
bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::Memory);
- PM.add(createMemorySanitizerLegacyPassPass(TrackOrigins, Recover, CompileKernel));
+ PM.add(createMemorySanitizerLegacyPassPass(
+ MemorySanitizerOptions{TrackOrigins, Recover, CompileKernel}));
// MemorySanitizer inserts complex instrumentation that mostly follows
// the logic of the original code, but operates on "shadow" values.
@@ -317,19 +324,6 @@ static void addDataFlowSanitizerPass(const PassManagerBuilder &Builder,
PM.add(createDataFlowSanitizerPass(LangOpts.SanitizerBlacklistFiles));
}
-static void addEfficiencySanitizerPass(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- const PassManagerBuilderWrapper &BuilderWrapper =
- static_cast<const PassManagerBuilderWrapper&>(Builder);
- const LangOptions &LangOpts = BuilderWrapper.getLangOpts();
- EfficiencySanitizerOptions Opts;
- if (LangOpts.Sanitize.has(SanitizerKind::EfficiencyCacheFrag))
- Opts.ToolType = EfficiencySanitizerOptions::ESAN_CacheFrag;
- else if (LangOpts.Sanitize.has(SanitizerKind::EfficiencyWorkingSet))
- Opts.ToolType = EfficiencySanitizerOptions::ESAN_WorkingSet;
- PM.add(createEfficiencySanitizerPass(Opts));
-}
-
static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple,
const CodeGenOptions &CodeGenOpts) {
TargetLibraryInfoImpl *TLII = new TargetLibraryInfoImpl(TargetTriple);
@@ -347,6 +341,9 @@ static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple,
case CodeGenOptions::Accelerate:
TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::Accelerate);
break;
+ case CodeGenOptions::MASSV:
+ TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::MASSV);
+ break;
case CodeGenOptions::SVML:
TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::SVML);
break;
@@ -473,9 +470,9 @@ static void initTargetOptions(llvm::TargetOptions &Options,
Options.DebuggerTuning = CodeGenOpts.getDebuggerTuning();
Options.EmitStackSizeSection = CodeGenOpts.StackSizeSection;
Options.EmitAddrsig = CodeGenOpts.Addrsig;
+ Options.EnableDebugEntryValues = CodeGenOpts.EnableDebugEntryValues;
- if (CodeGenOpts.getSplitDwarfMode() != CodeGenOptions::NoFission)
- Options.MCOptions.SplitDwarfFile = CodeGenOpts.SplitDwarfFile;
+ Options.MCOptions.SplitDwarfFile = CodeGenOpts.SplitDwarfFile;
Options.MCOptions.MCRelaxAll = CodeGenOpts.RelaxAll;
Options.MCOptions.MCSaveTempLabels = CodeGenOpts.SaveTempLabels;
Options.MCOptions.MCUseDwarfDirectory = !CodeGenOpts.NoDwarfDirectoryAsm;
@@ -515,6 +512,21 @@ static Optional<GCOVOptions> getGCOVOptions(const CodeGenOptions &CodeGenOpts) {
return Options;
}
+static Optional<InstrProfOptions>
+getInstrProfOptions(const CodeGenOptions &CodeGenOpts,
+ const LangOptions &LangOpts) {
+ if (!CodeGenOpts.hasProfileClangInstr())
+ return None;
+ InstrProfOptions Options;
+ Options.NoRedZone = CodeGenOpts.DisableRedZone;
+ Options.InstrProfileOutput = CodeGenOpts.InstrProfileOutput;
+
+ // TODO: Surface the option to emit atomic profile counter increments at
+ // the driver level.
+ Options.Atomic = LangOpts.Sanitize.has(SanitizerKind::Thread);
+ return Options;
+}
+
void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
legacy::FunctionPassManager &FPM) {
// Handle disabling of all LLVM passes, where we want to preserve the
@@ -554,6 +566,9 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
PMBuilder.LoopVectorize = CodeGenOpts.VectorizeLoop;
PMBuilder.DisableUnrollLoops = !CodeGenOpts.UnrollLoops;
+ // Loop interleaving in the loop vectorizer has historically been set to be
+ // enabled when loop unrolling is enabled.
+ PMBuilder.LoopsInterleaved = CodeGenOpts.UnrollLoops;
PMBuilder.MergeFunctions = CodeGenOpts.MergeFunctions;
PMBuilder.PrepareForThinLTO = CodeGenOpts.PrepareForThinLTO;
PMBuilder.PrepareForLTO = CodeGenOpts.PrepareForLTO;
@@ -579,7 +594,7 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
addObjCARCOptPass);
}
- if (LangOpts.CoroutinesTS)
+ if (LangOpts.Coroutines)
addCoroutinePassesToExtensionPoints(PMBuilder);
if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds)) {
@@ -654,13 +669,6 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
addDataFlowSanitizerPass);
}
- if (LangOpts.Sanitize.hasOneOf(SanitizerKind::Efficiency)) {
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addEfficiencySanitizerPass);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addEfficiencySanitizerPass);
- }
-
// Set up the per-function pass manager.
FPM.add(new TargetLibraryInfoWrapperPass(*TLII));
if (CodeGenOpts.VerifyModule)
@@ -676,26 +684,35 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
MPM.add(createStripSymbolsPass(true));
}
- if (CodeGenOpts.hasProfileClangInstr()) {
- InstrProfOptions Options;
- Options.NoRedZone = CodeGenOpts.DisableRedZone;
- Options.InstrProfileOutput = CodeGenOpts.InstrProfileOutput;
-
- // TODO: Surface the option to emit atomic profile counter increments at
- // the driver level.
- Options.Atomic = LangOpts.Sanitize.has(SanitizerKind::Thread);
+ if (Optional<InstrProfOptions> Options =
+ getInstrProfOptions(CodeGenOpts, LangOpts))
+ MPM.add(createInstrProfilingLegacyPass(*Options, false));
- MPM.add(createInstrProfilingLegacyPass(Options));
- }
+ bool hasIRInstr = false;
if (CodeGenOpts.hasProfileIRInstr()) {
PMBuilder.EnablePGOInstrGen = true;
+ hasIRInstr = true;
+ }
+ if (CodeGenOpts.hasProfileCSIRInstr()) {
+ assert(!CodeGenOpts.hasProfileCSIRUse() &&
+ "Cannot have both CSProfileUse pass and CSProfileGen pass at the "
+ "same time");
+ assert(!hasIRInstr &&
+ "Cannot have both ProfileGen pass and CSProfileGen pass at the "
+ "same time");
+ PMBuilder.EnablePGOCSInstrGen = true;
+ hasIRInstr = true;
+ }
+ if (hasIRInstr) {
if (!CodeGenOpts.InstrProfileOutput.empty())
PMBuilder.PGOInstrGen = CodeGenOpts.InstrProfileOutput;
else
PMBuilder.PGOInstrGen = DefaultProfileGenName;
}
- if (CodeGenOpts.hasProfileIRUse())
+ if (CodeGenOpts.hasProfileIRUse()) {
PMBuilder.PGOInstrUse = CodeGenOpts.ProfileInstrumentUsePath;
+ PMBuilder.EnablePGOCSInstrUse = CodeGenOpts.hasProfileCSIRUse();
+ }
if (!CodeGenOpts.SampleProfileFile.empty())
PMBuilder.PGOSampleUse = CodeGenOpts.SampleProfileFile;
@@ -845,9 +862,8 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
break;
default:
- if (!CodeGenOpts.SplitDwarfFile.empty() &&
- (CodeGenOpts.getSplitDwarfMode() == CodeGenOptions::SplitFileFission)) {
- DwoOS = openOutputFile(CodeGenOpts.SplitDwarfFile);
+ if (!CodeGenOpts.SplitDwarfOutput.empty()) {
+ DwoOS = openOutputFile(CodeGenOpts.SplitDwarfOutput);
if (!DwoOS)
return;
}
@@ -916,6 +932,43 @@ static PassBuilder::OptimizationLevel mapToLevel(const CodeGenOptions &Opts) {
}
}
+static void addSanitizersAtO0(ModulePassManager &MPM,
+ const Triple &TargetTriple,
+ const LangOptions &LangOpts,
+ const CodeGenOptions &CodeGenOpts) {
+ auto ASanPass = [&](SanitizerMask Mask, bool CompileKernel) {
+ MPM.addPass(RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
+ bool Recover = CodeGenOpts.SanitizeRecover.has(Mask);
+ MPM.addPass(createModuleToFunctionPassAdaptor(AddressSanitizerPass(
+ CompileKernel, Recover, CodeGenOpts.SanitizeAddressUseAfterScope)));
+ bool ModuleUseAfterScope = asanUseGlobalsGC(TargetTriple, CodeGenOpts);
+ MPM.addPass(
+ ModuleAddressSanitizerPass(CompileKernel, Recover, ModuleUseAfterScope,
+ CodeGenOpts.SanitizeAddressUseOdrIndicator));
+ };
+
+ if (LangOpts.Sanitize.has(SanitizerKind::Address)) {
+ ASanPass(SanitizerKind::Address, /*CompileKernel=*/false);
+ }
+
+ if (LangOpts.Sanitize.has(SanitizerKind::KernelAddress)) {
+ ASanPass(SanitizerKind::KernelAddress, /*CompileKernel=*/true);
+ }
+
+ if (LangOpts.Sanitize.has(SanitizerKind::Memory)) {
+ MPM.addPass(createModuleToFunctionPassAdaptor(MemorySanitizerPass({})));
+ }
+
+ if (LangOpts.Sanitize.has(SanitizerKind::KernelMemory)) {
+ MPM.addPass(createModuleToFunctionPassAdaptor(
+ MemorySanitizerPass({0, false, /*Kernel=*/true})));
+ }
+
+ if (LangOpts.Sanitize.has(SanitizerKind::Thread)) {
+ MPM.addPass(createModuleToFunctionPassAdaptor(ThreadSanitizerPass()));
+ }
+}
+
/// A clean version of `EmitAssembly` that uses the new pass manager.
///
/// Not all features are currently supported in this system, but where
@@ -929,13 +982,15 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
TimeRegion Region(FrontendTimesIsEnabled ? &CodeGenerationTime : nullptr);
setCommandLineOpts(CodeGenOpts);
- // The new pass manager always makes a target machine available to passes
- // during construction.
- CreateTargetMachine(/*MustCreateTM*/ true);
- if (!TM)
- // This will already be diagnosed, just bail.
+ bool RequiresCodeGen = (Action != Backend_EmitNothing &&
+ Action != Backend_EmitBC &&
+ Action != Backend_EmitLL);
+ CreateTargetMachine(RequiresCodeGen);
+
+ if (RequiresCodeGen && !TM)
return;
- TheModule->setDataLayout(TM->createDataLayout());
+ if (TM)
+ TheModule->setDataLayout(TM->createDataLayout());
Optional<PGOOptions> PGOOpt;
@@ -944,23 +999,69 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
PGOOpt = PGOOptions(CodeGenOpts.InstrProfileOutput.empty()
? DefaultProfileGenName
: CodeGenOpts.InstrProfileOutput,
- "", "", "", true,
+ "", "", PGOOptions::IRInstr, PGOOptions::NoCSAction,
CodeGenOpts.DebugInfoForProfiling);
- else if (CodeGenOpts.hasProfileIRUse())
+ else if (CodeGenOpts.hasProfileIRUse()) {
// -fprofile-use.
- PGOOpt = PGOOptions("", CodeGenOpts.ProfileInstrumentUsePath, "",
- CodeGenOpts.ProfileRemappingFile, false,
- CodeGenOpts.DebugInfoForProfiling);
- else if (!CodeGenOpts.SampleProfileFile.empty())
+ auto CSAction = CodeGenOpts.hasProfileCSIRUse() ? PGOOptions::CSIRUse
+ : PGOOptions::NoCSAction;
+ PGOOpt = PGOOptions(CodeGenOpts.ProfileInstrumentUsePath, "",
+ CodeGenOpts.ProfileRemappingFile, PGOOptions::IRUse,
+ CSAction, CodeGenOpts.DebugInfoForProfiling);
+ } else if (!CodeGenOpts.SampleProfileFile.empty())
// -fprofile-sample-use
- PGOOpt = PGOOptions("", "", CodeGenOpts.SampleProfileFile,
- CodeGenOpts.ProfileRemappingFile, false,
- CodeGenOpts.DebugInfoForProfiling);
+ PGOOpt =
+ PGOOptions(CodeGenOpts.SampleProfileFile, "",
+ CodeGenOpts.ProfileRemappingFile, PGOOptions::SampleUse,
+ PGOOptions::NoCSAction, CodeGenOpts.DebugInfoForProfiling);
else if (CodeGenOpts.DebugInfoForProfiling)
// -fdebug-info-for-profiling
- PGOOpt = PGOOptions("", "", "", "", false, true);
+ PGOOpt = PGOOptions("", "", "", PGOOptions::NoAction,
+ PGOOptions::NoCSAction, true);
+
+ // Check to see if we want to generate a CS profile.
+ if (CodeGenOpts.hasProfileCSIRInstr()) {
+ assert(!CodeGenOpts.hasProfileCSIRUse() &&
+ "Cannot have both CSProfileUse pass and CSProfileGen pass at "
+ "the same time");
+ if (PGOOpt.hasValue()) {
+ assert(PGOOpt->Action != PGOOptions::IRInstr &&
+ PGOOpt->Action != PGOOptions::SampleUse &&
+ "Cannot run CSProfileGen pass with ProfileGen or SampleUse "
+ " pass");
+ PGOOpt->CSProfileGenFile = CodeGenOpts.InstrProfileOutput.empty()
+ ? DefaultProfileGenName
+ : CodeGenOpts.InstrProfileOutput;
+ PGOOpt->CSAction = PGOOptions::CSIRInstr;
+ } else
+ PGOOpt = PGOOptions("",
+ CodeGenOpts.InstrProfileOutput.empty()
+ ? DefaultProfileGenName
+ : CodeGenOpts.InstrProfileOutput,
+ "", PGOOptions::NoAction, PGOOptions::CSIRInstr,
+ CodeGenOpts.DebugInfoForProfiling);
+ }
- PassBuilder PB(TM.get(), PGOOpt);
+ PipelineTuningOptions PTO;
+ PTO.LoopUnrolling = CodeGenOpts.UnrollLoops;
+  // For historical reasons, loop interleaving is set to mirror the setting for
+  // loop unrolling.
+ PTO.LoopInterleaving = CodeGenOpts.UnrollLoops;
+ PTO.LoopVectorization = CodeGenOpts.VectorizeLoop;
+ PTO.SLPVectorization = CodeGenOpts.VectorizeSLP;
+
+ PassBuilder PB(TM.get(), PTO, PGOOpt);
+
+ // Attempt to load pass plugins and register their callbacks with PB.
+ for (auto &PluginFN : CodeGenOpts.PassPlugins) {
+ auto PassPlugin = PassPlugin::Load(PluginFN);
+ if (PassPlugin) {
+ PassPlugin->registerPassBuilderCallbacks(PB);
+ } else {
+ Diags.Report(diag::err_fe_unable_to_load_plugin)
+ << PluginFN << toString(PassPlugin.takeError());
+ }
+ }
LoopAnalysisManager LAM(CodeGenOpts.DebugPassManager);
FunctionAnalysisManager FAM(CodeGenOpts.DebugPassManager);
@@ -994,10 +1095,15 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
if (CodeGenOpts.OptimizationLevel == 0) {
if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts))
MPM.addPass(GCOVProfilerPass(*Options));
+ if (Optional<InstrProfOptions> Options =
+ getInstrProfOptions(CodeGenOpts, LangOpts))
+ MPM.addPass(InstrProfiling(*Options, false));
// Build a minimal pipeline based on the semantics required by Clang,
- // which is just that always inlining occurs.
- MPM.addPass(AlwaysInlinerPass());
+ // which is just that always inlining occurs. Further, disable generating
+ // lifetime intrinsics to avoid enabling further optimizations during
+ // code generation.
+ MPM.addPass(AlwaysInlinerPass(/*InsertLifetimeIntrinsics=*/false));
// At -O0 we directly run necessary sanitizer passes.
if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds))
@@ -1013,17 +1119,61 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
// configure the pipeline.
PassBuilder::OptimizationLevel Level = mapToLevel(CodeGenOpts);
+ PB.registerPipelineStartEPCallback([](ModulePassManager &MPM) {
+ MPM.addPass(createModuleToFunctionPassAdaptor(
+ EntryExitInstrumenterPass(/*PostInlining=*/false)));
+ });
+
// Register callbacks to schedule sanitizer passes at the appropriate part of
// the pipeline.
+ // FIXME: either handle asan/the remaining sanitizers or error out
if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds))
PB.registerScalarOptimizerLateEPCallback(
[](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
FPM.addPass(BoundsCheckingPass());
});
+ if (LangOpts.Sanitize.has(SanitizerKind::Memory))
+ PB.registerOptimizerLastEPCallback(
+ [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
+ FPM.addPass(MemorySanitizerPass({}));
+ });
+ if (LangOpts.Sanitize.has(SanitizerKind::Thread))
+ PB.registerOptimizerLastEPCallback(
+ [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
+ FPM.addPass(ThreadSanitizerPass());
+ });
+ if (LangOpts.Sanitize.has(SanitizerKind::Address)) {
+ PB.registerPipelineStartEPCallback([&](ModulePassManager &MPM) {
+ MPM.addPass(
+ RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
+ });
+ bool Recover = CodeGenOpts.SanitizeRecover.has(SanitizerKind::Address);
+ bool UseAfterScope = CodeGenOpts.SanitizeAddressUseAfterScope;
+ PB.registerOptimizerLastEPCallback(
+ [Recover, UseAfterScope](FunctionPassManager &FPM,
+ PassBuilder::OptimizationLevel Level) {
+ FPM.addPass(AddressSanitizerPass(
+ /*CompileKernel=*/false, Recover, UseAfterScope));
+ });
+ bool ModuleUseAfterScope = asanUseGlobalsGC(TargetTriple, CodeGenOpts);
+ bool UseOdrIndicator = CodeGenOpts.SanitizeAddressUseOdrIndicator;
+ PB.registerPipelineStartEPCallback(
+ [Recover, ModuleUseAfterScope,
+ UseOdrIndicator](ModulePassManager &MPM) {
+ MPM.addPass(ModuleAddressSanitizerPass(
+ /*CompileKernel=*/false, Recover, ModuleUseAfterScope,
+ UseOdrIndicator));
+ });
+ }
if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts))
PB.registerPipelineStartEPCallback([Options](ModulePassManager &MPM) {
MPM.addPass(GCOVProfilerPass(*Options));
});
+ if (Optional<InstrProfOptions> Options =
+ getInstrProfOptions(CodeGenOpts, LangOpts))
+ PB.registerPipelineStartEPCallback([Options](ModulePassManager &MPM) {
+ MPM.addPass(InstrProfiling(*Options, false));
+ });
if (IsThinLTO) {
MPM = PB.buildThinLTOPreLinkDefaultPipeline(
@@ -1040,6 +1190,19 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
CodeGenOpts.DebugPassManager);
}
}
+
+ if (LangOpts.Sanitize.has(SanitizerKind::HWAddress)) {
+ bool Recover = CodeGenOpts.SanitizeRecover.has(SanitizerKind::HWAddress);
+ MPM.addPass(HWAddressSanitizerPass(
+ /*CompileKernel=*/false, Recover));
+ }
+ if (LangOpts.Sanitize.has(SanitizerKind::KernelHWAddress)) {
+ MPM.addPass(HWAddressSanitizerPass(
+ /*CompileKernel=*/true, /*Recover=*/true));
+ }
+
+ if (CodeGenOpts.OptimizationLevel == 0)
+ addSanitizersAtO0(MPM, TargetTriple, LangOpts, CodeGenOpts);
}
// FIXME: We still use the legacy pass manager to do code generation. We
@@ -1093,8 +1256,8 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
NeedCodeGen = true;
CodeGenPasses.add(
createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
- if (!CodeGenOpts.SplitDwarfFile.empty()) {
- DwoOS = openOutputFile(CodeGenOpts.SplitDwarfFile);
+ if (!CodeGenOpts.SplitDwarfOutput.empty()) {
+ DwoOS = openOutputFile(CodeGenOpts.SplitDwarfOutput);
if (!DwoOS)
return;
}
@@ -1226,14 +1389,28 @@ static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
Conf.MAttrs = TOpts.Features;
Conf.RelocModel = CGOpts.RelocationModel;
Conf.CGOptLevel = getCGOptLevel(CGOpts);
+ Conf.OptLevel = CGOpts.OptimizationLevel;
initTargetOptions(Conf.Options, CGOpts, TOpts, LOpts, HeaderOpts);
Conf.SampleProfile = std::move(SampleProfile);
+
+ // Context sensitive profile.
+ if (CGOpts.hasProfileCSIRInstr()) {
+ Conf.RunCSIRInstr = true;
+ Conf.CSIRProfile = std::move(CGOpts.InstrProfileOutput);
+ } else if (CGOpts.hasProfileCSIRUse()) {
+ Conf.RunCSIRInstr = false;
+ Conf.CSIRProfile = std::move(CGOpts.ProfileInstrumentUsePath);
+ }
+
Conf.ProfileRemapping = std::move(ProfileRemapping);
Conf.UseNewPM = CGOpts.ExperimentalNewPassManager;
Conf.DebugPassManager = CGOpts.DebugPassManager;
Conf.RemarksWithHotness = CGOpts.DiagnosticsWithHotness;
Conf.RemarksFilename = CGOpts.OptRecordFile;
- Conf.DwoPath = CGOpts.SplitDwarfFile;
+ Conf.RemarksPasses = CGOpts.OptRecordPasses;
+ Conf.RemarksFormat = CGOpts.OptRecordFormat;
+ Conf.SplitDwarfFile = CGOpts.SplitDwarfFile;
+ Conf.SplitDwarfOutput = CGOpts.SplitDwarfOutput;
switch (Action) {
case Backend_EmitNothing:
Conf.PreCodeGenModuleHook = [](size_t Task, const Module &Mod) {
@@ -1273,6 +1450,9 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
const llvm::DataLayout &TDesc, Module *M,
BackendAction Action,
std::unique_ptr<raw_pwrite_stream> OS) {
+
+ llvm::TimeTraceScope TimeScope("Backend", StringRef(""));
+
std::unique_ptr<llvm::Module> EmptyModule;
if (!CGOpts.ThinLTOIndexFile.empty()) {
// If we are performing a ThinLTO importing compile, load the function index
@@ -1339,6 +1519,9 @@ static const char* getSectionNameForBitcode(const Triple &T) {
case Triple::Wasm:
case Triple::UnknownObjectFormat:
return ".llvmbc";
+ case Triple::XCOFF:
+ llvm_unreachable("XCOFF is not yet implemented");
+ break;
}
llvm_unreachable("Unimplemented ObjectFormatType");
}
@@ -1352,6 +1535,9 @@ static const char* getSectionNameForCommandline(const Triple &T) {
case Triple::Wasm:
case Triple::UnknownObjectFormat:
return ".llvmcmd";
+ case Triple::XCOFF:
+ llvm_unreachable("XCOFF is not yet implemented");
+ break;
}
llvm_unreachable("Unimplemented ObjectFormatType");
}
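Illustrative note (not part of the imported patch): the new-pass-manager path above now loads the plugins listed in CodeGenOpts.PassPlugins and hands each one the PassBuilder. A minimal C++ sketch of such a plugin using the documented llvm::PassPlugin entry point (llvmGetPassPluginInfo); the pass itself and the chosen extension point are only for illustration:

#include "llvm/IR/PassManager.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Passes/PassPlugin.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
// Trivial module pass: prints how many functions the module contains.
struct CountFunctionsPass : PassInfoMixin<CountFunctionsPass> {
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &) {
    errs() << "module " << M.getName() << " has " << M.size()
           << " functions\n";
    return PreservedAnalyses::all();
  }
};
} // namespace

extern "C" LLVM_ATTRIBUTE_WEAK PassPluginLibraryInfo llvmGetPassPluginInfo() {
  return {LLVM_PLUGIN_API_VERSION, "count-functions", "0.1",
          [](PassBuilder &PB) {
            // Matches the registerPipelineStartEPCallback signature used above.
            PB.registerPipelineStartEPCallback([](ModulePassManager &MPM) {
              MPM.addPass(CountFunctionsPass());
            });
          }};
}

Built as a shared library, a plugin like this would be named via the frontend's -fpass-plugin= option, which is what populates the PassPlugins list consumed in the loop above.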
diff --git a/lib/CodeGen/CGAtomic.cpp b/lib/CodeGen/CGAtomic.cpp
index 24056a449def..a95cd12c2d64 100644
--- a/lib/CodeGen/CGAtomic.cpp
+++ b/lib/CodeGen/CGAtomic.cpp
@@ -1,9 +1,8 @@
//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -36,7 +35,6 @@ namespace {
uint64_t ValueSizeInBits;
CharUnits AtomicAlign;
CharUnits ValueAlign;
- CharUnits LValueAlign;
TypeEvaluationKind EvaluationKind;
bool UseLibcall;
LValue LVal;
@@ -133,7 +131,6 @@ namespace {
QualType getAtomicType() const { return AtomicTy; }
QualType getValueType() const { return ValueTy; }
CharUnits getAtomicAlignment() const { return AtomicAlign; }
- CharUnits getValueAlignment() const { return ValueAlign; }
uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
@@ -202,7 +199,7 @@ namespace {
assert(LVal.isSimple());
Address addr = getAtomicAddress();
if (hasPadding())
- addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());
+ addr = CGF.Builder.CreateStructGEP(addr, 0);
return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
LVal.getBaseInfo(), LVal.getTBAAInfo());
@@ -308,7 +305,7 @@ static RValue emitAtomicLibcall(CodeGenFunction &CGF,
const CGFunctionInfo &fnInfo =
CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
- llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
+ llvm::FunctionCallee fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
auto callee = CGCallee::forDirect(fn);
return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
}
@@ -680,7 +677,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
// Handle constant scope.
if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
- ScopeModel->map(SC->getZExtValue()), CGF.CGM.getLLVMContext());
+ CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
+ Order, CGF.CGM.getLLVMContext());
EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
Order, SCID);
return;
@@ -709,7 +707,9 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
Builder.SetInsertPoint(B);
EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
Order,
- CGF.getTargetHooks().getLLVMSyncScopeID(ScopeModel->map(S),
+ CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
+ ScopeModel->map(S),
+ Order,
CGF.getLLVMContext()));
Builder.CreateBr(ContBB);
}
@@ -1357,7 +1357,7 @@ RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
// Drill into the padding structure if we have one.
if (hasPadding())
- addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());
+ addr = CGF.Builder.CreateStructGEP(addr, 0);
// Otherwise, just convert the temporary to an r-value using the
// normal conversion routine.
@@ -1688,7 +1688,7 @@ EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
UpRVal = OldRVal;
DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
} else {
- // Build new lvalue for temp address
+ // Build new lvalue for temp address.
Address Ptr = Atomics.materializeRValue(OldRVal);
LValue UpdateLVal;
if (AtomicLVal.isBitField()) {
@@ -1721,7 +1721,7 @@ EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
}
UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
}
- // Store new value in the corresponding memory area
+ // Store new value in the corresponding memory area.
RValue NewRVal = UpdateOp(UpRVal);
if (NewRVal.isScalar()) {
CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
@@ -1786,7 +1786,7 @@ void AtomicInfo::EmitAtomicUpdateOp(
SourceLocation(), /*AsValue=*/false);
EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
- // Try to write new value using cmpxchg operation
+ // Try to write new value using cmpxchg operation.
auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
@@ -1797,7 +1797,7 @@ static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
RValue UpdateRVal, Address DesiredAddr) {
LValue AtomicLVal = Atomics.getAtomicLValue();
LValue DesiredLVal;
- // Build new lvalue for temp address
+ // Build new lvalue for temp address.
if (AtomicLVal.isBitField()) {
DesiredLVal =
LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
@@ -1814,7 +1814,7 @@ static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
}
- // Store new value in the corresponding memory area
+ // Store new value in the corresponding memory area.
assert(UpdateRVal.isScalar());
CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
}
@@ -1866,7 +1866,7 @@ void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
}
EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
- // Try to write new value using cmpxchg operation
+ // Try to write new value using cmpxchg operation.
auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index fa3c3ee8610c..c3ee7129d9d7 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -1,9 +1,8 @@
//===--- CGBlocks.cpp - Emit LLVM Code for declarations ---------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -23,7 +22,6 @@
#include "clang/AST/DeclObjC.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/ADT/SmallSet.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ScopedPrinter.h"
@@ -276,6 +274,8 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
/*constant*/ true, linkage, AddrSpace);
if (linkage == llvm::GlobalValue::LinkOnceODRLinkage) {
+ if (CGM.supportsCOMDAT())
+ global->setComdat(CGM.getModule().getOrInsertComdat(descName));
global->setVisibility(llvm::GlobalValue::HiddenVisibility);
global->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
}
@@ -671,7 +671,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
// Sort the layout by alignment. We have to use a stable sort here
// to get reproducible results. There should probably be an
// llvm::array_pod_stable_sort.
- std::stable_sort(layout.begin(), layout.end());
+ llvm::stable_sort(layout);
// Needed for blocks layout info.
info.BlockHeaderForcedGapOffset = info.BlockSize;
@@ -838,9 +838,8 @@ static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) {
}
// GEP down to the address.
- Address addr = CGF.Builder.CreateStructGEP(blockInfo.LocalAddress,
- capture.getIndex(),
- capture.getOffset());
+ Address addr =
+ CGF.Builder.CreateStructGEP(blockInfo.LocalAddress, capture.getIndex());
// We can use that GEP as the dominating IP.
if (!blockInfo.DominatingIP)
@@ -977,27 +976,24 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
flags |= BLOCK_IS_NOESCAPE | BLOCK_IS_GLOBAL;
}
- auto projectField =
- [&](unsigned index, CharUnits offset, const Twine &name) -> Address {
- return Builder.CreateStructGEP(blockAddr, index, offset, name);
- };
- auto storeField =
- [&](llvm::Value *value, unsigned index, CharUnits offset,
- const Twine &name) {
- Builder.CreateStore(value, projectField(index, offset, name));
- };
+ auto projectField = [&](unsigned index, const Twine &name) -> Address {
+ return Builder.CreateStructGEP(blockAddr, index, name);
+ };
+ auto storeField = [&](llvm::Value *value, unsigned index, const Twine &name) {
+ Builder.CreateStore(value, projectField(index, name));
+ };
// Initialize the block header.
{
// We assume all the header fields are densely packed.
unsigned index = 0;
CharUnits offset;
- auto addHeaderField =
- [&](llvm::Value *value, CharUnits size, const Twine &name) {
- storeField(value, index, offset, name);
- offset += size;
- index++;
- };
+ auto addHeaderField = [&](llvm::Value *value, CharUnits size,
+ const Twine &name) {
+ storeField(value, index, name);
+ offset += size;
+ index++;
+ };
if (!IsOpenCL) {
addHeaderField(isa, getPointerSize(), "block.isa");
@@ -1033,8 +1029,8 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// First, 'this'.
if (blockDecl->capturesCXXThis()) {
- Address addr = projectField(blockInfo.CXXThisIndex, blockInfo.CXXThisOffset,
- "block.captured-this.addr");
+ Address addr =
+ projectField(blockInfo.CXXThisIndex, "block.captured-this.addr");
Builder.CreateStore(LoadCXXThis(), addr);
}
@@ -1050,8 +1046,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// This will be a [[type]]*, except that a byref entry will just be
// an i8**.
- Address blockField =
- projectField(capture.getIndex(), capture.getOffset(), "block.captured");
+ Address blockField = projectField(capture.getIndex(), "block.captured");
// Compute the address of the thing we're going to move into the
// block literal.
@@ -1070,7 +1065,6 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// This is a [[type]]*, except that a byref entry will just be an i8**.
src = Builder.CreateStructGEP(LoadBlockStruct(),
enclosingCapture.getIndex(),
- enclosingCapture.getOffset(),
"block.capture.addr");
} else {
auto I = LocalDeclMap.find(variable);
@@ -1261,52 +1255,49 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue) {
const BlockPointerType *BPT =
E->getCallee()->getType()->getAs<BlockPointerType>();
-
llvm::Value *BlockPtr = EmitScalarExpr(E->getCallee());
-
- // Get a pointer to the generic block literal.
- // For OpenCL we generate generic AS void ptr to be able to reuse the same
- // block definition for blocks with captures generated as private AS local
- // variables and without captures generated as global AS program scope
- // variables.
- unsigned AddrSpace = 0;
- if (getLangOpts().OpenCL)
- AddrSpace = getContext().getTargetAddressSpace(LangAS::opencl_generic);
-
- llvm::Type *BlockLiteralTy =
- llvm::PointerType::get(CGM.getGenericBlockLiteralType(), AddrSpace);
-
- // Bitcast the callee to a block literal.
- BlockPtr =
- Builder.CreatePointerCast(BlockPtr, BlockLiteralTy, "block.literal");
-
- // Get the function pointer from the literal.
- llvm::Value *FuncPtr =
- Builder.CreateStructGEP(CGM.getGenericBlockLiteralType(), BlockPtr,
- CGM.getLangOpts().OpenCL ? 2 : 3);
-
- // Add the block literal.
+ llvm::Type *GenBlockTy = CGM.getGenericBlockLiteralType();
+ llvm::Value *Func = nullptr;
+ QualType FnType = BPT->getPointeeType();
+ ASTContext &Ctx = getContext();
CallArgList Args;
- QualType VoidPtrQualTy = getContext().VoidPtrTy;
- llvm::Type *GenericVoidPtrTy = VoidPtrTy;
if (getLangOpts().OpenCL) {
- GenericVoidPtrTy = CGM.getOpenCLRuntime().getGenericVoidPointerType();
- VoidPtrQualTy =
- getContext().getPointerType(getContext().getAddrSpaceQualType(
- getContext().VoidTy, LangAS::opencl_generic));
- }
-
- BlockPtr = Builder.CreatePointerCast(BlockPtr, GenericVoidPtrTy);
- Args.add(RValue::get(BlockPtr), VoidPtrQualTy);
-
- QualType FnType = BPT->getPointeeType();
+ // For OpenCL, BlockPtr is already casted to generic block literal.
+
+ // First argument of a block call is a generic block literal casted to
+ // generic void pointer, i.e. i8 addrspace(4)*
+ llvm::Value *BlockDescriptor = Builder.CreatePointerCast(
+ BlockPtr, CGM.getOpenCLRuntime().getGenericVoidPointerType());
+ QualType VoidPtrQualTy = Ctx.getPointerType(
+ Ctx.getAddrSpaceQualType(Ctx.VoidTy, LangAS::opencl_generic));
+ Args.add(RValue::get(BlockDescriptor), VoidPtrQualTy);
+ // And the rest of the arguments.
+ EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(), E->arguments());
+
+ // We *can* call the block directly unless it is a function argument.
+ if (!isa<ParmVarDecl>(E->getCalleeDecl()))
+ Func = CGM.getOpenCLRuntime().getInvokeFunction(E->getCallee());
+ else {
+ llvm::Value *FuncPtr = Builder.CreateStructGEP(GenBlockTy, BlockPtr, 2);
+ Func = Builder.CreateAlignedLoad(FuncPtr, getPointerAlign());
+ }
+ } else {
+ // Bitcast the block literal to a generic block literal.
+ BlockPtr = Builder.CreatePointerCast(
+ BlockPtr, llvm::PointerType::get(GenBlockTy, 0), "block.literal");
+ // Get pointer to the block invoke function
+ llvm::Value *FuncPtr = Builder.CreateStructGEP(GenBlockTy, BlockPtr, 3);
- // And the rest of the arguments.
- EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(), E->arguments());
+ // First argument is a block literal casted to a void pointer
+ BlockPtr = Builder.CreatePointerCast(BlockPtr, VoidPtrTy);
+ Args.add(RValue::get(BlockPtr), Ctx.VoidPtrTy);
+ // And the rest of the arguments.
+ EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(), E->arguments());
- // Load the function.
- llvm::Value *Func = Builder.CreateAlignedLoad(FuncPtr, getPointerAlign());
+ // Load the function.
+ Func = Builder.CreateAlignedLoad(FuncPtr, getPointerAlign());
+ }
const FunctionType *FuncTy = FnType->castAs<FunctionType>();
const CGFunctionInfo &FnInfo =
@@ -1332,9 +1323,8 @@ Address CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable) {
// Handle constant captures.
if (capture.isConstant()) return LocalDeclMap.find(variable)->second;
- Address addr =
- Builder.CreateStructGEP(LoadBlockStruct(), capture.getIndex(),
- capture.getOffset(), "block.capture.addr");
+ Address addr = Builder.CreateStructGEP(LoadBlockStruct(), capture.getIndex(),
+ "block.capture.addr");
if (variable->isEscapingByref()) {
// addr should be a void** right now. Load, then cast the result
@@ -1444,10 +1434,12 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
if (CGM.getContext().getLangOpts().OpenCL)
AddrSpace = CGM.getContext().getTargetAddressSpace(LangAS::opencl_global);
- llvm::Constant *literal = fields.finishAndCreateGlobal(
+ llvm::GlobalVariable *literal = fields.finishAndCreateGlobal(
"__block_literal_global", blockInfo.BlockAlign,
/*constant*/ !IsWindows, llvm::GlobalVariable::InternalLinkage, AddrSpace);
+ literal->addAttribute("objc_arc_inert");
+
// Windows does not allow globals to be initialised to point to globals in
// different DLLs. Any such variables must run code to initialise them.
if (IsWindows) {
@@ -1617,9 +1609,8 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
// If we have a C++ 'this' reference, go ahead and force it into
// existence now.
if (blockDecl->capturesCXXThis()) {
- Address addr =
- Builder.CreateStructGEP(LoadBlockStruct(), blockInfo.CXXThisIndex,
- blockInfo.CXXThisOffset, "block.captured-this");
+ Address addr = Builder.CreateStructGEP(
+ LoadBlockStruct(), blockInfo.CXXThisIndex, "block.captured-this");
CXXThisValue = Builder.CreateLoad(addr, "this");
}
@@ -2029,6 +2020,8 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::LinkOnceODRLinkage,
FuncName, &CGM.getModule());
+ if (CGM.supportsCOMDAT())
+ Fn->setComdat(CGM.getModule().getOrInsertComdat(FuncName));
IdentifierInfo *II = &C.Idents.get(FuncName);
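This change (and the matching one in GenerateDestroyHelperFunction below) puts the linkonce_odr block copy/dispose helpers into a COMDAT group when the object format supports it, so the linker can merge or discard duplicate helpers. A sketch of the idiom, assuming an llvm::Module and Function at hand:

    #include "llvm/IR/Module.h"

    // Give a linkonce_odr function a comdat keyed on its own name so the
    // linker keeps exactly one copy (required on COFF, harmless on ELF).
    void placeInComdat(llvm::Module &M, llvm::Function *F) {
      if (F->hasLinkOnceODRLinkage())
        F->setComdat(M.getOrInsertComdat(F->getName()));
    }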
@@ -2062,8 +2055,8 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
BlockFieldFlags flags = CopiedCapture.CopyFlags;
unsigned index = capture.getIndex();
- Address srcField = Builder.CreateStructGEP(src, index, capture.getOffset());
- Address dstField = Builder.CreateStructGEP(dst, index, capture.getOffset());
+ Address srcField = Builder.CreateStructGEP(src, index);
+ Address dstField = Builder.CreateStructGEP(dst, index);
switch (CopiedCapture.CopyKind) {
case BlockCaptureEntityKind::CXXRecord:
@@ -2220,6 +2213,8 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::LinkOnceODRLinkage,
FuncName, &CGM.getModule());
+ if (CGM.supportsCOMDAT())
+ Fn->setComdat(CGM.getModule().getOrInsertComdat(FuncName));
IdentifierInfo *II = &C.Idents.get(FuncName);
@@ -2251,8 +2246,7 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
const CGBlockInfo::Capture &capture = *DestroyedCapture.Capture;
BlockFieldFlags flags = DestroyedCapture.DisposeFlags;
- Address srcField =
- Builder.CreateStructGEP(src, capture.getIndex(), capture.getOffset());
+ Address srcField = Builder.CreateStructGEP(src, capture.getIndex());
pushCaptureCleanup(DestroyedCapture.DisposeKind, srcField,
CI.getVariable()->getType(), flags,
@@ -2286,7 +2280,7 @@ public:
unsigned flags = (Flags | BLOCK_BYREF_CALLER).getBitMask();
llvm::Value *flagsVal = llvm::ConstantInt::get(CGF.Int32Ty, flags);
- llvm::Value *fn = CGF.CGM.getBlockObjectAssign();
+ llvm::FunctionCallee fn = CGF.CGM.getBlockObjectAssign();
llvm::Value *args[] = { destField.getPointer(), srcValue, flagsVal };
CGF.EmitNounwindRuntimeCall(fn, args);
@@ -2712,13 +2706,11 @@ Address CodeGenFunction::emitBlockByrefAddress(Address baseAddr,
const llvm::Twine &name) {
// Chase the forwarding address if requested.
if (followForward) {
- Address forwardingAddr =
- Builder.CreateStructGEP(baseAddr, 1, getPointerSize(), "forwarding");
+ Address forwardingAddr = Builder.CreateStructGEP(baseAddr, 1, "forwarding");
baseAddr = Address(Builder.CreateLoad(forwardingAddr), info.ByrefAlignment);
}
- return Builder.CreateStructGEP(baseAddr, info.FieldIndex,
- info.FieldOffset, name);
+ return Builder.CreateStructGEP(baseAddr, info.FieldIndex, name);
}
/// BuildByrefInfo - This routine changes a __block variable declared as T x
@@ -2836,8 +2828,7 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
CharUnits nextHeaderOffset;
auto storeHeaderField = [&](llvm::Value *value, CharUnits fieldSize,
const Twine &name) {
- auto fieldAddr = Builder.CreateStructGEP(addr, nextHeaderIndex,
- nextHeaderOffset, name);
+ auto fieldAddr = Builder.CreateStructGEP(addr, nextHeaderIndex, name);
Builder.CreateStore(value, fieldAddr);
nextHeaderIndex++;
@@ -2933,7 +2924,7 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
void CodeGenFunction::BuildBlockRelease(llvm::Value *V, BlockFieldFlags flags,
bool CanThrow) {
- llvm::Value *F = CGM.getBlockObjectDispose();
+ llvm::FunctionCallee F = CGM.getBlockObjectDispose();
llvm::Value *args[] = {
Builder.CreateBitCast(V, Int8PtrTy),
llvm::ConstantInt::get(Int32Ty, flags.getBitMask())
@@ -2989,7 +2980,7 @@ static void configureBlocksRuntimeObject(CodeGenModule &CGM,
CGM.setDSOLocal(GV);
}
-llvm::Constant *CodeGenModule::getBlockObjectDispose() {
+llvm::FunctionCallee CodeGenModule::getBlockObjectDispose() {
if (BlockObjectDispose)
return BlockObjectDispose;
@@ -2997,11 +2988,12 @@ llvm::Constant *CodeGenModule::getBlockObjectDispose() {
llvm::FunctionType *fty
= llvm::FunctionType::get(VoidTy, args, false);
BlockObjectDispose = CreateRuntimeFunction(fty, "_Block_object_dispose");
- configureBlocksRuntimeObject(*this, BlockObjectDispose);
+ configureBlocksRuntimeObject(
+ *this, cast<llvm::Constant>(BlockObjectDispose.getCallee()));
return BlockObjectDispose;
}
-llvm::Constant *CodeGenModule::getBlockObjectAssign() {
+llvm::FunctionCallee CodeGenModule::getBlockObjectAssign() {
if (BlockObjectAssign)
return BlockObjectAssign;
@@ -3009,7 +3001,8 @@ llvm::Constant *CodeGenModule::getBlockObjectAssign() {
llvm::FunctionType *fty
= llvm::FunctionType::get(VoidTy, args, false);
BlockObjectAssign = CreateRuntimeFunction(fty, "_Block_object_assign");
- configureBlocksRuntimeObject(*this, BlockObjectAssign);
+ configureBlocksRuntimeObject(
+ *this, cast<llvm::Constant>(BlockObjectAssign.getCallee()));
return BlockObjectAssign;
}
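Most of the mechanical churn in this file, and throughout the import, is the llvm::FunctionCallee migration: runtime-function getters now return a {function type, callee} pair instead of a bare llvm::Constant*, so call emission keeps the callee type explicit. A sketch of the new shape, assuming the LLVM 9 Module/IRBuilder API:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Module.h"

    llvm::CallInst *emitBlockDispose(llvm::Module &M, llvm::IRBuilder<> &B,
                                     llvm::Value *BlockPtr) {
      // getOrInsertFunction now hands back a FunctionCallee...
      llvm::FunctionCallee Fn = M.getOrInsertFunction(
          "_Block_object_dispose", llvm::Type::getVoidTy(M.getContext()),
          BlockPtr->getType(), llvm::Type::getInt32Ty(M.getContext()));
      // ...which CreateCall accepts directly, no cast back to Constant needed.
      return B.CreateCall(Fn, {BlockPtr, B.getInt32(0)});
    }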
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 3f9fc16d9b10..c4bfde666154 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -1,9 +1,8 @@
//===-- CGBlocks.h - state for LLVM CodeGen for blocks ----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CodeGen/CGBuilder.h b/lib/CodeGen/CGBuilder.h
index 654ef72060b7..68c8c641139f 100644
--- a/lib/CodeGen/CGBuilder.h
+++ b/lib/CodeGen/CGBuilder.h
@@ -1,9 +1,8 @@
//===-- CGBuilder.h - Choose IRBuilder implementation ----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -168,19 +167,25 @@ public:
return Address(Ptr, Addr.getAlignment());
}
+ /// Given
+ /// %addr = {T1, T2...}* ...
+ /// produce
+ /// %name = getelementptr inbounds %addr, i32 0, i32 index
+ ///
+ /// This API assumes that drilling into a struct like this is always an
+ /// inbounds operation.
using CGBuilderBaseTy::CreateStructGEP;
- Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset,
+ Address CreateStructGEP(Address Addr, unsigned Index,
const llvm::Twine &Name = "") {
+ llvm::StructType *ElTy = cast<llvm::StructType>(Addr.getElementType());
+ const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+ const llvm::StructLayout *Layout = DL.getStructLayout(ElTy);
+ auto Offset = CharUnits::fromQuantity(Layout->getElementOffset(Index));
+
return Address(CreateStructGEP(Addr.getElementType(),
Addr.getPointer(), Index, Name),
Addr.getAlignment().alignmentAtOffset(Offset));
}
- Address CreateStructGEP(Address Addr, unsigned Index,
- const llvm::StructLayout *Layout,
- const llvm::Twine &Name = "") {
- auto Offset = CharUnits::fromQuantity(Layout->getElementOffset(Index));
- return CreateStructGEP(Addr, Index, Offset, Name);
- }
/// Given
/// %addr = [n x T]* ...
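CreateStructGEP no longer takes a caller-supplied CharUnits offset; the builder now derives the field offset, and hence the result alignment, from the module's DataLayout, which removes a class of mismatched-offset bugs at the many call sites updated in this patch. The underlying computation, as a small sketch:

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/DerivedTypes.h"
    #include <cstdint>

    // What the new overload computes internally for field `Index` of `STy`.
    uint64_t fieldOffsetInBytes(const llvm::DataLayout &DL,
                                llvm::StructType *STy, unsigned Index) {
      const llvm::StructLayout *SL = DL.getStructLayout(STy);
      return SL->getElementOffset(Index);
    }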
@@ -190,15 +195,17 @@ public:
///
/// This API assumes that drilling into an array like this is always
/// an inbounds operation.
- ///
- /// \param EltSize - the size of the type T in bytes
- Address CreateConstArrayGEP(Address Addr, uint64_t Index, CharUnits EltSize,
+ Address CreateConstArrayGEP(Address Addr, uint64_t Index,
const llvm::Twine &Name = "") {
- return Address(CreateInBoundsGEP(Addr.getPointer(),
- {getSize(CharUnits::Zero()),
- getSize(Index)},
- Name),
- Addr.getAlignment().alignmentAtOffset(Index * EltSize));
+ llvm::ArrayType *ElTy = cast<llvm::ArrayType>(Addr.getElementType());
+ const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+ CharUnits EltSize =
+ CharUnits::fromQuantity(DL.getTypeAllocSize(ElTy->getElementType()));
+
+ return Address(
+ CreateInBoundsGEP(Addr.getPointer(),
+ {getSize(CharUnits::Zero()), getSize(Index)}, Name),
+ Addr.getAlignment().alignmentAtOffset(Index * EltSize));
}
/// Given
@@ -206,11 +213,12 @@ public:
/// produce
/// %name = getelementptr inbounds %addr, i64 index
/// where i64 is actually the target word size.
- ///
- /// \param EltSize - the size of the type T in bytes
Address CreateConstInBoundsGEP(Address Addr, uint64_t Index,
- CharUnits EltSize,
const llvm::Twine &Name = "") {
+ llvm::Type *ElTy = Addr.getElementType();
+ const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+ CharUnits EltSize = CharUnits::fromQuantity(DL.getTypeAllocSize(ElTy));
+
return Address(CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
getSize(Index), Name),
Addr.getAlignment().alignmentAtOffset(Index * EltSize));
@@ -221,10 +229,12 @@ public:
/// produce
/// %name = getelementptr inbounds %addr, i64 index
/// where i64 is actually the target word size.
- ///
- /// \param EltSize - the size of the type T in bytes
- Address CreateConstGEP(Address Addr, uint64_t Index, CharUnits EltSize,
+ Address CreateConstGEP(Address Addr, uint64_t Index,
const llvm::Twine &Name = "") {
+ const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+ CharUnits EltSize =
+ CharUnits::fromQuantity(DL.getTypeAllocSize(Addr.getElementType()));
+
return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(),
getSize(Index), Name),
Addr.getAlignment().alignmentAtOffset(Index * EltSize));
@@ -245,31 +255,21 @@ public:
}
using CGBuilderBaseTy::CreateConstInBoundsGEP2_32;
- Address CreateConstInBoundsGEP2_32(Address Addr, unsigned Idx0,
- unsigned Idx1, const llvm::DataLayout &DL,
- const llvm::Twine &Name = "") {
+ Address CreateConstInBoundsGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1,
+ const llvm::Twine &Name = "") {
+ const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+
auto *GEP = cast<llvm::GetElementPtrInst>(CreateConstInBoundsGEP2_32(
Addr.getElementType(), Addr.getPointer(), Idx0, Idx1, Name));
llvm::APInt Offset(
DL.getIndexSizeInBits(Addr.getType()->getPointerAddressSpace()), 0,
- /*IsSigned=*/true);
+ /*isSigned=*/true);
if (!GEP->accumulateConstantOffset(DL, Offset))
llvm_unreachable("offset of GEP with constants is always computable");
return Address(GEP, Addr.getAlignment().alignmentAtOffset(
CharUnits::fromQuantity(Offset.getSExtValue())));
}
- llvm::Value *CreateConstInBoundsByteGEP(llvm::Value *Ptr, CharUnits Offset,
- const llvm::Twine &Name = "") {
- assert(Ptr->getType()->getPointerElementType() == TypeCache.Int8Ty);
- return CreateInBoundsGEP(Ptr, getSize(Offset), Name);
- }
- llvm::Value *CreateConstByteGEP(llvm::Value *Ptr, CharUnits Offset,
- const llvm::Twine &Name = "") {
- assert(Ptr->getType()->getPointerElementType() == TypeCache.Int8Ty);
- return CreateGEP(Ptr, getSize(Offset), Name);
- }
-
using CGBuilderBaseTy::CreateMemCpy;
llvm::CallInst *CreateMemCpy(Address Dest, Address Src, llvm::Value *Size,
bool IsVolatile = false) {
@@ -298,6 +298,21 @@ public:
return CreateMemSet(Dest.getPointer(), Value, Size,
Dest.getAlignment().getQuantity(), IsVolatile);
}
+
+ using CGBuilderBaseTy::CreatePreserveStructAccessIndex;
+ Address CreatePreserveStructAccessIndex(Address Addr,
+ unsigned Index,
+ unsigned FieldIndex,
+ llvm::MDNode *DbgInfo) {
+ llvm::StructType *ElTy = cast<llvm::StructType>(Addr.getElementType());
+ const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+ const llvm::StructLayout *Layout = DL.getStructLayout(ElTy);
+ auto Offset = CharUnits::fromQuantity(Layout->getElementOffset(Index));
+
+ return Address(CreatePreserveStructAccessIndex(Addr.getPointer(),
+ Index, FieldIndex, DbgInfo),
+ Addr.getAlignment().alignmentAtOffset(Offset));
+ }
};
} // end namespace CodeGen
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index a718f2f19aa6..a300bab49f9c 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -1,9 +1,8 @@
//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -18,6 +17,7 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
+#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
@@ -27,7 +27,6 @@
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
@@ -46,6 +45,25 @@ int64_t clamp(int64_t Value, int64_t Low, int64_t High) {
return std::min(High, std::max(Low, Value));
}
+static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
+                             unsigned AlignmentInBytes) {
+ ConstantInt *Byte;
+ switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
+ case LangOptions::TrivialAutoVarInitKind::Uninitialized:
+ // Nothing to initialize.
+ return;
+ case LangOptions::TrivialAutoVarInitKind::Zero:
+ Byte = CGF.Builder.getInt8(0x00);
+ break;
+ case LangOptions::TrivialAutoVarInitKind::Pattern: {
+ llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
+ Byte = llvm::dyn_cast<llvm::ConstantInt>(
+ initializationPatternFor(CGF.CGM, Int8));
+ break;
+ }
+ }
+ CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
+}
+
/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
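initializeAlloca above makes __builtin_alloca honour -ftrivial-auto-var-init: the fresh stack memory is memset to zero or to the pattern byte before the builtin's result is handed back (see the two call sites further down). A user-level illustration, as a sketch whose behaviour depends on the chosen mode:

    int sum_scratch(unsigned n) {
      // With -ftrivial-auto-var-init=zero the bytes below are all 0x00;
      // with =pattern they are a recognisable fill byte; with
      // =uninitialized (the default) they stay indeterminate.
      unsigned char *p = static_cast<unsigned char *>(__builtin_alloca(n));
      int s = 0;
      for (unsigned i = 0; i < n; ++i)
        s += p[i];
      return s;
    }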
@@ -300,6 +318,34 @@ static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E,
return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
}
+// Build a plain volatile load.
+static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
+ Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
+ QualType ElTy = E->getArg(0)->getType()->getPointeeType();
+ CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
+ llvm::Type *ITy =
+ llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
+ Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
+ llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(Ptr, LoadSize);
+ Load->setVolatile(true);
+ return Load;
+}
+
+// Build a plain volatile store.
+static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
+ Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
+ Value *Value = CGF.EmitScalarExpr(E->getArg(1));
+ QualType ElTy = E->getArg(0)->getType()->getPointeeType();
+ CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
+ llvm::Type *ITy =
+ llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8);
+ Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
+ llvm::StoreInst *Store =
+ CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
+ Store->setVolatile(true);
+ return Store;
+}
+
// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type.
static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
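EmitISOVolatileLoad/Store implement MSVC's __iso_volatile_load*/store* intrinsics: a plain volatile access of the pointee's width, with no implied memory barrier. Roughly equivalent user code, as a sketch:

    #include <cstdint>

    std::int32_t read_mmio(const volatile std::int32_t *p) {
      return *p;            // lowers like __iso_volatile_load32(p)
    }

    void write_mmio(volatile std::int32_t *p, std::int32_t v) {
      *p = v;               // lowers like __iso_volatile_store32(p, v)
    }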
@@ -307,7 +353,7 @@ static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
unsigned IntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
- Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
+ Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
return CGF.Builder.CreateCall(F, Src0);
}
@@ -318,7 +364,7 @@ static Value *emitBinaryBuiltin(CodeGenFunction &CGF,
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
- Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
+ Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
return CGF.Builder.CreateCall(F, { Src0, Src1 });
}
@@ -330,7 +376,7 @@ static Value *emitTernaryBuiltin(CodeGenFunction &CGF,
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
- Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
+ Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
}
@@ -341,13 +387,25 @@ static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
- Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
+ Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
return CGF.Builder.CreateCall(F, {Src0, Src1});
}
+// Emit an intrinsic that has overloaded integer result and fp operand.
+static Value *emitFPToIntRoundBuiltin(CodeGenFunction &CGF,
+ const CallExpr *E,
+ unsigned IntrinsicID) {
+ llvm::Type *ResultType = CGF.ConvertType(E->getType());
+ llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
+
+ Function *F = CGF.CGM.getIntrinsic(IntrinsicID,
+ {ResultType, Src0->getType()});
+ return CGF.Builder.CreateCall(F, Src0);
+}
+
/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
- Value *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
+ Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
Call->setDoesNotAccessMemory();
return Call;
@@ -408,7 +466,7 @@ static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
"Arguments must be the same type. (Did you forget to make sure both "
"arguments have the same integer width?)");
- llvm::Value *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
+ Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
return CGF.Builder.CreateExtractValue(Tmp, 0);
@@ -419,7 +477,7 @@ static Value *emitRangedBuiltin(CodeGenFunction &CGF,
int low, int high) {
llvm::MDBuilder MDHelper(CGF.getLLVMContext());
llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high));
- Value *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
+ Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
llvm::Instruction *Call = CGF.Builder.CreateCall(F);
Call->setMetadata(llvm::LLVMContext::MD_range, RNode);
return Call;
@@ -496,10 +554,11 @@ getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
llvm::Value *
CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
llvm::IntegerType *ResType,
- llvm::Value *EmittedE) {
+ llvm::Value *EmittedE,
+ bool IsDynamic) {
uint64_t ObjectSize;
if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
- return emitBuiltinObjectSize(E, Type, ResType, EmittedE);
+ return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
}
@@ -515,7 +574,7 @@ CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
llvm::Value *
CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
llvm::IntegerType *ResType,
- llvm::Value *EmittedE) {
+ llvm::Value *EmittedE, bool IsDynamic) {
// We need to reference an argument if the pointer is a parameter with the
// pass_object_size attribute.
if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
@@ -530,7 +589,7 @@ CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
auto DIter = LocalDeclMap.find(D);
assert(DIter != LocalDeclMap.end());
- return EmitLoadOfScalar(DIter->second, /*volatile=*/false,
+ return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
getContext().getSizeType(), E->getBeginLoc());
}
}
@@ -545,13 +604,15 @@ CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
assert(Ptr->getType()->isPointerTy() &&
"Non-pointer passed to __builtin_object_size?");
- Value *F = CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
// LLVM only supports 0 and 2, make sure that we pass along that as a boolean.
Value *Min = Builder.getInt1((Type & 2) != 0);
// For GCC compatibility, __builtin_object_size treat NULL as unknown size.
Value *NullIsUnknown = Builder.getTrue();
- return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown});
+ Value *Dynamic = Builder.getInt1(IsDynamic);
+ return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
}
namespace {
@@ -658,7 +719,7 @@ static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false);
llvm::InlineAsm *IA =
- llvm::InlineAsm::get(FTy, Asm, Constraints, /*SideEffects=*/true);
+ llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
}
@@ -793,16 +854,16 @@ static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
llvm::Attribute::ReturnsTwice);
- llvm::Constant *SetJmpFn = CGF.CGM.CreateRuntimeFunction(
+ llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
ReturnsTwiceAttr, /*Local=*/true);
llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
llvm::Value *Args[] = {Buf, Arg1};
- llvm::CallSite CS = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
- CS.setAttributes(ReturnsTwiceAttr);
- return RValue::get(CS.getInstruction());
+ llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
+ CB->setAttributes(ReturnsTwiceAttr);
+ return RValue::get(CB);
}
// Many of MSVC builtins are on x64, ARM and AArch64; to avoid repeating code,
@@ -876,7 +937,7 @@ Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
Address IndexAddress = EmitPointerWithAlignment(E->getArg(0));
if (BuiltinID == MSVCIntrin::_BitScanForward) {
- Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
+ Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
Builder.CreateStore(ZeroCount, IndexAddress, false);
@@ -884,7 +945,7 @@ Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
- Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
+ Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
@@ -996,16 +1057,19 @@ Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
Asm = "udf #251";
Constraints = "{r0}";
break;
+ case llvm::Triple::aarch64:
+ Asm = "brk #0xF003";
+ Constraints = "{w0}";
}
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
llvm::InlineAsm *IA =
- llvm::InlineAsm::get(FTy, Asm, Constraints, /*SideEffects=*/true);
+ llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
getLLVMContext(), llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoReturn);
- CallSite CS = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
- CS.setAttributes(NoReturnAttr);
- return CS.getInstruction();
+ llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
+ CI->setAttributes(NoReturnAttr);
+ return CI;
}
}
llvm_unreachable("Incorrect MSVC intrinsic!");
@@ -1070,9 +1134,10 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
return F;
llvm::SmallVector<QualType, 4> ArgTys;
- llvm::SmallVector<ImplicitParamDecl, 4> Params;
- Params.emplace_back(Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"),
- Ctx.VoidPtrTy, ImplicitParamDecl::Other);
+ FunctionArgList Args;
+ Args.push_back(ImplicitParamDecl::Create(
+ Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
+ ImplicitParamDecl::Other));
ArgTys.emplace_back(Ctx.VoidPtrTy);
for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
@@ -1081,17 +1146,13 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
continue;
QualType ArgTy = getOSLogArgType(Ctx, Size);
- Params.emplace_back(
+ Args.push_back(ImplicitParamDecl::Create(
Ctx, nullptr, SourceLocation(),
&Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
- ImplicitParamDecl::Other);
+ ImplicitParamDecl::Other));
ArgTys.emplace_back(ArgTy);
}
- FunctionArgList Args;
- for (auto &P : Params)
- Args.push_back(&P);
-
QualType ReturnTy = Ctx.VoidTy;
QualType FuncionTy = Ctx.getFunctionType(ReturnTy, ArgTys, {});
@@ -1106,6 +1167,7 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn);
CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
+ Fn->setDoesNotThrow();
// Attach 'noinline' at -Oz.
if (CGM.getCodeGenOpts().OptimizeSize == 2)
@@ -1123,7 +1185,7 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
auto AL = ApplyDebugLocation::CreateArtificial(*this);
CharUnits Offset;
- Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(&Params[0]), "buf"),
+ Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"),
BufferAlignment);
Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
@@ -1143,7 +1205,7 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
if (!Size.getQuantity())
continue;
- Address Arg = GetAddrOfLocalVar(&Params[I]);
+ Address Arg = GetAddrOfLocalVar(Args[I]);
Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(),
"argDataCast");
@@ -1330,13 +1392,11 @@ EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
}
static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
- Value *&RecordPtr, CharUnits Align, Value *Func,
- int Lvl) {
+ Value *&RecordPtr, CharUnits Align,
+ llvm::FunctionCallee Func, int Lvl) {
const auto *RT = RType->getAs<RecordType>();
ASTContext &Context = CGF.getContext();
RecordDecl *RD = RT->getDecl()->getDefinition();
- ASTContext &Ctx = RD->getASTContext();
- const ASTRecordLayout &RL = Ctx.getASTRecordLayout(RD);
std::string Pad = std::string(Lvl * 4, ' ');
Value *GString =
@@ -1366,9 +1426,6 @@ static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
}
for (const auto *FD : RD->fields()) {
- uint64_t Off = RL.getFieldOffset(FD->getFieldIndex());
- Off = Ctx.toCharUnitsFromBits(Off).getQuantity();
-
Value *FieldPtr = RecordPtr;
if (RD->isUnion())
FieldPtr = CGF.Builder.CreatePointerCast(
@@ -1466,7 +1523,7 @@ RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
// Rotate is a special case of LLVM funnel shift - 1st 2 args are the same.
unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
- Value *F = CGM.getIntrinsic(IID, Ty);
+ Function *F = CGM.getIntrinsic(IID, Ty);
return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
}
@@ -1668,6 +1725,38 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_truncl:
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::trunc));
+ case Builtin::BIlround:
+ case Builtin::BIlroundf:
+ case Builtin::BIlroundl:
+ case Builtin::BI__builtin_lround:
+ case Builtin::BI__builtin_lroundf:
+ case Builtin::BI__builtin_lroundl:
+ return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::lround));
+
+ case Builtin::BIllround:
+ case Builtin::BIllroundf:
+ case Builtin::BIllroundl:
+ case Builtin::BI__builtin_llround:
+ case Builtin::BI__builtin_llroundf:
+ case Builtin::BI__builtin_llroundl:
+ return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::llround));
+
+ case Builtin::BIlrint:
+ case Builtin::BIlrintf:
+ case Builtin::BIlrintl:
+ case Builtin::BI__builtin_lrint:
+ case Builtin::BI__builtin_lrintf:
+ case Builtin::BI__builtin_lrintl:
+ return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::lrint));
+
+ case Builtin::BIllrint:
+ case Builtin::BIllrintf:
+ case Builtin::BIllrintl:
+ case Builtin::BI__builtin_llrint:
+ case Builtin::BI__builtin_llrintf:
+ case Builtin::BI__builtin_llrintl:
+ return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::llrint));
+
default:
break;
}
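With the cases above, the lround/llround/lrint/llrint builtins and their libm aliases are emitted through emitFPToIntRoundBuiltin as the overloaded llvm.lround/llvm.llrint intrinsic family instead of libcalls. Example usage these cases now cover:

    long nearest_long(double x) {
      return __builtin_lround(x);    // -> llvm.lround.i64.f64 (i32 on 32-bit long targets)
    }

    long long nearest_llong(float x) {
      return __builtin_llrint(x);    // -> llvm.llrint.i64.f32, honours the current rounding mode
    }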
@@ -1735,6 +1824,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
case Builtin::BI__builtin_dump_struct: {
+ llvm::Type *LLVMIntTy = getTypes().ConvertType(getContext().IntTy);
+ llvm::FunctionType *LLVMFuncType = llvm::FunctionType::get(
+ LLVMIntTy, {llvm::Type::getInt8PtrTy(getLLVMContext())}, true);
+
Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts());
CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment();
@@ -1742,7 +1835,29 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
QualType Arg0Type = Arg0->getType()->getPointeeType();
Value *RecordPtr = EmitScalarExpr(Arg0);
- Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align, Func, 0);
+ Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align,
+ {LLVMFuncType, Func}, 0);
+ return RValue::get(Res);
+ }
+
+ case Builtin::BI__builtin_preserve_access_index: {
+ // Only enabled preserved access index region when debuginfo
+ // is available as debuginfo is needed to preserve user-level
+ // access pattern.
+ if (!getDebugInfo()) {
+ CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
+ return RValue::get(EmitScalarExpr(E->getArg(0)));
+ }
+
+ // Nested builtin_preserve_access_index() not supported
+ if (IsInPreservedAIRegion) {
+ CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
+ return RValue::get(EmitScalarExpr(E->getArg(0)));
+ }
+
+ IsInPreservedAIRegion = true;
+ Value *Res = EmitScalarExpr(E->getArg(0));
+ IsInPreservedAIRegion = false;
return RValue::get(Res);
}
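The new __builtin_preserve_access_index case brackets the evaluation of its argument with IsInPreservedAIRegion so field and array accesses inside it are emitted through the preserve.*.access.index intrinsics (used for BPF CO-RE relocations); it requires debug info and rejects nesting, as the two error paths show. A user-level sketch:

    struct pkt { int len; int data[8]; };

    int third_word(struct pkt *p) {
      // Compiled with -g (and typically -target bpf), the offsets used by
      // this access are recorded for CO-RE relocation instead of being
      // hard-coded into the generated code.
      return __builtin_preserve_access_index(p->data[2]);
    }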
@@ -1763,7 +1878,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
+ Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Zero = llvm::Constant::getNullValue(ArgType);
@@ -1783,7 +1898,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);
llvm::Type *ArgType = ArgValue->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
+ Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
@@ -1800,7 +1915,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);
llvm::Type *ArgType = ArgValue->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
+ Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
@@ -1817,7 +1932,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
+ Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Tmp =
@@ -1838,7 +1953,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
+ Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Tmp = Builder.CreateCall(F, ArgValue);
@@ -1854,7 +1969,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
+ Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
@@ -1872,7 +1987,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
+ Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, ArgValue);
@@ -1898,7 +2013,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
if (CGM.getCodeGenOpts().OptimizationLevel == 0)
return RValue::get(ArgValue);
- Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
+ Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
Value *Result =
Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
return RValue::get(Result);
@@ -1913,7 +2028,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
unsigned Alignment = (unsigned)AlignmentCI->getZExtValue();
- EmitAlignmentAssumption(PtrValue, Ptr, /*The expr loc is sufficient.*/ SourceLocation(),
+ EmitAlignmentAssumption(PtrValue, Ptr,
+ /*The expr loc is sufficient.*/ SourceLocation(),
Alignment, OffsetValue);
return RValue::get(PtrValue);
}
@@ -1923,7 +2039,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(nullptr);
Value *ArgValue = EmitScalarExpr(E->getArg(0));
- Value *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
+ Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
}
case Builtin::BI__builtin_bswap16:
@@ -1968,17 +2084,34 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
const Expr *Arg = E->getArg(0);
QualType ArgType = Arg->getType();
- if (!hasScalarEvaluationKind(ArgType) || ArgType->isFunctionType())
- // We can only reason about scalar types.
+ // FIXME: The allowance for Obj-C pointers and block pointers is historical
+ // and likely a mistake.
+ if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
+ !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
+ // Per the GCC documentation, only numeric constants are recognized after
+ // inlining.
+ return RValue::get(ConstantInt::get(ResultType, 0));
+
+ if (Arg->HasSideEffects(getContext()))
+ // The argument is unevaluated, so be conservative if it might have
+ // side-effects.
return RValue::get(ConstantInt::get(ResultType, 0));
Value *ArgValue = EmitScalarExpr(Arg);
- Value *F = CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
+ if (ArgType->isObjCObjectPointerType()) {
+ // Convert Objective-C objects to id because we cannot distinguish between
+ // LLVM types for Obj-C classes as they are opaque.
+ ArgType = CGM.getContext().getObjCIdType();
+ ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
+ }
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
Value *Result = Builder.CreateCall(F, ArgValue);
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
return RValue::get(Result);
}
+ case Builtin::BI__builtin_dynamic_object_size:
case Builtin::BI__builtin_object_size: {
unsigned Type =
E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
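The rewritten __builtin_constant_p handling in the hunk above only forwards integral, floating-point, Objective-C object pointer, and block pointer arguments to llvm.is.constant, folds anything with side effects to 0 up front (the argument is unevaluated), and bitcasts Obj-C object pointers to id first. The observable behaviour, sketched:

    int probe(int x) {
      int a = __builtin_constant_p(42);    // 1: numeric constant
      int b = __builtin_constant_p(x);     // 0 unless later folding proves it constant
      int c = __builtin_constant_p(x++);   // 0: has side effects, and x is NOT incremented
      const char *s = "hi";
      int d = __builtin_constant_p(s);     // 0: plain pointers are no longer forwarded
      return a + b + c + d + x;
    }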
@@ -1986,8 +2119,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// We pass this builtin onto the optimizer so that it can figure out the
// object size in more complex cases.
+ bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
- /*EmittedE=*/nullptr));
+ /*EmittedE=*/nullptr, IsDynamic));
}
case Builtin::BI__builtin_prefetch: {
Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
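The IsDynamic flag plumbed through in the preceding hunk distinguishes __builtin_dynamic_object_size from __builtin_object_size: the llvm.objectsize call gets a fourth i1 operand, letting the optimizer return a runtime-computed size where the static variant has to give up. Illustrative usage, as a sketch:

    #include <cstddef>
    #include <cstdlib>

    std::size_t usable(std::size_t n) {
      char *p = static_cast<char *>(std::malloc(n));
      // Static variant: usually folds to (size_t)-1 (unknown) because n is
      // not a compile-time constant. Dynamic variant: may lower to just n.
      std::size_t stat = __builtin_object_size(p, 0);
      std::size_t dyn  = __builtin_dynamic_object_size(p, 0);
      std::free(p);
      return dyn ? dyn : stat;
    }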
@@ -1997,17 +2131,17 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
llvm::ConstantInt::get(Int32Ty, 3);
Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
- Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
+ Function *F = CGM.getIntrinsic(Intrinsic::prefetch);
return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data}));
}
case Builtin::BI__builtin_readcyclecounter: {
- Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
+ Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
return RValue::get(Builder.CreateCall(F));
}
case Builtin::BI__builtin___clear_cache: {
Value *Begin = EmitScalarExpr(E->getArg(0));
Value *End = EmitScalarExpr(E->getArg(1));
- Value *F = CGM.getIntrinsic(Intrinsic::clear_cache);
+ Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
return RValue::get(Builder.CreateCall(F, {Begin, End}));
}
case Builtin::BI__builtin_trap:
@@ -2029,7 +2163,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *Base = EmitScalarExpr(E->getArg(0));
Value *Exponent = EmitScalarExpr(E->getArg(1));
llvm::Type *ArgType = Base->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
+ Function *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
return RValue::get(Builder.CreateCall(F, {Base, Exponent}));
}
@@ -2130,6 +2264,17 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
+ case Builtin::BI__builtin_flt_rounds: {
+ Function *F = CGM.getIntrinsic(Intrinsic::flt_rounds);
+
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F);
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+
case Builtin::BI__builtin_fpclassify: {
Value *V = EmitScalarExpr(E->getArg(5));
llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
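The new __builtin_flt_rounds case above lowers to llvm.flt_rounds and casts the i32 result to the expression type; the value follows C's FLT_ROUNDS encoding. Minimal usage:

    int current_rounding_mode() {
      // 0 = toward zero, 1 = to nearest, 2 = toward +inf, 3 = toward -inf,
      // -1 = indeterminable (the FLT_ROUNDS convention).
      return __builtin_flt_rounds();
    }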
@@ -2200,6 +2345,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
.getQuantity();
AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
AI->setAlignment(SuitableAlignmentInBytes);
+ initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
return RValue::get(AI);
}
@@ -2212,6 +2358,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getQuantity();
AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
AI->setAlignment(AlignmentInBytes);
+ initializeAlloca(*this, AI, Size, AlignmentInBytes);
return RValue::get(AI);
}
@@ -2392,24 +2539,24 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// this instead of hard-coding 0, which is correct for most targets.
int32_t Offset = 0;
- Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
+ Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
return RValue::get(Builder.CreateCall(F,
llvm::ConstantInt::get(Int32Ty, Offset)));
}
case Builtin::BI__builtin_return_address: {
Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
getContext().UnsignedIntTy);
- Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
+ Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
return RValue::get(Builder.CreateCall(F, Depth));
}
case Builtin::BI_ReturnAddress: {
- Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
+ Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
}
case Builtin::BI__builtin_frame_address: {
Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
getContext().UnsignedIntTy);
- Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
+ Function *F = CGM.getIntrinsic(Intrinsic::frameaddress);
return RValue::get(Builder.CreateCall(F, Depth));
}
case Builtin::BI__builtin_extract_return_addr: {
@@ -2445,9 +2592,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
"LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
- Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
- ? Intrinsic::eh_return_i32
- : Intrinsic::eh_return_i64);
+ Function *F =
+ CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
+ : Intrinsic::eh_return_i64);
Builder.CreateCall(F, {Int, Ptr});
Builder.CreateUnreachable();
@@ -2457,7 +2604,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(nullptr);
}
case Builtin::BI__builtin_unwind_init: {
- Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
+ Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
return RValue::get(Builder.CreateCall(F));
}
case Builtin::BI__builtin_extend_pointer: {
@@ -2498,12 +2645,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Store the stack pointer to the setjmp buffer.
Value *StackAddr =
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
- Address StackSaveSlot =
- Builder.CreateConstInBoundsGEP(Buf, 2, getPointerSize());
+ Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
Builder.CreateStore(StackAddr, StackSaveSlot);
// Call LLVM's EH setjmp, which is lightweight.
- Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
+ Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
}
@@ -2719,7 +2865,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
const CGFunctionInfo &FuncInfo =
CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
- llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
+ llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
return EmitCall(FuncInfo, CGCallee::forDirect(Func),
ReturnValueSlot(), Args);
}
@@ -2959,14 +3105,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
// Build an MDTuple of MDStrings and emit the intrinsic call.
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {});
+ llvm::Function *F =
+ CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {});
MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
return RValue::getIgnored();
}
case Builtin::BI__builtin_annotation: {
llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
+ llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
AnnVal->getType());
// Get the annotation string, go through casts. Sema requires this to be a
@@ -3311,6 +3458,19 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI_interlockedbittestandreset_nf:
return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
+ // These builtins exist to emit regular volatile loads and stores not
+ // affected by the -fms-volatile setting.
+ case Builtin::BI__iso_volatile_load8:
+ case Builtin::BI__iso_volatile_load16:
+ case Builtin::BI__iso_volatile_load32:
+ case Builtin::BI__iso_volatile_load64:
+ return RValue::get(EmitISOVolatileLoad(*this, E));
+ case Builtin::BI__iso_volatile_store8:
+ case Builtin::BI__iso_volatile_store16:
+ case Builtin::BI__iso_volatile_store32:
+ case Builtin::BI__iso_volatile_store64:
+ return RValue::get(EmitISOVolatileStore(*this, E));
+
case Builtin::BI__exception_code:
case Builtin::BI_exception_code:
return RValue::get(EmitSEHExceptionCode());
@@ -3348,7 +3508,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
auto & Context = getContext();
auto SizeTy = Context.getSizeType();
auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
- Value *F = CGM.getIntrinsic(Intrinsic::coro_size, T);
+ Function *F = CGM.getIntrinsic(Intrinsic::coro_size, T);
return RValue::get(Builder.CreateCall(F));
}
@@ -3591,7 +3751,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
AttrBuilder B;
- B.addAttribute(Attribute::ByVal);
+ B.addByValAttr(NDRangeL.getAddress().getElementType());
llvm::AttributeList ByValAttrSet =
llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
@@ -3666,21 +3826,35 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Any calls now have event arguments passed.
if (NumArgs >= 7) {
llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy);
- llvm::Type *EventPtrTy = EventTy->getPointerTo(
+ llvm::PointerType *EventPtrTy = EventTy->getPointerTo(
CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
llvm::Value *NumEvents =
Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
- llvm::Value *EventList =
- E->getArg(4)->getType()->isArrayType()
- ? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
- : EmitScalarExpr(E->getArg(4));
- llvm::Value *ClkEvent = EmitScalarExpr(E->getArg(5));
- // Convert to generic address space.
- EventList = Builder.CreatePointerCast(EventList, EventPtrTy);
- ClkEvent = ClkEvent->getType()->isIntegerTy()
- ? Builder.CreateBitOrPointerCast(ClkEvent, EventPtrTy)
- : Builder.CreatePointerCast(ClkEvent, EventPtrTy);
+
+ // Since SemaOpenCLBuiltinEnqueueKernel allows the fifth and sixth arguments
+ // to be null pointer constants (including the `0` literal), we can take that
+ // into account and emit a null pointer directly.
+ llvm::Value *EventWaitList = nullptr;
+ if (E->getArg(4)->isNullPointerConstant(
+ getContext(), Expr::NPC_ValueDependentIsNotNull)) {
+ EventWaitList = llvm::ConstantPointerNull::get(EventPtrTy);
+ } else {
+ EventWaitList = E->getArg(4)->getType()->isArrayType()
+ ? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
+ : EmitScalarExpr(E->getArg(4));
+ // Convert to generic address space.
+ EventWaitList = Builder.CreatePointerCast(EventWaitList, EventPtrTy);
+ }
+ llvm::Value *EventRet = nullptr;
+ if (E->getArg(5)->isNullPointerConstant(
+ getContext(), Expr::NPC_ValueDependentIsNotNull)) {
+ EventRet = llvm::ConstantPointerNull::get(EventPtrTy);
+ } else {
+ EventRet =
+ Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), EventPtrTy);
+ }
+
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
llvm::Value *Kernel =
@@ -3692,8 +3866,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
QueueTy, Int32Ty, RangeTy, Int32Ty,
EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
- std::vector<llvm::Value *> Args = {Queue, Flags, Range, NumEvents,
- EventList, ClkEvent, Kernel, Block};
+ std::vector<llvm::Value *> Args = {Queue, Flags, Range,
+ NumEvents, EventWaitList, EventRet,
+ Kernel, Block};
if (NumArgs == 7) {
// Has events but no variadics.
@@ -4922,8 +5097,7 @@ findNeonIntrinsicInMap(ArrayRef<NeonIntrinsicInfo> IntrinsicMap,
}
#endif
- const NeonIntrinsicInfo *Builtin =
- std::lower_bound(IntrinsicMap.begin(), IntrinsicMap.end(), BuiltinID);
+ const NeonIntrinsicInfo *Builtin = llvm::lower_bound(IntrinsicMap, BuiltinID);
if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
return Builtin;
@@ -5065,6 +5239,13 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
switch (BuiltinID) {
default: break;
+ case NEON::BI__builtin_neon_vpadd_v:
+ case NEON::BI__builtin_neon_vpaddq_v:
+ // We don't allow fp/int overloading of intrinsics.
+ if (VTy->getElementType()->isFloatingPointTy() &&
+ Int == Intrinsic::aarch64_neon_addp)
+ Int = Intrinsic::aarch64_neon_faddp;
+ break;
case NEON::BI__builtin_neon_vabs_v:
case NEON::BI__builtin_neon_vabsq_v:
if (VTy->getElementType()->isFloatingPointTy())
@@ -5262,7 +5443,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
}
case NEON::BI__builtin_neon_vfma_v:
case NEON::BI__builtin_neon_vfmaq_v: {
- Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
+ Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
@@ -5731,7 +5912,7 @@ static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
&& "Can't fit 64-bit value in 32-bit register");
if (IsRead) {
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
+ llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
llvm::Value *Call = Builder.CreateCall(F, Metadata);
if (MixedTypes)
@@ -5745,7 +5926,7 @@ static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
return Call;
}
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
+ llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
if (MixedTypes) {
// Extend 32 bit write value to 64 bit to pass to write.
@@ -5798,34 +5979,6 @@ static bool HasExtraNeonArgument(unsigned BuiltinID) {
return true;
}
-Value *CodeGenFunction::EmitISOVolatileLoad(const CallExpr *E) {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- QualType ElTy = E->getArg(0)->getType()->getPointeeType();
- CharUnits LoadSize = getContext().getTypeSizeInChars(ElTy);
- llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
- LoadSize.getQuantity() * 8);
- Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
- llvm::LoadInst *Load =
- Builder.CreateAlignedLoad(Ptr, LoadSize);
- Load->setVolatile(true);
- return Load;
-}
-
-Value *CodeGenFunction::EmitISOVolatileStore(const CallExpr *E) {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- Value *Value = EmitScalarExpr(E->getArg(1));
- QualType ElTy = E->getArg(0)->getType()->getPointeeType();
- CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
- llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
- StoreSize.getQuantity() * 8);
- Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
- llvm::StoreInst *Store =
- Builder.CreateAlignedStore(Value, Ptr,
- StoreSize);
- Store->setVolatile(true);
- return Store;
-}
-
Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
llvm::Triple::ArchType Arch) {
@@ -5846,9 +5999,9 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
llvm::InlineAsm *Emit =
IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
- /*SideEffects=*/true)
+ /*hasSideEffects=*/true)
: InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
- /*SideEffects=*/true);
+ /*hasSideEffects=*/true);
return Builder.CreateCall(Emit);
}
@@ -5866,7 +6019,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Locality is not supported on ARM target
Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
- Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
+ Function *F = CGM.getIntrinsic(Intrinsic::prefetch);
return Builder.CreateCall(F, {Address, RW, Locality, IsData});
}
@@ -6065,19 +6218,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
}
- switch (BuiltinID) {
- case ARM::BI__iso_volatile_load8:
- case ARM::BI__iso_volatile_load16:
- case ARM::BI__iso_volatile_load32:
- case ARM::BI__iso_volatile_load64:
- return EmitISOVolatileLoad(E);
- case ARM::BI__iso_volatile_store8:
- case ARM::BI__iso_volatile_store16:
- case ARM::BI__iso_volatile_store32:
- case ARM::BI__iso_volatile_store64:
- return EmitISOVolatileStore(E);
- }
-
if (BuiltinID == ARM::BI__builtin_arm_clrex) {
Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
return Builder.CreateCall(F);
@@ -6818,7 +6958,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
// FIXME: We need AArch64 specific LLVM intrinsic if we want to specify
// PLDL3STRM or PLDL2STRM.
- Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
+ Function *F = CGM.getIntrinsic(Intrinsic::prefetch);
return Builder.CreateCall(F, {Address, RW, Locality, IsData});
}
@@ -6837,6 +6977,14 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
}
+ if (BuiltinID == AArch64::BI__builtin_arm_jcvt) {
+ assert((getContext().getTypeSize(E->getType()) == 32) &&
+ "__jcvt of unusual size!");
+ llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg);
+ }
+
if (BuiltinID == AArch64::BI__clear_cache) {
assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
const FunctionDecl *FD = E->getDirectCallee();
@@ -6956,7 +7104,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
- llvm::Value *F =
+ llvm::Function *F =
CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
return Builder.CreateCall(F, Metadata);
}
@@ -7002,6 +7150,84 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {Arg0, Arg1});
}
+ // Memory Tagging Extensions (MTE) Intrinsics
+ Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
+ switch (BuiltinID) {
+ case AArch64::BI__builtin_arm_irg:
+ MTEIntrinsicID = Intrinsic::aarch64_irg; break;
+ case AArch64::BI__builtin_arm_addg:
+ MTEIntrinsicID = Intrinsic::aarch64_addg; break;
+ case AArch64::BI__builtin_arm_gmi:
+ MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
+ case AArch64::BI__builtin_arm_ldg:
+ MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
+ case AArch64::BI__builtin_arm_stg:
+ MTEIntrinsicID = Intrinsic::aarch64_stg; break;
+ case AArch64::BI__builtin_arm_subp:
+ MTEIntrinsicID = Intrinsic::aarch64_subp; break;
+ }
+
+ if (MTEIntrinsicID != Intrinsic::not_intrinsic) {
+ llvm::Type *T = ConvertType(E->getType());
+
+ if (MTEIntrinsicID == Intrinsic::aarch64_irg) {
+ Value *Pointer = EmitScalarExpr(E->getArg(0));
+ Value *Mask = EmitScalarExpr(E->getArg(1));
+
+ Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
+ Mask = Builder.CreateZExt(Mask, Int64Ty);
+ Value *RV = Builder.CreateCall(
+ CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask});
+ return Builder.CreatePointerCast(RV, T);
+ }
+ if (MTEIntrinsicID == Intrinsic::aarch64_addg) {
+ Value *Pointer = EmitScalarExpr(E->getArg(0));
+ Value *TagOffset = EmitScalarExpr(E->getArg(1));
+
+ Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
+ TagOffset = Builder.CreateZExt(TagOffset, Int64Ty);
+ Value *RV = Builder.CreateCall(
+ CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset});
+ return Builder.CreatePointerCast(RV, T);
+ }
+ if (MTEIntrinsicID == Intrinsic::aarch64_gmi) {
+ Value *Pointer = EmitScalarExpr(E->getArg(0));
+ Value *ExcludedMask = EmitScalarExpr(E->getArg(1));
+
+ ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty);
+ Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
+ return Builder.CreateCall(
+ CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask});
+ }
+ // Although it is possible to supply a different return
+ // address (first arg) to this intrinsic, for now we set the
+ // return address to be the same as the input address.
+ if (MTEIntrinsicID == Intrinsic::aarch64_ldg) {
+ Value *TagAddress = EmitScalarExpr(E->getArg(0));
+ TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
+ Value *RV = Builder.CreateCall(
+ CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
+ return Builder.CreatePointerCast(RV, T);
+ }
+ // Although it is possible to supply a different tag (to set)
+ // to this intrinsic (as the first arg), for now we supply
+ // the tag that is in the input address arg (the common use case).
+ if (MTEIntrinsicID == Intrinsic::aarch64_stg) {
+ Value *TagAddress = EmitScalarExpr(E->getArg(0));
+ TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
+ return Builder.CreateCall(
+ CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
+ }
+ if (MTEIntrinsicID == Intrinsic::aarch64_subp) {
+ Value *PointerA = EmitScalarExpr(E->getArg(0));
+ Value *PointerB = EmitScalarExpr(E->getArg(1));
+ PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy);
+ PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy);
+ return Builder.CreateCall(
+ CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB});
+ }
+ }
+
if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
BuiltinID == AArch64::BI__builtin_arm_rsrp ||
@@ -7052,25 +7278,27 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
llvm::Type *RegisterType = Int64Ty;
- llvm::Type *ValueType = Int32Ty;
llvm::Type *Types[] = { RegisterType };
if (BuiltinID == AArch64::BI_ReadStatusReg) {
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
- llvm::Value *Call = Builder.CreateCall(F, Metadata);
+ llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
- return Builder.CreateTrunc(Call, ValueType);
+ return Builder.CreateCall(F, Metadata);
}
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
+ llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
- ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
return Builder.CreateCall(F, { Metadata, ArgValue });
}
if (BuiltinID == AArch64::BI_AddressOfReturnAddress) {
- llvm::Value *F = CGM.getIntrinsic(Intrinsic::addressofreturnaddress);
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::addressofreturnaddress);
+ return Builder.CreateCall(F);
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_sponentry) {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry);
return Builder.CreateCall(F);
}
@@ -7608,13 +7836,13 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
case NEON::BI__builtin_neon_vfmah_f16: {
- Value *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
+ Function *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
// NEON intrinsic puts accumulator first, unlike the LLVM fma.
return Builder.CreateCall(F,
{EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
}
case NEON::BI__builtin_neon_vfmsh_f16: {
- Value *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
+ Function *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
// NEON intrinsic puts accumulator first, unlike the LLVM fma.
@@ -7775,6 +8003,14 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
: Intrinsic::aarch64_neon_sqsub;
return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
}
+ case NEON::BI__builtin_neon_vduph_lane_f16: {
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vget_lane");
+ }
+ case NEON::BI__builtin_neon_vduph_laneq_f16: {
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vgetq_lane");
+ }
}
llvm::VectorType *VTy = GetNeonType(this, Type);
@@ -7845,11 +8081,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
NeonTypeFlags(NeonTypeFlags::Float64, false, true));
Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
- Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
+ Function *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
Value *Result = Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
return Builder.CreateBitCast(Result, Ty);
}
- Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
+ Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
@@ -7863,7 +8099,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
}
case NEON::BI__builtin_neon_vfmaq_laneq_v: {
- Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
+ Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
@@ -7879,7 +8115,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vfmad_laneq_f64: {
Ops.push_back(EmitScalarExpr(E->getArg(3)));
llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
- Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
+ Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
}
@@ -8892,16 +9128,6 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_suqadd;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
}
- case AArch64::BI__iso_volatile_load8:
- case AArch64::BI__iso_volatile_load16:
- case AArch64::BI__iso_volatile_load32:
- case AArch64::BI__iso_volatile_load64:
- return EmitISOVolatileLoad(E);
- case AArch64::BI__iso_volatile_store8:
- case AArch64::BI__iso_volatile_store16:
- case AArch64::BI__iso_volatile_store32:
- case AArch64::BI__iso_volatile_store64:
- return EmitISOVolatileStore(E);
case AArch64::BI_BitScanForward:
case AArch64::BI_BitScanForward64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
@@ -9139,6 +9365,20 @@ static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
}
+static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
+ ArrayRef<Value *> Ops,
+ bool IsCompress) {
+ llvm::Type *ResultTy = Ops[1]->getType();
+
+ Value *MaskVec = getMaskVecValue(CGF, Ops[2],
+ ResultTy->getVectorNumElements());
+
+ Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
+ : Intrinsic::x86_avx512_mask_expand;
+ llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy);
+ return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec });
+}
+
static Value *EmitX86CompressStore(CodeGenFunction &CGF,
ArrayRef<Value *> Ops) {
llvm::Type *ResultTy = Ops[1]->getType();
@@ -9184,10 +9424,50 @@ static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
}
unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl;
- Value *F = CGF.CGM.getIntrinsic(IID, Ty);
+ Function *F = CGF.CGM.getIntrinsic(IID, Ty);
return CGF.Builder.CreateCall(F, {Op0, Op1, Amt});
}
+static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
+ bool IsSigned) {
+ Value *Op0 = Ops[0];
+ Value *Op1 = Ops[1];
+ llvm::Type *Ty = Op0->getType();
+ uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
+
+ CmpInst::Predicate Pred;
+ switch (Imm) {
+ case 0x0:
+ Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
+ break;
+ case 0x1:
+ Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
+ break;
+ case 0x2:
+ Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
+ break;
+ case 0x3:
+ Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
+ break;
+ case 0x4:
+ Pred = ICmpInst::ICMP_EQ;
+ break;
+ case 0x5:
+ Pred = ICmpInst::ICMP_NE;
+ break;
+ case 0x6:
+ return llvm::Constant::getNullValue(Ty); // FALSE
+ case 0x7:
+ return llvm::Constant::getAllOnesValue(Ty); // TRUE
+ default:
+ llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate");
+ }
+
+ Value *Cmp = CGF.Builder.CreateICmp(Pred, Op0, Op1);
+ Value *Res = CGF.Builder.CreateSExt(Cmp, Ty);
+ return Res;
+}
+
static Value *EmitX86Select(CodeGenFunction &CGF,
Value *Mask, Value *Op0, Value *Op1) {
@@ -9278,6 +9558,25 @@ static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
}
+static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF,
+ ArrayRef<Value *> Ops, bool IsSigned) {
+ unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue();
+ llvm::Type *Ty = Ops[1]->getType();
+
+ Value *Res;
+ if (Rnd != 4) {
+ Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round
+ : Intrinsic::x86_avx512_uitofp_round;
+ Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() });
+ Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] });
+ } else {
+ Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty)
+ : CGF.Builder.CreateUIToFP(Ops[0], Ty);
+ }
+
+ return EmitX86Select(CGF, Ops[2], Res, Ops[1]);
+}
+
static Value *EmitX86Abs(CodeGenFunction &CGF, ArrayRef<Value *> Ops) {
llvm::Type *Ty = Ops[0]->getType();
@@ -9524,6 +9823,18 @@ Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
return EmitX86CpuIs(CPUStr);
}
+// Convert a BF16 to a float.
+static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF,
+ const CallExpr *E,
+ ArrayRef<Value *> Ops) {
+ llvm::Type *Int32Ty = CGF.Builder.getInt32Ty();
+ Value *ZeroExt = CGF.Builder.CreateZExt(Ops[0], Int32Ty);
+ Value *Shl = CGF.Builder.CreateShl(ZeroExt, 16);
+ llvm::Type *ResultType = CGF.ConvertType(E->getType());
+ Value *BitCast = CGF.Builder.CreateBitCast(Shl, ResultType);
+ return BitCast;
+}
+
Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
llvm::Type *Int32Ty = Builder.getInt32Ty();
@@ -9650,10 +9961,11 @@ llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) {
Value *CodeGenFunction::EmitX86CpuInit() {
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy,
/*Variadic*/ false);
- llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init");
- cast<llvm::GlobalValue>(Func)->setDSOLocal(true);
- cast<llvm::GlobalValue>(Func)->setDLLStorageClass(
- llvm::GlobalValue::DefaultStorageClass);
+ llvm::FunctionCallee Func =
+ CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init");
+ cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true);
+ cast<llvm::GlobalValue>(Func.getCallee())
+ ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
return Builder.CreateCall(Func);
}
@@ -9722,7 +10034,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1);
Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3);
Value *Data = ConstantInt::get(Int32Ty, 1);
- Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
+ Function *F = CGM.getIntrinsic(Intrinsic::prefetch);
return Builder.CreateCall(F, {Address, RW, Locality, Data});
}
case X86::BI_mm_clflush: {
@@ -9753,13 +10065,13 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_lzcnt_u16:
case X86::BI__builtin_ia32_lzcnt_u32:
case X86::BI__builtin_ia32_lzcnt_u64: {
- Value *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
+ Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
}
case X86::BI__builtin_ia32_tzcnt_u16:
case X86::BI__builtin_ia32_tzcnt_u32:
case X86::BI__builtin_ia32_tzcnt_u64: {
- Value *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
+ Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
}
case X86::BI__builtin_ia32_undef128:
@@ -9833,7 +10145,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_xsavec:
case X86::BI__builtin_ia32_xsavec64:
case X86::BI__builtin_ia32_xsaves:
- case X86::BI__builtin_ia32_xsaves64: {
+ case X86::BI__builtin_ia32_xsaves64:
+ case X86::BI__builtin_ia32_xsetbv:
+ case X86::BI_xsetbv: {
Intrinsic::ID ID;
#define INTRINSIC_X86_XSAVE_ID(NAME) \
case X86::BI__builtin_ia32_##NAME: \
@@ -9853,6 +10167,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
INTRINSIC_X86_XSAVE_ID(xsavec64);
INTRINSIC_X86_XSAVE_ID(xsaves);
INTRINSIC_X86_XSAVE_ID(xsaves64);
+ INTRINSIC_X86_XSAVE_ID(xsetbv);
+ case X86::BI_xsetbv:
+ ID = Intrinsic::x86_xsetbv;
+ break;
}
#undef INTRINSIC_X86_XSAVE_ID
Value *Mhi = Builder.CreateTrunc(
@@ -9862,6 +10180,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Ops.push_back(Mlo);
return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
}
+ case X86::BI__builtin_ia32_xgetbv:
+ case X86::BI_xgetbv:
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_xgetbv), Ops);
case X86::BI__builtin_ia32_storedqudi128_mask:
case X86::BI__builtin_ia32_storedqusi128_mask:
case X86::BI__builtin_ia32_storedquhi128_mask:
@@ -9930,6 +10251,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_cvtq2mask512:
return EmitX86ConvertToMask(*this, Ops[0]);
+ case X86::BI__builtin_ia32_cvtdq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtqq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtqq2pd512_mask:
+ return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/true);
+ case X86::BI__builtin_ia32_cvtudq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
+ return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/false);
+
case X86::BI__builtin_ia32_vfmaddss3:
case X86::BI__builtin_ia32_vfmaddsd3:
case X86::BI__builtin_ia32_vfmaddss3_mask:
@@ -10073,22 +10403,262 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_compressstoreqi512_mask:
return EmitX86CompressStore(*this, Ops);
- case X86::BI__builtin_ia32_storehps:
- case X86::BI__builtin_ia32_storelps: {
- llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
- llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
+ case X86::BI__builtin_ia32_expanddf128_mask:
+ case X86::BI__builtin_ia32_expanddf256_mask:
+ case X86::BI__builtin_ia32_expanddf512_mask:
+ case X86::BI__builtin_ia32_expandsf128_mask:
+ case X86::BI__builtin_ia32_expandsf256_mask:
+ case X86::BI__builtin_ia32_expandsf512_mask:
+ case X86::BI__builtin_ia32_expanddi128_mask:
+ case X86::BI__builtin_ia32_expanddi256_mask:
+ case X86::BI__builtin_ia32_expanddi512_mask:
+ case X86::BI__builtin_ia32_expandsi128_mask:
+ case X86::BI__builtin_ia32_expandsi256_mask:
+ case X86::BI__builtin_ia32_expandsi512_mask:
+ case X86::BI__builtin_ia32_expandhi128_mask:
+ case X86::BI__builtin_ia32_expandhi256_mask:
+ case X86::BI__builtin_ia32_expandhi512_mask:
+ case X86::BI__builtin_ia32_expandqi128_mask:
+ case X86::BI__builtin_ia32_expandqi256_mask:
+ case X86::BI__builtin_ia32_expandqi512_mask:
+ return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false);
+
+ case X86::BI__builtin_ia32_compressdf128_mask:
+ case X86::BI__builtin_ia32_compressdf256_mask:
+ case X86::BI__builtin_ia32_compressdf512_mask:
+ case X86::BI__builtin_ia32_compresssf128_mask:
+ case X86::BI__builtin_ia32_compresssf256_mask:
+ case X86::BI__builtin_ia32_compresssf512_mask:
+ case X86::BI__builtin_ia32_compressdi128_mask:
+ case X86::BI__builtin_ia32_compressdi256_mask:
+ case X86::BI__builtin_ia32_compressdi512_mask:
+ case X86::BI__builtin_ia32_compresssi128_mask:
+ case X86::BI__builtin_ia32_compresssi256_mask:
+ case X86::BI__builtin_ia32_compresssi512_mask:
+ case X86::BI__builtin_ia32_compresshi128_mask:
+ case X86::BI__builtin_ia32_compresshi256_mask:
+ case X86::BI__builtin_ia32_compresshi512_mask:
+ case X86::BI__builtin_ia32_compressqi128_mask:
+ case X86::BI__builtin_ia32_compressqi256_mask:
+ case X86::BI__builtin_ia32_compressqi512_mask:
+ return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true);
+
+ case X86::BI__builtin_ia32_gather3div2df:
+ case X86::BI__builtin_ia32_gather3div2di:
+ case X86::BI__builtin_ia32_gather3div4df:
+ case X86::BI__builtin_ia32_gather3div4di:
+ case X86::BI__builtin_ia32_gather3div4sf:
+ case X86::BI__builtin_ia32_gather3div4si:
+ case X86::BI__builtin_ia32_gather3div8sf:
+ case X86::BI__builtin_ia32_gather3div8si:
+ case X86::BI__builtin_ia32_gather3siv2df:
+ case X86::BI__builtin_ia32_gather3siv2di:
+ case X86::BI__builtin_ia32_gather3siv4df:
+ case X86::BI__builtin_ia32_gather3siv4di:
+ case X86::BI__builtin_ia32_gather3siv4sf:
+ case X86::BI__builtin_ia32_gather3siv4si:
+ case X86::BI__builtin_ia32_gather3siv8sf:
+ case X86::BI__builtin_ia32_gather3siv8si:
+ case X86::BI__builtin_ia32_gathersiv8df:
+ case X86::BI__builtin_ia32_gathersiv16sf:
+ case X86::BI__builtin_ia32_gatherdiv8df:
+ case X86::BI__builtin_ia32_gatherdiv16sf:
+ case X86::BI__builtin_ia32_gathersiv8di:
+ case X86::BI__builtin_ia32_gathersiv16si:
+ case X86::BI__builtin_ia32_gatherdiv8di:
+ case X86::BI__builtin_ia32_gatherdiv16si: {
+ Intrinsic::ID IID;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unexpected builtin");
+ case X86::BI__builtin_ia32_gather3div2df:
+ IID = Intrinsic::x86_avx512_mask_gather3div2_df;
+ break;
+ case X86::BI__builtin_ia32_gather3div2di:
+ IID = Intrinsic::x86_avx512_mask_gather3div2_di;
+ break;
+ case X86::BI__builtin_ia32_gather3div4df:
+ IID = Intrinsic::x86_avx512_mask_gather3div4_df;
+ break;
+ case X86::BI__builtin_ia32_gather3div4di:
+ IID = Intrinsic::x86_avx512_mask_gather3div4_di;
+ break;
+ case X86::BI__builtin_ia32_gather3div4sf:
+ IID = Intrinsic::x86_avx512_mask_gather3div4_sf;
+ break;
+ case X86::BI__builtin_ia32_gather3div4si:
+ IID = Intrinsic::x86_avx512_mask_gather3div4_si;
+ break;
+ case X86::BI__builtin_ia32_gather3div8sf:
+ IID = Intrinsic::x86_avx512_mask_gather3div8_sf;
+ break;
+ case X86::BI__builtin_ia32_gather3div8si:
+ IID = Intrinsic::x86_avx512_mask_gather3div8_si;
+ break;
+ case X86::BI__builtin_ia32_gather3siv2df:
+ IID = Intrinsic::x86_avx512_mask_gather3siv2_df;
+ break;
+ case X86::BI__builtin_ia32_gather3siv2di:
+ IID = Intrinsic::x86_avx512_mask_gather3siv2_di;
+ break;
+ case X86::BI__builtin_ia32_gather3siv4df:
+ IID = Intrinsic::x86_avx512_mask_gather3siv4_df;
+ break;
+ case X86::BI__builtin_ia32_gather3siv4di:
+ IID = Intrinsic::x86_avx512_mask_gather3siv4_di;
+ break;
+ case X86::BI__builtin_ia32_gather3siv4sf:
+ IID = Intrinsic::x86_avx512_mask_gather3siv4_sf;
+ break;
+ case X86::BI__builtin_ia32_gather3siv4si:
+ IID = Intrinsic::x86_avx512_mask_gather3siv4_si;
+ break;
+ case X86::BI__builtin_ia32_gather3siv8sf:
+ IID = Intrinsic::x86_avx512_mask_gather3siv8_sf;
+ break;
+ case X86::BI__builtin_ia32_gather3siv8si:
+ IID = Intrinsic::x86_avx512_mask_gather3siv8_si;
+ break;
+ case X86::BI__builtin_ia32_gathersiv8df:
+ IID = Intrinsic::x86_avx512_mask_gather_dpd_512;
+ break;
+ case X86::BI__builtin_ia32_gathersiv16sf:
+ IID = Intrinsic::x86_avx512_mask_gather_dps_512;
+ break;
+ case X86::BI__builtin_ia32_gatherdiv8df:
+ IID = Intrinsic::x86_avx512_mask_gather_qpd_512;
+ break;
+ case X86::BI__builtin_ia32_gatherdiv16sf:
+ IID = Intrinsic::x86_avx512_mask_gather_qps_512;
+ break;
+ case X86::BI__builtin_ia32_gathersiv8di:
+ IID = Intrinsic::x86_avx512_mask_gather_dpq_512;
+ break;
+ case X86::BI__builtin_ia32_gathersiv16si:
+ IID = Intrinsic::x86_avx512_mask_gather_dpi_512;
+ break;
+ case X86::BI__builtin_ia32_gatherdiv8di:
+ IID = Intrinsic::x86_avx512_mask_gather_qpq_512;
+ break;
+ case X86::BI__builtin_ia32_gatherdiv16si:
+ IID = Intrinsic::x86_avx512_mask_gather_qpi_512;
+ break;
+ }
- // cast val v2i64
- Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
+ unsigned MinElts = std::min(Ops[0]->getType()->getVectorNumElements(),
+ Ops[2]->getType()->getVectorNumElements());
+ Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
+ Function *Intr = CGM.getIntrinsic(IID);
+ return Builder.CreateCall(Intr, Ops);
+ }
- // extract (0, 1)
- unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
- Ops[1] = Builder.CreateExtractElement(Ops[1], Index, "extract");
+ case X86::BI__builtin_ia32_scattersiv8df:
+ case X86::BI__builtin_ia32_scattersiv16sf:
+ case X86::BI__builtin_ia32_scatterdiv8df:
+ case X86::BI__builtin_ia32_scatterdiv16sf:
+ case X86::BI__builtin_ia32_scattersiv8di:
+ case X86::BI__builtin_ia32_scattersiv16si:
+ case X86::BI__builtin_ia32_scatterdiv8di:
+ case X86::BI__builtin_ia32_scatterdiv16si:
+ case X86::BI__builtin_ia32_scatterdiv2df:
+ case X86::BI__builtin_ia32_scatterdiv2di:
+ case X86::BI__builtin_ia32_scatterdiv4df:
+ case X86::BI__builtin_ia32_scatterdiv4di:
+ case X86::BI__builtin_ia32_scatterdiv4sf:
+ case X86::BI__builtin_ia32_scatterdiv4si:
+ case X86::BI__builtin_ia32_scatterdiv8sf:
+ case X86::BI__builtin_ia32_scatterdiv8si:
+ case X86::BI__builtin_ia32_scattersiv2df:
+ case X86::BI__builtin_ia32_scattersiv2di:
+ case X86::BI__builtin_ia32_scattersiv4df:
+ case X86::BI__builtin_ia32_scattersiv4di:
+ case X86::BI__builtin_ia32_scattersiv4sf:
+ case X86::BI__builtin_ia32_scattersiv4si:
+ case X86::BI__builtin_ia32_scattersiv8sf:
+ case X86::BI__builtin_ia32_scattersiv8si: {
+ Intrinsic::ID IID;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unexpected builtin");
+ case X86::BI__builtin_ia32_scattersiv8df:
+ IID = Intrinsic::x86_avx512_mask_scatter_dpd_512;
+ break;
+ case X86::BI__builtin_ia32_scattersiv16sf:
+ IID = Intrinsic::x86_avx512_mask_scatter_dps_512;
+ break;
+ case X86::BI__builtin_ia32_scatterdiv8df:
+ IID = Intrinsic::x86_avx512_mask_scatter_qpd_512;
+ break;
+ case X86::BI__builtin_ia32_scatterdiv16sf:
+ IID = Intrinsic::x86_avx512_mask_scatter_qps_512;
+ break;
+ case X86::BI__builtin_ia32_scattersiv8di:
+ IID = Intrinsic::x86_avx512_mask_scatter_dpq_512;
+ break;
+ case X86::BI__builtin_ia32_scattersiv16si:
+ IID = Intrinsic::x86_avx512_mask_scatter_dpi_512;
+ break;
+ case X86::BI__builtin_ia32_scatterdiv8di:
+ IID = Intrinsic::x86_avx512_mask_scatter_qpq_512;
+ break;
+ case X86::BI__builtin_ia32_scatterdiv16si:
+ IID = Intrinsic::x86_avx512_mask_scatter_qpi_512;
+ break;
+ case X86::BI__builtin_ia32_scatterdiv2df:
+ IID = Intrinsic::x86_avx512_mask_scatterdiv2_df;
+ break;
+ case X86::BI__builtin_ia32_scatterdiv2di:
+ IID = Intrinsic::x86_avx512_mask_scatterdiv2_di;
+ break;
+ case X86::BI__builtin_ia32_scatterdiv4df:
+ IID = Intrinsic::x86_avx512_mask_scatterdiv4_df;
+ break;
+ case X86::BI__builtin_ia32_scatterdiv4di:
+ IID = Intrinsic::x86_avx512_mask_scatterdiv4_di;
+ break;
+ case X86::BI__builtin_ia32_scatterdiv4sf:
+ IID = Intrinsic::x86_avx512_mask_scatterdiv4_sf;
+ break;
+ case X86::BI__builtin_ia32_scatterdiv4si:
+ IID = Intrinsic::x86_avx512_mask_scatterdiv4_si;
+ break;
+ case X86::BI__builtin_ia32_scatterdiv8sf:
+ IID = Intrinsic::x86_avx512_mask_scatterdiv8_sf;
+ break;
+ case X86::BI__builtin_ia32_scatterdiv8si:
+ IID = Intrinsic::x86_avx512_mask_scatterdiv8_si;
+ break;
+ case X86::BI__builtin_ia32_scattersiv2df:
+ IID = Intrinsic::x86_avx512_mask_scattersiv2_df;
+ break;
+ case X86::BI__builtin_ia32_scattersiv2di:
+ IID = Intrinsic::x86_avx512_mask_scattersiv2_di;
+ break;
+ case X86::BI__builtin_ia32_scattersiv4df:
+ IID = Intrinsic::x86_avx512_mask_scattersiv4_df;
+ break;
+ case X86::BI__builtin_ia32_scattersiv4di:
+ IID = Intrinsic::x86_avx512_mask_scattersiv4_di;
+ break;
+ case X86::BI__builtin_ia32_scattersiv4sf:
+ IID = Intrinsic::x86_avx512_mask_scattersiv4_sf;
+ break;
+ case X86::BI__builtin_ia32_scattersiv4si:
+ IID = Intrinsic::x86_avx512_mask_scattersiv4_si;
+ break;
+ case X86::BI__builtin_ia32_scattersiv8sf:
+ IID = Intrinsic::x86_avx512_mask_scattersiv8_sf;
+ break;
+ case X86::BI__builtin_ia32_scattersiv8si:
+ IID = Intrinsic::x86_avx512_mask_scattersiv8_si;
+ break;
+ }
- // cast pointer to i64 & store
- Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
- return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
+ unsigned MinElts = std::min(Ops[2]->getType()->getVectorNumElements(),
+ Ops[3]->getType()->getVectorNumElements());
+ Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
+ Function *Intr = CGM.getIntrinsic(IID);
+ return Builder.CreateCall(Intr, Ops);
}
+
case X86::BI__builtin_ia32_vextractf128_pd256:
case X86::BI__builtin_ia32_vextractf128_ps256:
case X86::BI__builtin_ia32_vextractf128_si256:
@@ -10693,6 +11263,16 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
return EmitX86MaskedCompare(*this, CC, false, Ops);
}
+ case X86::BI__builtin_ia32_vpcomb:
+ case X86::BI__builtin_ia32_vpcomw:
+ case X86::BI__builtin_ia32_vpcomd:
+ case X86::BI__builtin_ia32_vpcomq:
+ return EmitX86vpcom(*this, Ops, true);
+ case X86::BI__builtin_ia32_vpcomub:
+ case X86::BI__builtin_ia32_vpcomuw:
+ case X86::BI__builtin_ia32_vpcomud:
+ case X86::BI__builtin_ia32_vpcomuq:
+ return EmitX86vpcom(*this, Ops, false);
case X86::BI__builtin_ia32_kortestcqi:
case X86::BI__builtin_ia32_kortestchi:
@@ -11154,6 +11734,47 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn);
}
+ case X86::BI__builtin_ia32_vp2intersect_q_512:
+ case X86::BI__builtin_ia32_vp2intersect_q_256:
+ case X86::BI__builtin_ia32_vp2intersect_q_128:
+ case X86::BI__builtin_ia32_vp2intersect_d_512:
+ case X86::BI__builtin_ia32_vp2intersect_d_256:
+ case X86::BI__builtin_ia32_vp2intersect_d_128: {
+ unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ Intrinsic::ID ID;
+
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_vp2intersect_q_512:
+ ID = Intrinsic::x86_avx512_vp2intersect_q_512;
+ break;
+ case X86::BI__builtin_ia32_vp2intersect_q_256:
+ ID = Intrinsic::x86_avx512_vp2intersect_q_256;
+ break;
+ case X86::BI__builtin_ia32_vp2intersect_q_128:
+ ID = Intrinsic::x86_avx512_vp2intersect_q_128;
+ break;
+ case X86::BI__builtin_ia32_vp2intersect_d_512:
+ ID = Intrinsic::x86_avx512_vp2intersect_d_512;
+ break;
+ case X86::BI__builtin_ia32_vp2intersect_d_256:
+ ID = Intrinsic::x86_avx512_vp2intersect_d_256;
+ break;
+ case X86::BI__builtin_ia32_vp2intersect_d_128:
+ ID = Intrinsic::x86_avx512_vp2intersect_d_128;
+ break;
+ }
+
+ Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {Ops[0], Ops[1]});
+ Value *Result = Builder.CreateExtractValue(Call, 0);
+ Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
+ Builder.CreateDefaultAlignedStore(Result, Ops[2]);
+
+ Result = Builder.CreateExtractValue(Call, 1);
+ Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
+ return Builder.CreateDefaultAlignedStore(Result, Ops[3]);
+ }
+
case X86::BI__builtin_ia32_vpmultishiftqb128:
case X86::BI__builtin_ia32_vpmultishiftqb256:
case X86::BI__builtin_ia32_vpmultishiftqb512: {
@@ -11336,6 +11957,32 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_cmpordsd:
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
+// AVX512 bf16 intrinsics
+ case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
+ Ops[2] = getMaskVecValue(*this, Ops[2],
+ Ops[0]->getType()->getVectorNumElements());
+ Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
+ return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
+ }
+ case X86::BI__builtin_ia32_cvtsbf162ss_32:
+ return EmitX86CvtBF16ToFloatExpr(*this, E, Ops);
+
+ case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
+ case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: {
+ Intrinsic::ID IID;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
+ IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256;
+ break;
+ case X86::BI__builtin_ia32_cvtneps2bf16_512_mask:
+ IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_512;
+ break;
+ }
+ Value *Res = Builder.CreateCall(CGM.getIntrinsic(IID), Ops[0]);
+ return EmitX86Select(*this, Ops[2], Res, Ops[1]);
+ }
+
case X86::BI__emul:
case X86::BI__emulu: {
llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64);
@@ -11386,9 +12033,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
// return Builder.CreateCall(F, Ops);
llvm::Type *Int128Ty = Builder.getInt128Ty();
- Value *Val = Builder.CreateOr(
- Builder.CreateShl(Builder.CreateZExt(Ops[1], Int128Ty), 64),
- Builder.CreateZExt(Ops[0], Int128Ty));
+ Value *HighPart128 =
+ Builder.CreateShl(Builder.CreateZExt(Ops[1], Int128Ty), 64);
+ Value *LowPart128 = Builder.CreateZExt(Ops[0], Int128Ty);
+ Value *Val = Builder.CreateOr(HighPart128, LowPart128);
Value *Amt = Builder.CreateAnd(Builder.CreateZExt(Ops[2], Int128Ty),
llvm::ConstantInt::get(Int128Ty, 0x3f));
Value *Res;
@@ -11465,7 +12113,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
case X86::BI_AddressOfReturnAddress: {
- Value *F = CGM.getIntrinsic(Intrinsic::addressofreturnaddress);
+ Function *F = CGM.getIntrinsic(Intrinsic::addressofreturnaddress);
return Builder.CreateCall(F);
}
case X86::BI__stosb: {
@@ -11480,13 +12128,13 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// This syscall signals a driver assertion failure in x86 NT kernels.
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
llvm::InlineAsm *IA =
- llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*SideEffects=*/true);
+ llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*hasSideEffects=*/true);
llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
getLLVMContext(), llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoReturn);
- CallSite CS = Builder.CreateCall(IA);
- CS.setAttributes(NoReturnAttr);
- return CS.getInstruction();
+ llvm::CallInst *CI = Builder.CreateCall(IA);
+ CI->setAttributes(NoReturnAttr);
+ return CI;
}
case X86::BI__readfsbyte:
case X86::BI__readfsword:
@@ -12001,7 +12649,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Value *Y = EmitScalarExpr(E->getArg(1));
llvm::Value *Z = EmitScalarExpr(E->getArg(2));
- llvm::Value *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale,
+ llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale,
X->getType());
llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});
@@ -12023,7 +12671,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
- llvm::Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas,
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas,
Src0->getType());
llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
@@ -12031,6 +12679,8 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
+ case AMDGPU::BI__builtin_amdgcn_mov_dpp8:
+ return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_mov_dpp8);
case AMDGPU::BI__builtin_amdgcn_mov_dpp:
case AMDGPU::BI__builtin_amdgcn_update_dpp: {
llvm::SmallVector<llvm::Value *, 6> Args;
@@ -12039,7 +12689,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
assert(Args.size() == 5 || Args.size() == 6);
if (Args.size() == 5)
Args.insert(Args.begin(), llvm::UndefValue::get(Args[0]->getType()));
- Value *F =
+ Function *F =
CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
return Builder.CreateCall(F, Args);
}
@@ -12080,13 +12730,13 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_frexp_exp:
case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
Value *Src0 = EmitScalarExpr(E->getArg(0));
- Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
+ Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
{ Builder.getInt32Ty(), Src0->getType() });
return Builder.CreateCall(F, Src0);
}
case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
Value *Src0 = EmitScalarExpr(E->getArg(0));
- Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
+ Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
{ Builder.getInt16Ty(), Src0->getType() });
return Builder.CreateCall(F, Src0);
}
@@ -12096,14 +12746,34 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract);
case AMDGPU::BI__builtin_amdgcn_lerp:
return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp);
+ case AMDGPU::BI__builtin_amdgcn_ubfe:
+ return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_ubfe);
+ case AMDGPU::BI__builtin_amdgcn_sbfe:
+ return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_sbfe);
case AMDGPU::BI__builtin_amdgcn_uicmp:
case AMDGPU::BI__builtin_amdgcn_uicmpl:
case AMDGPU::BI__builtin_amdgcn_sicmp:
- case AMDGPU::BI__builtin_amdgcn_sicmpl:
- return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_icmp);
+ case AMDGPU::BI__builtin_amdgcn_sicmpl: {
+ llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
+ llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
+
+ // FIXME-GFX10: How should 32 bit mask be handled?
+ Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
+ { Builder.getInt64Ty(), Src0->getType() });
+ return Builder.CreateCall(F, { Src0, Src1, Src2 });
+ }
case AMDGPU::BI__builtin_amdgcn_fcmp:
- case AMDGPU::BI__builtin_amdgcn_fcmpf:
- return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fcmp);
+ case AMDGPU::BI__builtin_amdgcn_fcmpf: {
+ llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
+ llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
+
+ // FIXME-GFX10: How should 32 bit mask be handled?
+ Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
+ { Builder.getInt64Ty(), Src0->getType() });
+ return Builder.CreateCall(F, { Src0, Src1, Src2 });
+ }
case AMDGPU::BI__builtin_amdgcn_class:
case AMDGPU::BI__builtin_amdgcn_classf:
case AMDGPU::BI__builtin_amdgcn_classh:
@@ -12111,6 +12781,14 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_fmed3f:
case AMDGPU::BI__builtin_amdgcn_fmed3h:
return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3);
+ case AMDGPU::BI__builtin_amdgcn_ds_append:
+ case AMDGPU::BI__builtin_amdgcn_ds_consume: {
+ Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ?
+ Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume;
+ Value *Src0 = EmitScalarExpr(E->getArg(0));
+ Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() });
+ return Builder.CreateCall(F, { Src0, Builder.getFalse() });
+ }
case AMDGPU::BI__builtin_amdgcn_read_exec: {
CallInst *CI = cast<CallInst>(
EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, true, "exec"));
@@ -12160,7 +12838,7 @@ static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
for (unsigned I = 0; I < NumArgs; ++I)
Args[I] = CGF.EmitScalarExpr(E->getArg(I));
Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs));
- Value *F = CGF.CGM.getIntrinsic(IntrinsicID);
+ Function *F = CGF.CGM.getIntrinsic(IntrinsicID);
Value *Call = CGF.Builder.CreateCall(F, Args);
Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
CGF.Builder.CreateStore(CC, CCPtr);
@@ -12173,30 +12851,30 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
case SystemZ::BI__builtin_tbegin: {
Value *TDB = EmitScalarExpr(E->getArg(0));
Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
- Value *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
+ Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
return Builder.CreateCall(F, {TDB, Control});
}
case SystemZ::BI__builtin_tbegin_nofloat: {
Value *TDB = EmitScalarExpr(E->getArg(0));
Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
- Value *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
+ Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
return Builder.CreateCall(F, {TDB, Control});
}
case SystemZ::BI__builtin_tbeginc: {
Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
- Value *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
+ Function *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
return Builder.CreateCall(F, {TDB, Control});
}
case SystemZ::BI__builtin_tabort: {
Value *Data = EmitScalarExpr(E->getArg(0));
- Value *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
+ Function *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
}
case SystemZ::BI__builtin_non_tx_store: {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *Data = EmitScalarExpr(E->getArg(1));
- Value *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
+ Function *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
return Builder.CreateCall(F, {Data, Address});
}
@@ -12406,6 +13084,15 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {X, Y, M4Value});
}
+ case SystemZ::BI__builtin_s390_vlbrh:
+ case SystemZ::BI__builtin_s390_vlbrf:
+ case SystemZ::BI__builtin_s390_vlbrg: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+ Function *F = CGM.getIntrinsic(Intrinsic::bswap, ResultType);
+ return Builder.CreateCall(F, X);
+ }
+
// Vector intrinsics that output the post-instruction CC value.
#define INTRINSIC_WITH_CC(NAME) \
@@ -12481,6 +13168,14 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
INTRINSIC_WITH_CC(s390_vftcisb);
INTRINSIC_WITH_CC(s390_vftcidb);
+ INTRINSIC_WITH_CC(s390_vstrsb);
+ INTRINSIC_WITH_CC(s390_vstrsh);
+ INTRINSIC_WITH_CC(s390_vstrsf);
+
+ INTRINSIC_WITH_CC(s390_vstrszb);
+ INTRINSIC_WITH_CC(s390_vstrszh);
+ INTRINSIC_WITH_CC(s390_vstrszf);
+
#undef INTRINSIC_WITH_CC
default:
@@ -12488,8 +13183,252 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
}
}
-Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
- const CallExpr *E) {
+namespace {
+// Helper classes for mapping MMA builtins to particular LLVM intrinsic variant.
+struct NVPTXMmaLdstInfo {
+ unsigned NumResults; // Number of elements to load/store
+ // Intrinsic IDs for row/col variants. 0 if particular layout is unsupported.
+ unsigned IID_col;
+ unsigned IID_row;
+};
+
+#define MMA_INTR(geom_op_type, layout) \
+ Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride
+#define MMA_LDST(n, geom_op_type) \
+ { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) }
+
+static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
+ switch (BuiltinID) {
+ // FP MMA loads
+ case NVPTX::BI__hmma_m16n16k16_ld_a:
+ return MMA_LDST(8, m16n16k16_load_a_f16);
+ case NVPTX::BI__hmma_m16n16k16_ld_b:
+ return MMA_LDST(8, m16n16k16_load_b_f16);
+ case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
+ return MMA_LDST(4, m16n16k16_load_c_f16);
+ case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
+ return MMA_LDST(8, m16n16k16_load_c_f32);
+ case NVPTX::BI__hmma_m32n8k16_ld_a:
+ return MMA_LDST(8, m32n8k16_load_a_f16);
+ case NVPTX::BI__hmma_m32n8k16_ld_b:
+ return MMA_LDST(8, m32n8k16_load_b_f16);
+ case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
+ return MMA_LDST(4, m32n8k16_load_c_f16);
+ case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
+ return MMA_LDST(8, m32n8k16_load_c_f32);
+ case NVPTX::BI__hmma_m8n32k16_ld_a:
+ return MMA_LDST(8, m8n32k16_load_a_f16);
+ case NVPTX::BI__hmma_m8n32k16_ld_b:
+ return MMA_LDST(8, m8n32k16_load_b_f16);
+ case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
+ return MMA_LDST(4, m8n32k16_load_c_f16);
+ case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
+ return MMA_LDST(8, m8n32k16_load_c_f32);
+
+ // Integer MMA loads
+ case NVPTX::BI__imma_m16n16k16_ld_a_s8:
+ return MMA_LDST(2, m16n16k16_load_a_s8);
+ case NVPTX::BI__imma_m16n16k16_ld_a_u8:
+ return MMA_LDST(2, m16n16k16_load_a_u8);
+ case NVPTX::BI__imma_m16n16k16_ld_b_s8:
+ return MMA_LDST(2, m16n16k16_load_b_s8);
+ case NVPTX::BI__imma_m16n16k16_ld_b_u8:
+ return MMA_LDST(2, m16n16k16_load_b_u8);
+ case NVPTX::BI__imma_m16n16k16_ld_c:
+ return MMA_LDST(8, m16n16k16_load_c_s32);
+ case NVPTX::BI__imma_m32n8k16_ld_a_s8:
+ return MMA_LDST(4, m32n8k16_load_a_s8);
+ case NVPTX::BI__imma_m32n8k16_ld_a_u8:
+ return MMA_LDST(4, m32n8k16_load_a_u8);
+ case NVPTX::BI__imma_m32n8k16_ld_b_s8:
+ return MMA_LDST(1, m32n8k16_load_b_s8);
+ case NVPTX::BI__imma_m32n8k16_ld_b_u8:
+ return MMA_LDST(1, m32n8k16_load_b_u8);
+ case NVPTX::BI__imma_m32n8k16_ld_c:
+ return MMA_LDST(8, m32n8k16_load_c_s32);
+ case NVPTX::BI__imma_m8n32k16_ld_a_s8:
+ return MMA_LDST(1, m8n32k16_load_a_s8);
+ case NVPTX::BI__imma_m8n32k16_ld_a_u8:
+ return MMA_LDST(1, m8n32k16_load_a_u8);
+ case NVPTX::BI__imma_m8n32k16_ld_b_s8:
+ return MMA_LDST(4, m8n32k16_load_b_s8);
+ case NVPTX::BI__imma_m8n32k16_ld_b_u8:
+ return MMA_LDST(4, m8n32k16_load_b_u8);
+ case NVPTX::BI__imma_m8n32k16_ld_c:
+ return MMA_LDST(8, m8n32k16_load_c_s32);
+
+ // Sub-integer MMA loads.
+ // Only row/col layout is supported by A/B fragments.
+ case NVPTX::BI__imma_m8n8k32_ld_a_s4:
+ return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)};
+ case NVPTX::BI__imma_m8n8k32_ld_a_u4:
+ return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)};
+ case NVPTX::BI__imma_m8n8k32_ld_b_s4:
+ return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0};
+ case NVPTX::BI__imma_m8n8k32_ld_b_u4:
+ return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0};
+ case NVPTX::BI__imma_m8n8k32_ld_c:
+ return MMA_LDST(2, m8n8k32_load_c_s32);
+ case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
+ return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)};
+ case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
+ return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0};
+ case NVPTX::BI__bmma_m8n8k128_ld_c:
+ return MMA_LDST(2, m8n8k128_load_c_s32);
+
+  // NOTE: We need to follow the inconsistent naming scheme used by NVCC.
+  // Unlike PTX and LLVM IR, where stores always use fragment D, NVCC builtins
+  // always use fragment C for both loads and stores.
+ // FP MMA stores.
+ case NVPTX::BI__hmma_m16n16k16_st_c_f16:
+ return MMA_LDST(4, m16n16k16_store_d_f16);
+ case NVPTX::BI__hmma_m16n16k16_st_c_f32:
+ return MMA_LDST(8, m16n16k16_store_d_f32);
+ case NVPTX::BI__hmma_m32n8k16_st_c_f16:
+ return MMA_LDST(4, m32n8k16_store_d_f16);
+ case NVPTX::BI__hmma_m32n8k16_st_c_f32:
+ return MMA_LDST(8, m32n8k16_store_d_f32);
+ case NVPTX::BI__hmma_m8n32k16_st_c_f16:
+ return MMA_LDST(4, m8n32k16_store_d_f16);
+ case NVPTX::BI__hmma_m8n32k16_st_c_f32:
+ return MMA_LDST(8, m8n32k16_store_d_f32);
+
+ // Integer and sub-integer MMA stores.
+ // Another naming quirk. Unlike other MMA builtins that use PTX types in the
+ // name, integer loads/stores use LLVM's i32.
+ case NVPTX::BI__imma_m16n16k16_st_c_i32:
+ return MMA_LDST(8, m16n16k16_store_d_s32);
+ case NVPTX::BI__imma_m32n8k16_st_c_i32:
+ return MMA_LDST(8, m32n8k16_store_d_s32);
+ case NVPTX::BI__imma_m8n32k16_st_c_i32:
+ return MMA_LDST(8, m8n32k16_store_d_s32);
+ case NVPTX::BI__imma_m8n8k32_st_c_i32:
+ return MMA_LDST(2, m8n8k32_store_d_s32);
+ case NVPTX::BI__bmma_m8n8k128_st_c_i32:
+ return MMA_LDST(2, m8n8k128_store_d_s32);
+
+ default:
+ llvm_unreachable("Unknown MMA builtin");
+ }
+}
+#undef MMA_LDST
+#undef MMA_INTR
+
+
+struct NVPTXMmaInfo {
+ unsigned NumEltsA;
+ unsigned NumEltsB;
+ unsigned NumEltsC;
+ unsigned NumEltsD;
+ std::array<unsigned, 8> Variants;
+
+ unsigned getMMAIntrinsic(int Layout, bool Satf) {
+ unsigned Index = Layout * 2 + Satf;
+ if (Index >= Variants.size())
+ return 0;
+ return Variants[Index];
+ }
+};
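The Variants table is indexed as Layout * 2 + Satf; a minimal sketch of the resulting mapping, matching the MMA_VARIANTS ordering defined below (index values only; the layout names are shorthand, not identifiers from the patch):

// Index 0: row_row          Index 1: row_row + satfinite
// Index 2: row_col          Index 3: row_col + satfinite
// Index 4: col_row          Index 5: col_row + satfinite
// Index 6: col_col          Index 7: col_col + satfinite
// A zero entry (as in MMA_VARIANTS_I4 / MMA_VARIANTS_B1) marks an unsupported
// combination, and the caller returns nullptr for it.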
+
+// Returns an intrinsic that matches Layout and Satf for valid combinations of
+// Layout and Satf, 0 otherwise.
+static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
+ // clang-format off
+#define MMA_VARIANTS(geom, type) {{ \
+ Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \
+ Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
+ Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
+ Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
+ Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \
+ Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
+ Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type, \
+ Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite \
+ }}
+// Sub-integer MMA only supports row.col layout.
+#define MMA_VARIANTS_I4(geom, type) {{ \
+ 0, \
+ 0, \
+ Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
+ Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
+ 0, \
+ 0, \
+ 0, \
+ 0 \
+ }}
+// b1 MMA does not support .satfinite.
+#define MMA_VARIANTS_B1(geom, type) {{ \
+ 0, \
+ 0, \
+ Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 0 \
+ }}
+ // clang-format on
+ switch (BuiltinID) {
+ // FP MMA
+    // Note that the 'type' argument of MMA_VARIANTS uses D_C notation, while
+    // the NumEltsN fields of the return value are ordered as A,B,C,D.
+ case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
+ return {8, 8, 4, 4, MMA_VARIANTS(m16n16k16, f16_f16)};
+ case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
+ return {8, 8, 4, 8, MMA_VARIANTS(m16n16k16, f32_f16)};
+ case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
+ return {8, 8, 8, 4, MMA_VARIANTS(m16n16k16, f16_f32)};
+ case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
+ return {8, 8, 8, 8, MMA_VARIANTS(m16n16k16, f32_f32)};
+ case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
+ return {8, 8, 4, 4, MMA_VARIANTS(m32n8k16, f16_f16)};
+ case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
+ return {8, 8, 4, 8, MMA_VARIANTS(m32n8k16, f32_f16)};
+ case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
+ return {8, 8, 8, 4, MMA_VARIANTS(m32n8k16, f16_f32)};
+ case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
+ return {8, 8, 8, 8, MMA_VARIANTS(m32n8k16, f32_f32)};
+ case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
+ return {8, 8, 4, 4, MMA_VARIANTS(m8n32k16, f16_f16)};
+ case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
+ return {8, 8, 4, 8, MMA_VARIANTS(m8n32k16, f32_f16)};
+ case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
+ return {8, 8, 8, 4, MMA_VARIANTS(m8n32k16, f16_f32)};
+ case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
+ return {8, 8, 8, 8, MMA_VARIANTS(m8n32k16, f32_f32)};
+
+ // Integer MMA
+ case NVPTX::BI__imma_m16n16k16_mma_s8:
+ return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, s8)};
+ case NVPTX::BI__imma_m16n16k16_mma_u8:
+ return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, u8)};
+ case NVPTX::BI__imma_m32n8k16_mma_s8:
+ return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, s8)};
+ case NVPTX::BI__imma_m32n8k16_mma_u8:
+ return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, u8)};
+ case NVPTX::BI__imma_m8n32k16_mma_s8:
+ return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, s8)};
+ case NVPTX::BI__imma_m8n32k16_mma_u8:
+ return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, u8)};
+
+ // Sub-integer MMA
+ case NVPTX::BI__imma_m8n8k32_mma_s4:
+ return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, s4)};
+ case NVPTX::BI__imma_m8n8k32_mma_u4:
+ return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, u4)};
+ case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
+ return {1, 1, 2, 2, MMA_VARIANTS_B1(m8n8k128, b1)};
+ default:
+ llvm_unreachable("Unexpected builtin ID.");
+ }
+#undef MMA_VARIANTS
+#undef MMA_VARIANTS_I4
+#undef MMA_VARIANTS_B1
+}
+
+} // namespace
+
+Value *
+CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
auto MakeLdg = [&](unsigned IntrinsicID) {
Value *Ptr = EmitScalarExpr(E->getArg(0));
clang::CharUnits Align =
@@ -12564,30 +13503,18 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
// success flag.
return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);
- case NVPTX::BI__nvvm_atom_add_gen_f: {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- Value *Val = EmitScalarExpr(E->getArg(1));
- // atomicrmw only deals with integer arguments so we need to use
- // LLVM's nvvm_atomic_load_add_f32 intrinsic for that.
- Value *FnALAF32 =
- CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_add_f32, Ptr->getType());
- return Builder.CreateCall(FnALAF32, {Ptr, Val});
- }
-
+ case NVPTX::BI__nvvm_atom_add_gen_f:
case NVPTX::BI__nvvm_atom_add_gen_d: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
Value *Val = EmitScalarExpr(E->getArg(1));
- // atomicrmw only deals with integer arguments, so we need to use
- // LLVM's nvvm_atomic_load_add_f64 intrinsic.
- Value *FnALAF64 =
- CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_add_f64, Ptr->getType());
- return Builder.CreateCall(FnALAF64, {Ptr, Val});
+ return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, Ptr, Val,
+ AtomicOrdering::SequentiallyConsistent);
}
case NVPTX::BI__nvvm_atom_inc_gen_ui: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
Value *Val = EmitScalarExpr(E->getArg(1));
- Value *FnALI32 =
+ Function *FnALI32 =
CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType());
return Builder.CreateCall(FnALI32, {Ptr, Val});
}
@@ -12595,7 +13522,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
case NVPTX::BI__nvvm_atom_dec_gen_ui: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
Value *Val = EmitScalarExpr(E->getArg(1));
- Value *FnALD32 =
+ Function *FnALD32 =
CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType());
return Builder.CreateCall(FnALD32, {Ptr, Val});
}
@@ -12752,6 +13679,8 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
Builder.CreateStore(Pred, PredOutPtr);
return Builder.CreateExtractValue(ResultPair, 0);
}
+
+ // FP MMA loads
case NVPTX::BI__hmma_m16n16k16_ld_a:
case NVPTX::BI__hmma_m16n16k16_ld_b:
case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
@@ -12763,7 +13692,33 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
case NVPTX::BI__hmma_m8n32k16_ld_a:
case NVPTX::BI__hmma_m8n32k16_ld_b:
case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
- case NVPTX::BI__hmma_m8n32k16_ld_c_f32: {
+ case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
+ // Integer MMA loads.
+ case NVPTX::BI__imma_m16n16k16_ld_a_s8:
+ case NVPTX::BI__imma_m16n16k16_ld_a_u8:
+ case NVPTX::BI__imma_m16n16k16_ld_b_s8:
+ case NVPTX::BI__imma_m16n16k16_ld_b_u8:
+ case NVPTX::BI__imma_m16n16k16_ld_c:
+ case NVPTX::BI__imma_m32n8k16_ld_a_s8:
+ case NVPTX::BI__imma_m32n8k16_ld_a_u8:
+ case NVPTX::BI__imma_m32n8k16_ld_b_s8:
+ case NVPTX::BI__imma_m32n8k16_ld_b_u8:
+ case NVPTX::BI__imma_m32n8k16_ld_c:
+ case NVPTX::BI__imma_m8n32k16_ld_a_s8:
+ case NVPTX::BI__imma_m8n32k16_ld_a_u8:
+ case NVPTX::BI__imma_m8n32k16_ld_b_s8:
+ case NVPTX::BI__imma_m8n32k16_ld_b_u8:
+ case NVPTX::BI__imma_m8n32k16_ld_c:
+ // Sub-integer MMA loads.
+ case NVPTX::BI__imma_m8n8k32_ld_a_s4:
+ case NVPTX::BI__imma_m8n8k32_ld_a_u4:
+ case NVPTX::BI__imma_m8n8k32_ld_b_s4:
+ case NVPTX::BI__imma_m8n8k32_ld_b_u4:
+ case NVPTX::BI__imma_m8n8k32_ld_c:
+ case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
+ case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
+ case NVPTX::BI__bmma_m8n8k128_ld_c:
+ {
Address Dst = EmitPointerWithAlignment(E->getArg(0));
Value *Src = EmitScalarExpr(E->getArg(1));
Value *Ldm = EmitScalarExpr(E->getArg(2));
@@ -12771,82 +13726,28 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
if (!E->getArg(3)->isIntegerConstantExpr(isColMajorArg, getContext()))
return nullptr;
bool isColMajor = isColMajorArg.getSExtValue();
- unsigned IID;
- unsigned NumResults;
- switch (BuiltinID) {
- case NVPTX::BI__hmma_m16n16k16_ld_a:
- IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride
- : Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride;
- NumResults = 8;
- break;
- case NVPTX::BI__hmma_m16n16k16_ld_b:
- IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col_stride
- : Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row_stride;
- NumResults = 8;
- break;
- case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
- IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col_stride
- : Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row_stride;
- NumResults = 4;
- break;
- case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
- IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col_stride
- : Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row_stride;
- NumResults = 8;
- break;
- case NVPTX::BI__hmma_m32n8k16_ld_a:
- IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col_stride
- : Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row_stride;
- NumResults = 8;
- break;
- case NVPTX::BI__hmma_m32n8k16_ld_b:
- IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col_stride
- : Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row_stride;
- NumResults = 8;
- break;
- case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
- IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col_stride
- : Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row_stride;
- NumResults = 4;
- break;
- case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
- IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col_stride
- : Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row_stride;
- NumResults = 8;
- break;
- case NVPTX::BI__hmma_m8n32k16_ld_a:
- IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col_stride
- : Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row_stride;
- NumResults = 8;
- break;
- case NVPTX::BI__hmma_m8n32k16_ld_b:
- IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col_stride
- : Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row_stride;
- NumResults = 8;
- break;
- case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
- IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col_stride
- : Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row_stride;
- NumResults = 4;
- break;
- case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
- IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col_stride
- : Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row_stride;
- NumResults = 8;
- break;
- default:
- llvm_unreachable("Unexpected builtin ID.");
- }
+ NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
+ unsigned IID = isColMajor ? II.IID_col : II.IID_row;
+ if (IID == 0)
+ return nullptr;
+
Value *Result =
Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm});
// Save returned values.
- for (unsigned i = 0; i < NumResults; ++i) {
- Builder.CreateAlignedStore(
- Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
- Dst.getElementType()),
- Builder.CreateGEP(Dst.getPointer(), llvm::ConstantInt::get(IntTy, i)),
- CharUnits::fromQuantity(4));
+ assert(II.NumResults);
+ if (II.NumResults == 1) {
+ Builder.CreateAlignedStore(Result, Dst.getPointer(),
+ CharUnits::fromQuantity(4));
+ } else {
+ for (unsigned i = 0; i < II.NumResults; ++i) {
+ Builder.CreateAlignedStore(
+ Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
+ Dst.getElementType()),
+ Builder.CreateGEP(Dst.getPointer(),
+ llvm::ConstantInt::get(IntTy, i)),
+ CharUnits::fromQuantity(4));
+ }
}
return Result;
}
@@ -12856,7 +13757,12 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
case NVPTX::BI__hmma_m32n8k16_st_c_f16:
case NVPTX::BI__hmma_m32n8k16_st_c_f32:
case NVPTX::BI__hmma_m8n32k16_st_c_f16:
- case NVPTX::BI__hmma_m8n32k16_st_c_f32: {
+ case NVPTX::BI__hmma_m8n32k16_st_c_f32:
+ case NVPTX::BI__imma_m16n16k16_st_c_i32:
+ case NVPTX::BI__imma_m32n8k16_st_c_i32:
+ case NVPTX::BI__imma_m8n32k16_st_c_i32:
+ case NVPTX::BI__imma_m8n8k32_st_c_i32:
+ case NVPTX::BI__bmma_m8n8k128_st_c_i32: {
Value *Dst = EmitScalarExpr(E->getArg(0));
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *Ldm = EmitScalarExpr(E->getArg(2));
@@ -12864,45 +13770,15 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
if (!E->getArg(3)->isIntegerConstantExpr(isColMajorArg, getContext()))
return nullptr;
bool isColMajor = isColMajorArg.getSExtValue();
- unsigned IID;
- unsigned NumResults = 8;
- // PTX Instructions (and LLVM intrinsics) are defined for slice _d_, yet
- // for some reason nvcc builtins use _c_.
- switch (BuiltinID) {
- case NVPTX::BI__hmma_m16n16k16_st_c_f16:
- IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col_stride
- : Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row_stride;
- NumResults = 4;
- break;
- case NVPTX::BI__hmma_m16n16k16_st_c_f32:
- IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col_stride
- : Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row_stride;
- break;
- case NVPTX::BI__hmma_m32n8k16_st_c_f16:
- IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col_stride
- : Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row_stride;
- NumResults = 4;
- break;
- case NVPTX::BI__hmma_m32n8k16_st_c_f32:
- IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col_stride
- : Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row_stride;
- break;
- case NVPTX::BI__hmma_m8n32k16_st_c_f16:
- IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col_stride
- : Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row_stride;
- NumResults = 4;
- break;
- case NVPTX::BI__hmma_m8n32k16_st_c_f32:
- IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col_stride
- : Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row_stride;
- break;
- default:
- llvm_unreachable("Unexpected builtin ID.");
- }
- Function *Intrinsic = CGM.getIntrinsic(IID, Dst->getType());
+ NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
+ unsigned IID = isColMajor ? II.IID_col : II.IID_row;
+ if (IID == 0)
+ return nullptr;
+ Function *Intrinsic =
+ CGM.getIntrinsic(IID, Dst->getType());
llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
SmallVector<Value *, 10> Values = {Dst};
- for (unsigned i = 0; i < NumResults; ++i) {
+ for (unsigned i = 0; i < II.NumResults; ++i) {
Value *V = Builder.CreateAlignedLoad(
Builder.CreateGEP(Src.getPointer(), llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
@@ -12926,7 +13802,16 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
- case NVPTX::BI__hmma_m8n32k16_mma_f16f32: {
+ case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
+ case NVPTX::BI__imma_m16n16k16_mma_s8:
+ case NVPTX::BI__imma_m16n16k16_mma_u8:
+ case NVPTX::BI__imma_m32n8k16_mma_s8:
+ case NVPTX::BI__imma_m32n8k16_mma_u8:
+ case NVPTX::BI__imma_m8n32k16_mma_s8:
+ case NVPTX::BI__imma_m8n32k16_mma_u8:
+ case NVPTX::BI__imma_m8n8k32_mma_s4:
+ case NVPTX::BI__imma_m8n8k32_mma_u4:
+ case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: {
Address Dst = EmitPointerWithAlignment(E->getArg(0));
Address SrcA = EmitPointerWithAlignment(E->getArg(1));
Address SrcB = EmitPointerWithAlignment(E->getArg(2));
@@ -12938,119 +13823,40 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
if (Layout < 0 || Layout > 3)
return nullptr;
llvm::APSInt SatfArg;
- if (!E->getArg(5)->isIntegerConstantExpr(SatfArg, getContext()))
+ if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1)
+ SatfArg = 0; // .b1 does not have satf argument.
+ else if (!E->getArg(5)->isIntegerConstantExpr(SatfArg, getContext()))
return nullptr;
bool Satf = SatfArg.getSExtValue();
-
- // clang-format off
-#define MMA_VARIANTS(geom, type) {{ \
- Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \
- Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
- Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
- Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
- Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \
- Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
- Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type, \
- Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite \
- }}
- // clang-format on
-
- auto getMMAIntrinsic = [Layout, Satf](std::array<unsigned, 8> Variants) {
- unsigned Index = Layout * 2 + Satf;
- assert(Index < 8);
- return Variants[Index];
- };
- unsigned IID;
- unsigned NumEltsC;
- unsigned NumEltsD;
- switch (BuiltinID) {
- case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
- IID = getMMAIntrinsic(MMA_VARIANTS(m16n16k16, f16_f16));
- NumEltsC = 4;
- NumEltsD = 4;
- break;
- case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
- IID = getMMAIntrinsic(MMA_VARIANTS(m16n16k16, f32_f16));
- NumEltsC = 4;
- NumEltsD = 8;
- break;
- case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
- IID = getMMAIntrinsic(MMA_VARIANTS(m16n16k16, f16_f32));
- NumEltsC = 8;
- NumEltsD = 4;
- break;
- case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
- IID = getMMAIntrinsic(MMA_VARIANTS(m16n16k16, f32_f32));
- NumEltsC = 8;
- NumEltsD = 8;
- break;
- case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
- IID = getMMAIntrinsic(MMA_VARIANTS(m32n8k16, f16_f16));
- NumEltsC = 4;
- NumEltsD = 4;
- break;
- case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
- IID = getMMAIntrinsic(MMA_VARIANTS(m32n8k16, f32_f16));
- NumEltsC = 4;
- NumEltsD = 8;
- break;
- case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
- IID = getMMAIntrinsic(MMA_VARIANTS(m32n8k16, f16_f32));
- NumEltsC = 8;
- NumEltsD = 4;
- break;
- case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
- IID = getMMAIntrinsic(MMA_VARIANTS(m32n8k16, f32_f32));
- NumEltsC = 8;
- NumEltsD = 8;
- break;
- case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
- IID = getMMAIntrinsic(MMA_VARIANTS(m8n32k16, f16_f16));
- NumEltsC = 4;
- NumEltsD = 4;
- break;
- case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
- IID = getMMAIntrinsic(MMA_VARIANTS(m8n32k16, f32_f16));
- NumEltsC = 4;
- NumEltsD = 8;
- break;
- case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
- IID = getMMAIntrinsic(MMA_VARIANTS(m8n32k16, f16_f32));
- NumEltsC = 8;
- NumEltsD = 4;
- break;
- case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
- IID = getMMAIntrinsic(MMA_VARIANTS(m8n32k16, f32_f32));
- NumEltsC = 8;
- NumEltsD = 8;
- break;
- default:
- llvm_unreachable("Unexpected builtin ID.");
- }
-#undef MMA_VARIANTS
+ NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID);
+ unsigned IID = MI.getMMAIntrinsic(Layout, Satf);
+ if (IID == 0) // Unsupported combination of Layout/Satf.
+ return nullptr;
SmallVector<Value *, 24> Values;
Function *Intrinsic = CGM.getIntrinsic(IID);
- llvm::Type *ABType = Intrinsic->getFunctionType()->getParamType(0);
+ llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0);
// Load A
- for (unsigned i = 0; i < 8; ++i) {
+ for (unsigned i = 0; i < MI.NumEltsA; ++i) {
Value *V = Builder.CreateAlignedLoad(
Builder.CreateGEP(SrcA.getPointer(),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
- Values.push_back(Builder.CreateBitCast(V, ABType));
+ Values.push_back(Builder.CreateBitCast(V, AType));
}
// Load B
- for (unsigned i = 0; i < 8; ++i) {
+ llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA);
+ for (unsigned i = 0; i < MI.NumEltsB; ++i) {
Value *V = Builder.CreateAlignedLoad(
Builder.CreateGEP(SrcB.getPointer(),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
- Values.push_back(Builder.CreateBitCast(V, ABType));
+ Values.push_back(Builder.CreateBitCast(V, BType));
}
// Load C
- llvm::Type *CType = Intrinsic->getFunctionType()->getParamType(16);
- for (unsigned i = 0; i < NumEltsC; ++i) {
+ llvm::Type *CType =
+ Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB);
+ for (unsigned i = 0; i < MI.NumEltsC; ++i) {
Value *V = Builder.CreateAlignedLoad(
Builder.CreateGEP(SrcC.getPointer(),
llvm::ConstantInt::get(IntTy, i)),
@@ -13059,7 +13865,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
}
Value *Result = Builder.CreateCall(Intrinsic, Values);
llvm::Type *DType = Dst.getElementType();
- for (unsigned i = 0; i < NumEltsD; ++i)
+ for (unsigned i = 0; i < MI.NumEltsD; ++i)
Builder.CreateAlignedStore(
Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
Builder.CreateGEP(Dst.getPointer(), llvm::ConstantInt::get(IntTy, i)),
@@ -13077,7 +13883,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
case WebAssembly::BI__builtin_wasm_memory_size: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *I = EmitScalarExpr(E->getArg(0));
- Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
return Builder.CreateCall(Callee, I);
}
case WebAssembly::BI__builtin_wasm_memory_grow: {
@@ -13086,37 +13892,66 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
EmitScalarExpr(E->getArg(0)),
EmitScalarExpr(E->getArg(1))
};
- Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
+ return Builder.CreateCall(Callee, Args);
+ }
+ case WebAssembly::BI__builtin_wasm_memory_init: {
+ llvm::APSInt SegConst;
+ if (!E->getArg(0)->isIntegerConstantExpr(SegConst, getContext()))
+ llvm_unreachable("Constant arg isn't actually constant?");
+ llvm::APSInt MemConst;
+ if (!E->getArg(1)->isIntegerConstantExpr(MemConst, getContext()))
+ llvm_unreachable("Constant arg isn't actually constant?");
+ if (!MemConst.isNullValue())
+ ErrorUnsupported(E, "non-zero memory index");
+ Value *Args[] = {llvm::ConstantInt::get(getLLVMContext(), SegConst),
+ llvm::ConstantInt::get(getLLVMContext(), MemConst),
+ EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3)),
+ EmitScalarExpr(E->getArg(4))};
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_init);
return Builder.CreateCall(Callee, Args);
}
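A hedged usage sketch for the lowering above: the first two arguments must be integer constants, the memory index must be 0, and the remaining three arguments (destination, segment offset, size) are evaluated as ordinary expressions. The exact builtin prototype is an assumption here, inferred from the argument handling in the code.

// Hypothetical call site (C/C++):
extern unsigned char buf[1024];
void init_from_segment(void) {
  // Copy 64 bytes starting at offset 0 of passive data segment 0 into buf.
  __builtin_wasm_memory_init(/*seg=*/0, /*mem=*/0, buf, /*offset=*/0, /*size=*/64);
}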
+ case WebAssembly::BI__builtin_wasm_data_drop: {
+ llvm::APSInt SegConst;
+ if (!E->getArg(0)->isIntegerConstantExpr(SegConst, getContext()))
+ llvm_unreachable("Constant arg isn't actually constant?");
+ Value *Arg = llvm::ConstantInt::get(getLLVMContext(), SegConst);
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_data_drop);
+ return Builder.CreateCall(Callee, {Arg});
+ }
+ case WebAssembly::BI__builtin_wasm_tls_size: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_size, ResultType);
+ return Builder.CreateCall(Callee);
+ }
case WebAssembly::BI__builtin_wasm_throw: {
Value *Tag = EmitScalarExpr(E->getArg(0));
Value *Obj = EmitScalarExpr(E->getArg(1));
- Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw);
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw);
return Builder.CreateCall(Callee, {Tag, Obj});
}
- case WebAssembly::BI__builtin_wasm_rethrow: {
- Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow);
+ case WebAssembly::BI__builtin_wasm_rethrow_in_catch: {
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow_in_catch);
return Builder.CreateCall(Callee);
}
case WebAssembly::BI__builtin_wasm_atomic_wait_i32: {
Value *Addr = EmitScalarExpr(E->getArg(0));
Value *Expected = EmitScalarExpr(E->getArg(1));
Value *Timeout = EmitScalarExpr(E->getArg(2));
- Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i32);
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i32);
return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
}
case WebAssembly::BI__builtin_wasm_atomic_wait_i64: {
Value *Addr = EmitScalarExpr(E->getArg(0));
Value *Expected = EmitScalarExpr(E->getArg(1));
Value *Timeout = EmitScalarExpr(E->getArg(2));
- Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i64);
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i64);
return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
}
case WebAssembly::BI__builtin_wasm_atomic_notify: {
Value *Addr = EmitScalarExpr(E->getArg(0));
Value *Count = EmitScalarExpr(E->getArg(1));
- Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_notify);
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_notify);
return Builder.CreateCall(Callee, {Addr, Count});
}
case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32:
@@ -13127,7 +13962,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64x2_f64x2: {
Value *Src = EmitScalarExpr(E->getArg(0));
llvm::Type *ResT = ConvertType(E->getType());
- Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_signed,
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_signed,
{ResT, Src->getType()});
return Builder.CreateCall(Callee, {Src});
}
@@ -13139,7 +13974,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64x2_f64x2: {
Value *Src = EmitScalarExpr(E->getArg(0));
llvm::Type *ResT = ConvertType(E->getType());
- Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_unsigned,
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_unsigned,
{ResT, Src->getType()});
return Builder.CreateCall(Callee, {Src});
}
@@ -13149,7 +13984,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
case WebAssembly::BI__builtin_wasm_min_f64x2: {
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
- Value *Callee = CGM.getIntrinsic(Intrinsic::minimum,
+ Function *Callee = CGM.getIntrinsic(Intrinsic::minimum,
ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
@@ -13159,7 +13994,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
case WebAssembly::BI__builtin_wasm_max_f64x2: {
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
- Value *Callee = CGM.getIntrinsic(Intrinsic::maximum,
+ Function *Callee = CGM.getIntrinsic(Intrinsic::maximum,
ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
@@ -13252,14 +14087,14 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
}
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
- Value *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
+ Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
case WebAssembly::BI__builtin_wasm_bitselect: {
Value *V1 = EmitScalarExpr(E->getArg(0));
Value *V2 = EmitScalarExpr(E->getArg(1));
Value *C = EmitScalarExpr(E->getArg(2));
- Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_bitselect,
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_bitselect,
ConvertType(E->getType()));
return Builder.CreateCall(Callee, {V1, V2, C});
}
@@ -13289,19 +14124,19 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
llvm_unreachable("unexpected builtin ID");
}
Value *Vec = EmitScalarExpr(E->getArg(0));
- Value *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
+ Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
return Builder.CreateCall(Callee, {Vec});
}
case WebAssembly::BI__builtin_wasm_abs_f32x4:
case WebAssembly::BI__builtin_wasm_abs_f64x2: {
Value *Vec = EmitScalarExpr(E->getArg(0));
- Value *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType());
+ Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType());
return Builder.CreateCall(Callee, {Vec});
}
case WebAssembly::BI__builtin_wasm_sqrt_f32x4:
case WebAssembly::BI__builtin_wasm_sqrt_f64x2: {
Value *Vec = EmitScalarExpr(E->getArg(0));
- Value *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType());
+ Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType());
return Builder.CreateCall(Callee, {Vec});
}
diff --git a/lib/CodeGen/CGCUDANV.cpp b/lib/CodeGen/CGCUDANV.cpp
index 1c578bd151bd..4d4038dae9cf 100644
--- a/lib/CodeGen/CGCUDANV.cpp
+++ b/lib/CodeGen/CGCUDANV.cpp
@@ -1,9 +1,8 @@
//===----- CGCUDANV.cpp - Interface to NVIDIA CUDA Runtime ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -16,9 +15,10 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/Decl.h"
+#include "clang/Basic/Cuda.h"
+#include "clang/CodeGen/CodeGenABITypes.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/Format.h"
@@ -42,17 +42,28 @@ private:
/// Convenience reference to the current module
llvm::Module &TheModule;
/// Keeps track of kernel launch stubs emitted in this module
- llvm::SmallVector<llvm::Function *, 16> EmittedKernels;
- llvm::SmallVector<std::pair<llvm::GlobalVariable *, unsigned>, 16> DeviceVars;
+ struct KernelInfo {
+ llvm::Function *Kernel;
+ const Decl *D;
+ };
+ llvm::SmallVector<KernelInfo, 16> EmittedKernels;
+ struct VarInfo {
+ llvm::GlobalVariable *Var;
+ const VarDecl *D;
+ unsigned Flag;
+ };
+ llvm::SmallVector<VarInfo, 16> DeviceVars;
/// Keeps track of variable containing handle of GPU binary. Populated by
/// ModuleCtorFunction() and used to create corresponding cleanup calls in
/// ModuleDtorFunction()
llvm::GlobalVariable *GpuBinaryHandle = nullptr;
/// Whether we generate relocatable device code.
bool RelocatableDeviceCode;
+ /// Mangle context for device.
+ std::unique_ptr<MangleContext> DeviceMC;
- llvm::Constant *getSetupArgumentFn() const;
- llvm::Constant *getLaunchFn() const;
+ llvm::FunctionCallee getSetupArgumentFn() const;
+ llvm::FunctionCallee getLaunchFn() const;
llvm::FunctionType *getRegisterGlobalsFnTy() const;
llvm::FunctionType *getCallbackFnTy() const;
@@ -104,20 +115,25 @@ private:
return DummyFunc;
}
- void emitDeviceStubBody(CodeGenFunction &CGF, FunctionArgList &Args);
+ void emitDeviceStubBodyLegacy(CodeGenFunction &CGF, FunctionArgList &Args);
+ void emitDeviceStubBodyNew(CodeGenFunction &CGF, FunctionArgList &Args);
+ std::string getDeviceSideName(const Decl *ND);
public:
CGNVCUDARuntime(CodeGenModule &CGM);
void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) override;
- void registerDeviceVar(llvm::GlobalVariable &Var, unsigned Flags) override {
- DeviceVars.push_back(std::make_pair(&Var, Flags));
+ void registerDeviceVar(const VarDecl *VD, llvm::GlobalVariable &Var,
+ unsigned Flags) override {
+ DeviceVars.push_back({&Var, VD, Flags});
}
/// Creates module constructor function
llvm::Function *makeModuleCtorFunction() override;
/// Creates module destructor function
llvm::Function *makeModuleDtorFunction() override;
+ /// Construct and return the stub name of a kernel.
+ std::string getDeviceStubName(llvm::StringRef Name) const override;
};
}
@@ -137,7 +153,9 @@ CGNVCUDARuntime::addUnderscoredPrefixToName(StringRef FuncName) const {
CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
: CGCUDARuntime(CGM), Context(CGM.getLLVMContext()),
TheModule(CGM.getModule()),
- RelocatableDeviceCode(CGM.getLangOpts().GPURelocatableDeviceCode) {
+ RelocatableDeviceCode(CGM.getLangOpts().GPURelocatableDeviceCode),
+ DeviceMC(CGM.getContext().createMangleContext(
+ CGM.getContext().getAuxTargetInfo())) {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
@@ -150,7 +168,7 @@ CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
VoidPtrPtrTy = VoidPtrTy->getPointerTo();
}
-llvm::Constant *CGNVCUDARuntime::getSetupArgumentFn() const {
+llvm::FunctionCallee CGNVCUDARuntime::getSetupArgumentFn() const {
// cudaError_t cudaSetupArgument(void *, size_t, size_t)
llvm::Type *Params[] = {VoidPtrTy, SizeTy, SizeTy};
return CGM.CreateRuntimeFunction(
@@ -158,7 +176,7 @@ llvm::Constant *CGNVCUDARuntime::getSetupArgumentFn() const {
addPrefixToName("SetupArgument"));
}
-llvm::Constant *CGNVCUDARuntime::getLaunchFn() const {
+llvm::FunctionCallee CGNVCUDARuntime::getLaunchFn() const {
if (CGM.getLangOpts().HIP) {
// hipError_t hipLaunchByPtr(char *);
return CGM.CreateRuntimeFunction(
@@ -186,16 +204,143 @@ llvm::FunctionType *CGNVCUDARuntime::getRegisterLinkedBinaryFnTy() const {
return llvm::FunctionType::get(VoidTy, Params, false);
}
+std::string CGNVCUDARuntime::getDeviceSideName(const Decl *D) {
+ auto *ND = cast<const NamedDecl>(D);
+ std::string DeviceSideName;
+ if (DeviceMC->shouldMangleDeclName(ND)) {
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ DeviceMC->mangleName(ND, Out);
+ DeviceSideName = Out.str();
+ } else
+ DeviceSideName = ND->getIdentifier()->getName();
+ return DeviceSideName;
+}
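For concreteness, getDeviceSideName behaves as sketched below (the kernel is hypothetical; the mangled form is standard Itanium mangling):

// __global__ void foo(int);
// getDeviceSideName(foo) -> "_Z3fooi"  (device compilation uses Itanium mangling)
// For a declaration that is not mangled (e.g. extern "C"), the plain
// identifier "foo" is returned instead.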
+
void CGNVCUDARuntime::emitDeviceStub(CodeGenFunction &CGF,
FunctionArgList &Args) {
- EmittedKernels.push_back(CGF.CurFn);
- emitDeviceStubBody(CGF, Args);
+  // Ensure either that we have different ABIs between host and device
+  // compilations, say a host compilation following the MSVC ABI while the
+  // device compilation follows the Itanium C++ ABI, or, if they follow the
+  // same ABI, that kernel names after mangling are the same after name
+  // stubbing. The latter check is very important, as the device kernel name
+  // mangled in the host compilation is used to resolve the device binaries to
+  // be executed. Inconsistent naming results in undefined behavior. Even
+  // though we cannot check naming directly between host and device
+  // compilations, the host- and device-mangling in the host compilation can
+  // help catch certain mismatches.
+ assert((CGF.CGM.getContext().getAuxTargetInfo() &&
+ (CGF.CGM.getContext().getAuxTargetInfo()->getCXXABI() !=
+ CGF.CGM.getContext().getTargetInfo().getCXXABI())) ||
+ getDeviceStubName(getDeviceSideName(CGF.CurFuncDecl)) ==
+ CGF.CurFn->getName());
+
+ EmittedKernels.push_back({CGF.CurFn, CGF.CurFuncDecl});
+ if (CudaFeatureEnabled(CGM.getTarget().getSDKVersion(),
+ CudaFeature::CUDA_USES_NEW_LAUNCH))
+ emitDeviceStubBodyNew(CGF, Args);
+ else
+ emitDeviceStubBodyLegacy(CGF, Args);
}
-void CGNVCUDARuntime::emitDeviceStubBody(CodeGenFunction &CGF,
- FunctionArgList &Args) {
+// CUDA 9.0+ uses a new way to launch kernels. Parameters are packed into a
+// local array and kernels are launched using cudaLaunchKernel().
+void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
+ FunctionArgList &Args) {
+ // Build the shadow stack entry at the very start of the function.
+
+ // Calculate amount of space we will need for all arguments. If we have no
+ // args, allocate a single pointer so we still have a valid pointer to the
+ // argument array that we can pass to runtime, even if it will be unused.
+ Address KernelArgs = CGF.CreateTempAlloca(
+ VoidPtrTy, CharUnits::fromQuantity(16), "kernel_args",
+ llvm::ConstantInt::get(SizeTy, std::max<size_t>(1, Args.size())));
+ // Store pointers to the arguments in a locally allocated launch_args.
+ for (unsigned i = 0; i < Args.size(); ++i) {
+ llvm::Value* VarPtr = CGF.GetAddrOfLocalVar(Args[i]).getPointer();
+ llvm::Value *VoidVarPtr = CGF.Builder.CreatePointerCast(VarPtr, VoidPtrTy);
+ CGF.Builder.CreateDefaultAlignedStore(
+ VoidVarPtr, CGF.Builder.CreateConstGEP1_32(KernelArgs.getPointer(), i));
+ }
+
+ llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
+
+ // Lookup cudaLaunchKernel function.
+ // cudaError_t cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim,
+ // void **args, size_t sharedMem,
+ // cudaStream_t stream);
+ TranslationUnitDecl *TUDecl = CGM.getContext().getTranslationUnitDecl();
+ DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
+ IdentifierInfo &cudaLaunchKernelII =
+ CGM.getContext().Idents.get("cudaLaunchKernel");
+ FunctionDecl *cudaLaunchKernelFD = nullptr;
+ for (const auto &Result : DC->lookup(&cudaLaunchKernelII)) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Result))
+ cudaLaunchKernelFD = FD;
+ }
+
+ if (cudaLaunchKernelFD == nullptr) {
+ CGM.Error(CGF.CurFuncDecl->getLocation(),
+ "Can't find declaration for cudaLaunchKernel()");
+ return;
+ }
+ // Create temporary dim3 grid_dim, block_dim.
+ ParmVarDecl *GridDimParam = cudaLaunchKernelFD->getParamDecl(1);
+ QualType Dim3Ty = GridDimParam->getType();
+ Address GridDim =
+ CGF.CreateMemTemp(Dim3Ty, CharUnits::fromQuantity(8), "grid_dim");
+ Address BlockDim =
+ CGF.CreateMemTemp(Dim3Ty, CharUnits::fromQuantity(8), "block_dim");
+ Address ShmemSize =
+ CGF.CreateTempAlloca(SizeTy, CGM.getSizeAlign(), "shmem_size");
+ Address Stream =
+ CGF.CreateTempAlloca(VoidPtrTy, CGM.getPointerAlign(), "stream");
+ llvm::FunctionCallee cudaPopConfigFn = CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(IntTy,
+ {/*gridDim=*/GridDim.getType(),
+ /*blockDim=*/BlockDim.getType(),
+ /*ShmemSize=*/ShmemSize.getType(),
+ /*Stream=*/Stream.getType()},
+ /*isVarArg=*/false),
+ "__cudaPopCallConfiguration");
+
+ CGF.EmitRuntimeCallOrInvoke(cudaPopConfigFn,
+ {GridDim.getPointer(), BlockDim.getPointer(),
+ ShmemSize.getPointer(), Stream.getPointer()});
+
+ // Emit the call to cudaLaunch
+ llvm::Value *Kernel = CGF.Builder.CreatePointerCast(CGF.CurFn, VoidPtrTy);
+ CallArgList LaunchKernelArgs;
+ LaunchKernelArgs.add(RValue::get(Kernel),
+ cudaLaunchKernelFD->getParamDecl(0)->getType());
+ LaunchKernelArgs.add(RValue::getAggregate(GridDim), Dim3Ty);
+ LaunchKernelArgs.add(RValue::getAggregate(BlockDim), Dim3Ty);
+ LaunchKernelArgs.add(RValue::get(KernelArgs.getPointer()),
+ cudaLaunchKernelFD->getParamDecl(3)->getType());
+ LaunchKernelArgs.add(RValue::get(CGF.Builder.CreateLoad(ShmemSize)),
+ cudaLaunchKernelFD->getParamDecl(4)->getType());
+ LaunchKernelArgs.add(RValue::get(CGF.Builder.CreateLoad(Stream)),
+ cudaLaunchKernelFD->getParamDecl(5)->getType());
+
+ QualType QT = cudaLaunchKernelFD->getType();
+ QualType CQT = QT.getCanonicalType();
+ llvm::Type *Ty = CGM.getTypes().ConvertType(CQT);
+ llvm::FunctionType *FTy = dyn_cast<llvm::FunctionType>(Ty);
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().arrangeFunctionDeclaration(cudaLaunchKernelFD);
+ llvm::FunctionCallee cudaLaunchKernelFn =
+ CGM.CreateRuntimeFunction(FTy, "cudaLaunchKernel");
+ CGF.EmitCall(FI, CGCallee::forDirect(cudaLaunchKernelFn), ReturnValueSlot(),
+ LaunchKernelArgs);
+ CGF.EmitBranch(EndBlock);
+
+ CGF.EmitBlock(EndBlock);
+}
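In source-level terms, the stub generated by emitDeviceStubBodyNew for a kernel such as __global__ void kern(int, float *) behaves roughly like the sketch below; __cudaPopCallConfiguration and cudaLaunchKernel appear in the code above, while the stub and variable names are purely illustrative:

void __stub_kern(int a, float *b) {
  void *kernel_args[] = {&a, &b};   // one pointer per formal argument
  dim3 grid_dim, block_dim;
  size_t shmem_size;
  cudaStream_t stream;
  // Pop the <<<...>>> configuration pushed by the call site.
  __cudaPopCallConfiguration(&grid_dim, &block_dim, &shmem_size, &stream);
  cudaLaunchKernel((const void *)__stub_kern, grid_dim, block_dim, kernel_args,
                   shmem_size, stream);
}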
+
+void CGNVCUDARuntime::emitDeviceStubBodyLegacy(CodeGenFunction &CGF,
+ FunctionArgList &Args) {
// Emit a call to cudaSetupArgument for each arg in Args.
- llvm::Constant *cudaSetupArgFn = getSetupArgumentFn();
+ llvm::FunctionCallee cudaSetupArgFn = getSetupArgumentFn();
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
CharUnits Offset = CharUnits::Zero();
for (const VarDecl *A : Args) {
@@ -209,17 +354,17 @@ void CGNVCUDARuntime::emitDeviceStubBody(CodeGenFunction &CGF,
llvm::ConstantInt::get(SizeTy, TyWidth.getQuantity()),
llvm::ConstantInt::get(SizeTy, Offset.getQuantity()),
};
- llvm::CallSite CS = CGF.EmitRuntimeCallOrInvoke(cudaSetupArgFn, Args);
+ llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(cudaSetupArgFn, Args);
llvm::Constant *Zero = llvm::ConstantInt::get(IntTy, 0);
- llvm::Value *CSZero = CGF.Builder.CreateICmpEQ(CS.getInstruction(), Zero);
+ llvm::Value *CBZero = CGF.Builder.CreateICmpEQ(CB, Zero);
llvm::BasicBlock *NextBlock = CGF.createBasicBlock("setup.next");
- CGF.Builder.CreateCondBr(CSZero, NextBlock, EndBlock);
+ CGF.Builder.CreateCondBr(CBZero, NextBlock, EndBlock);
CGF.EmitBlock(NextBlock);
Offset += TyWidth;
}
// Emit the call to cudaLaunch
- llvm::Constant *cudaLaunchFn = getLaunchFn();
+ llvm::FunctionCallee cudaLaunchFn = getLaunchFn();
llvm::Value *Arg = CGF.Builder.CreatePointerCast(CGF.CurFn, CharPtrTy);
CGF.EmitRuntimeCallOrInvoke(cudaLaunchFn, Arg);
CGF.EmitBranch(EndBlock);
@@ -259,7 +404,7 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
llvm::Type *RegisterFuncParams[] = {
VoidPtrPtrTy, CharPtrTy, CharPtrTy, CharPtrTy, IntTy,
VoidPtrTy, VoidPtrTy, VoidPtrTy, VoidPtrTy, IntTy->getPointerTo()};
- llvm::Constant *RegisterFunc = CGM.CreateRuntimeFunction(
+ llvm::FunctionCallee RegisterFunc = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, RegisterFuncParams, false),
addUnderscoredPrefixToName("RegisterFunction"));
@@ -267,13 +412,19 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
// __cuda_register_globals() and generate __cudaRegisterFunction() call for
// each emitted kernel.
llvm::Argument &GpuBinaryHandlePtr = *RegisterKernelsFunc->arg_begin();
- for (llvm::Function *Kernel : EmittedKernels) {
- llvm::Constant *KernelName = makeConstantString(Kernel->getName());
+ for (auto &&I : EmittedKernels) {
+ llvm::Constant *KernelName = makeConstantString(getDeviceSideName(I.D));
llvm::Constant *NullPtr = llvm::ConstantPointerNull::get(VoidPtrTy);
llvm::Value *Args[] = {
- &GpuBinaryHandlePtr, Builder.CreateBitCast(Kernel, VoidPtrTy),
- KernelName, KernelName, llvm::ConstantInt::get(IntTy, -1), NullPtr,
- NullPtr, NullPtr, NullPtr,
+ &GpuBinaryHandlePtr,
+ Builder.CreateBitCast(I.Kernel, VoidPtrTy),
+ KernelName,
+ KernelName,
+ llvm::ConstantInt::get(IntTy, -1),
+ NullPtr,
+ NullPtr,
+ NullPtr,
+ NullPtr,
llvm::ConstantPointerNull::get(IntTy->getPointerTo())};
Builder.CreateCall(RegisterFunc, Args);
}
@@ -283,13 +434,13 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
llvm::Type *RegisterVarParams[] = {VoidPtrPtrTy, CharPtrTy, CharPtrTy,
CharPtrTy, IntTy, IntTy,
IntTy, IntTy};
- llvm::Constant *RegisterVar = CGM.CreateRuntimeFunction(
+ llvm::FunctionCallee RegisterVar = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, RegisterVarParams, false),
addUnderscoredPrefixToName("RegisterVar"));
- for (auto &Pair : DeviceVars) {
- llvm::GlobalVariable *Var = Pair.first;
- unsigned Flags = Pair.second;
- llvm::Constant *VarName = makeConstantString(Var->getName());
+ for (auto &&Info : DeviceVars) {
+ llvm::GlobalVariable *Var = Info.Var;
+ unsigned Flags = Info.Flag;
+ llvm::Constant *VarName = makeConstantString(getDeviceSideName(Info.D));
uint64_t VarSize =
CGM.getDataLayout().getTypeAllocSize(Var->getValueType());
llvm::Value *Args[] = {
@@ -329,10 +480,14 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
/// \endcode
llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
bool IsHIP = CGM.getLangOpts().HIP;
+ bool IsCUDA = CGM.getLangOpts().CUDA;
// No need to generate ctors/dtors if there is no GPU binary.
StringRef CudaGpuBinaryFileName = CGM.getCodeGenOpts().CudaGpuBinaryFileName;
if (CudaGpuBinaryFileName.empty() && !IsHIP)
return nullptr;
+ if ((IsHIP || (IsCUDA && !RelocatableDeviceCode)) && EmittedKernels.empty() &&
+ DeviceVars.empty())
+ return nullptr;
// void __{cuda|hip}_register_globals(void* handle);
llvm::Function *RegisterGlobalsFunc = makeRegisterGlobalsFn();
@@ -342,7 +497,7 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
RegisterGlobalsFunc = makeDummyFunction(getRegisterGlobalsFnTy());
// void ** __{cuda|hip}RegisterFatBinary(void *);
- llvm::Constant *RegisterFatbinFunc = CGM.CreateRuntimeFunction(
+ llvm::FunctionCallee RegisterFatbinFunc = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(VoidPtrPtrTy, VoidPtrTy, false),
addUnderscoredPrefixToName("RegisterFatBinary"));
// struct { int magic, int version, void * gpu_binary, void * dont_care };
@@ -516,6 +671,16 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// Call __cuda_register_globals(GpuBinaryHandle);
if (RegisterGlobalsFunc)
CtorBuilder.CreateCall(RegisterGlobalsFunc, RegisterFatbinCall);
+
+ // Call __cudaRegisterFatBinaryEnd(Handle) if this CUDA version needs it.
+ if (CudaFeatureEnabled(CGM.getTarget().getSDKVersion(),
+ CudaFeature::CUDA_USES_FATBIN_REGISTER_END)) {
+ // void __cudaRegisterFatBinaryEnd(void **);
+ llvm::FunctionCallee RegisterFatbinEndFunc = CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
+ "__cudaRegisterFatBinaryEnd");
+ CtorBuilder.CreateCall(RegisterFatbinEndFunc, RegisterFatbinCall);
+ }
} else {
// Generate a unique module ID.
SmallString<64> ModuleID;
@@ -532,7 +697,7 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// void *, void (*)(void **))
SmallString<128> RegisterLinkedBinaryName("__cudaRegisterLinkedBinary");
RegisterLinkedBinaryName += ModuleID;
- llvm::Constant *RegisterLinkedBinaryFunc = CGM.CreateRuntimeFunction(
+ llvm::FunctionCallee RegisterLinkedBinaryFunc = CGM.CreateRuntimeFunction(
getRegisterLinkedBinaryFnTy(), RegisterLinkedBinaryName);
assert(RegisterGlobalsFunc && "Expecting at least dummy function!");
@@ -550,7 +715,7 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// extern "C" int atexit(void (*f)(void));
llvm::FunctionType *AtExitTy =
llvm::FunctionType::get(IntTy, CleanupFn->getType(), false);
- llvm::Constant *AtExitFunc =
+ llvm::FunctionCallee AtExitFunc =
CGM.CreateRuntimeFunction(AtExitTy, "atexit", llvm::AttributeList(),
/*Local=*/true);
CtorBuilder.CreateCall(AtExitFunc, CleanupFn);
@@ -585,7 +750,7 @@ llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() {
return nullptr;
// void __cudaUnregisterFatBinary(void ** handle);
- llvm::Constant *UnregisterFatbinFunc = CGM.CreateRuntimeFunction(
+ llvm::FunctionCallee UnregisterFatbinFunc = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
addUnderscoredPrefixToName("UnregisterFatBinary"));
@@ -627,6 +792,12 @@ llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() {
return ModuleDtorFunc;
}
+std::string CGNVCUDARuntime::getDeviceStubName(llvm::StringRef Name) const {
+ if (!CGM.getLangOpts().HIP)
+ return Name;
+ return (Name + ".stub").str();
+}
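Usage note (the mangled name is an assumed example): the suffix is applied only for HIP, so the host-side stub symbol does not collide with the device-side kernel name.

// CUDA: getDeviceStubName("_Z3fooi") == "_Z3fooi"
// HIP:  getDeviceStubName("_Z3fooi") == "_Z3fooi.stub"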
+
CGCUDARuntime *CodeGen::CreateNVCUDARuntime(CodeGenModule &CGM) {
return new CGNVCUDARuntime(CGM);
}
diff --git a/lib/CodeGen/CGCUDARuntime.cpp b/lib/CodeGen/CGCUDARuntime.cpp
index 1936f9f13692..c14a9d3f2bbb 100644
--- a/lib/CodeGen/CGCUDARuntime.cpp
+++ b/lib/CodeGen/CGCUDARuntime.cpp
@@ -1,9 +1,8 @@
//===----- CGCUDARuntime.cpp - Interface to CUDA Runtimes -----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CodeGen/CGCUDARuntime.h b/lib/CodeGen/CGCUDARuntime.h
index 0168f4f9e942..e548a3a546d4 100644
--- a/lib/CodeGen/CGCUDARuntime.h
+++ b/lib/CodeGen/CGCUDARuntime.h
@@ -1,9 +1,8 @@
//===----- CGCUDARuntime.h - Interface to CUDA Runtimes ---------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -16,6 +15,8 @@
#ifndef LLVM_CLANG_LIB_CODEGEN_CGCUDARUNTIME_H
#define LLVM_CLANG_LIB_CODEGEN_CGCUDARUNTIME_H
+#include "llvm/ADT/StringRef.h"
+
namespace llvm {
class Function;
class GlobalVariable;
@@ -24,6 +25,7 @@ class GlobalVariable;
namespace clang {
class CUDAKernelCallExpr;
+class VarDecl;
namespace CodeGen {
@@ -53,7 +55,8 @@ public:
/// Emits a kernel launch stub.
virtual void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) = 0;
- virtual void registerDeviceVar(llvm::GlobalVariable &Var, unsigned Flags) = 0;
+ virtual void registerDeviceVar(const VarDecl *VD, llvm::GlobalVariable &Var,
+ unsigned Flags) = 0;
/// Constructs and returns a module initialization function or nullptr if it's
/// not needed. Must be called after all kernels have been emitted.
@@ -62,6 +65,9 @@ public:
/// Returns a module cleanup function or nullptr if it's not needed.
/// Must be called after ModuleCtorFunction
virtual llvm::Function *makeModuleDtorFunction() = 0;
+
+ /// Construct and return the stub name of a kernel.
+ virtual std::string getDeviceStubName(llvm::StringRef Name) const = 0;
};
/// Creates an instance of a CUDA runtime class.
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index 8b0733fbec3e..6d903a0d09e2 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -1,9 +1,8 @@
//===--- CGCXX.cpp - Emit LLVM Code for declarations ----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -204,55 +203,44 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
return false;
}
-llvm::Function *CodeGenModule::codegenCXXStructor(const CXXMethodDecl *MD,
- StructorType Type) {
- const CGFunctionInfo &FnInfo =
- getTypes().arrangeCXXStructorDeclaration(MD, Type);
+llvm::Function *CodeGenModule::codegenCXXStructor(GlobalDecl GD) {
+ const CGFunctionInfo &FnInfo = getTypes().arrangeCXXStructorDeclaration(GD);
auto *Fn = cast<llvm::Function>(
- getAddrOfCXXStructor(MD, Type, &FnInfo, /*FnType=*/nullptr,
+ getAddrOfCXXStructor(GD, &FnInfo, /*FnType=*/nullptr,
/*DontDefer=*/true, ForDefinition));
- GlobalDecl GD;
- if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
- GD = GlobalDecl(DD, toCXXDtorType(Type));
- } else {
- const auto *CD = cast<CXXConstructorDecl>(MD);
- GD = GlobalDecl(CD, toCXXCtorType(Type));
- }
-
setFunctionLinkage(GD, Fn);
CodeGenFunction(*this).GenerateCode(GD, Fn, FnInfo);
setNonAliasAttributes(GD, Fn);
- SetLLVMFunctionAttributesForDefinition(MD, Fn);
+ SetLLVMFunctionAttributesForDefinition(cast<CXXMethodDecl>(GD.getDecl()), Fn);
return Fn;
}
-llvm::Constant *CodeGenModule::getAddrOfCXXStructor(
- const CXXMethodDecl *MD, StructorType Type, const CGFunctionInfo *FnInfo,
- llvm::FunctionType *FnType, bool DontDefer,
- ForDefinition_t IsForDefinition) {
- GlobalDecl GD;
- if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
- GD = GlobalDecl(CD, toCXXCtorType(Type));
- } else {
+llvm::FunctionCallee CodeGenModule::getAddrAndTypeOfCXXStructor(
+ GlobalDecl GD, const CGFunctionInfo *FnInfo, llvm::FunctionType *FnType,
+ bool DontDefer, ForDefinition_t IsForDefinition) {
+ auto *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+ if (isa<CXXDestructorDecl>(MD)) {
// Always alias equivalent complete destructors to base destructors in the
// MS ABI.
if (getTarget().getCXXABI().isMicrosoft() &&
- Type == StructorType::Complete && MD->getParent()->getNumVBases() == 0)
- Type = StructorType::Base;
- GD = GlobalDecl(cast<CXXDestructorDecl>(MD), toCXXDtorType(Type));
+ GD.getDtorType() == Dtor_Complete &&
+ MD->getParent()->getNumVBases() == 0)
+ GD = GD.getWithDtorType(Dtor_Base);
}
if (!FnType) {
if (!FnInfo)
- FnInfo = &getTypes().arrangeCXXStructorDeclaration(MD, Type);
+ FnInfo = &getTypes().arrangeCXXStructorDeclaration(GD);
FnType = getTypes().GetFunctionType(*FnInfo);
}
- return GetOrCreateLLVMFunction(
+ llvm::Constant *Ptr = GetOrCreateLLVMFunction(
getMangledName(GD), FnType, GD, /*ForVTable=*/false, DontDefer,
- /*isThunk=*/false, /*ExtraAttrs=*/llvm::AttributeList(), IsForDefinition);
+ /*IsThunk=*/false, /*ExtraAttrs=*/llvm::AttributeList(), IsForDefinition);
+ return {FnType, Ptr};
}
static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF,
@@ -312,7 +300,7 @@ CodeGenFunction::BuildAppleKextVirtualDestructorCall(
assert(DD->isVirtual() && Type != Dtor_Base);
// Compute the function type we're calling.
const CGFunctionInfo &FInfo = CGM.getTypes().arrangeCXXStructorDeclaration(
- DD, StructorType::Complete);
+ GlobalDecl(DD, Dtor_Complete));
llvm::Type *Ty = CGM.getTypes().GetFunctionType(FInfo);
return ::BuildAppleKextVirtualCall(*this, GlobalDecl(DD, Type), Ty, RD);
}
diff --git a/lib/CodeGen/CGCXXABI.cpp b/lib/CodeGen/CGCXXABI.cpp
index ed168b1ce72d..041c0f8959fd 100644
--- a/lib/CodeGen/CGCXXABI.cpp
+++ b/lib/CodeGen/CGCXXABI.cpp
@@ -1,9 +1,8 @@
//===----- CGCXXABI.cpp - Interface to C++ ABIs ---------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -29,12 +28,6 @@ void CGCXXABI::ErrorUnsupportedABI(CodeGenFunction &CGF, StringRef S) {
<< S;
}
-bool CGCXXABI::canCopyArgument(const CXXRecordDecl *RD) const {
- // We can only copy the argument if there exists at least one trivial,
- // non-deleted copy or move constructor.
- return RD->canPassInRegisters();
-}
-
llvm::Constant *CGCXXABI::GetBogusMemberPointer(QualType T) {
return llvm::Constant::getNullValue(CGM.getTypes().ConvertType(T));
}
@@ -298,7 +291,7 @@ llvm::GlobalValue::LinkageTypes CGCXXABI::getCXXDestructorLinkage(
GVALinkage Linkage, const CXXDestructorDecl *Dtor, CXXDtorType DT) const {
// Delegate back to CGM by default.
return CGM.getLLVMLinkageForDeclarator(Dtor, Linkage,
- /*isConstantVariable=*/false);
+ /*IsConstantVariable=*/false);
}
bool CGCXXABI::NeedsVTTParameter(GlobalDecl GD) {
diff --git a/lib/CodeGen/CGCXXABI.h b/lib/CodeGen/CGCXXABI.h
index 65b50e14f436..3a9c3b347439 100644
--- a/lib/CodeGen/CGCXXABI.h
+++ b/lib/CodeGen/CGCXXABI.h
@@ -1,9 +1,8 @@
//===----- CGCXXABI.h - Interface to C++ ABIs -------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -137,10 +136,6 @@ public:
RAA_Indirect
};
- /// Returns true if C++ allows us to copy the memory of an object of type RD
- /// when it is passed as an argument.
- bool canCopyArgument(const CXXRecordDecl *RD) const;
-
/// Returns how an argument of the given record type should be passed.
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const = 0;
@@ -310,7 +305,7 @@ public:
/// adding any required parameters. For convenience, ArgTys has been
/// initialized with the type of 'this'.
virtual AddedStructorArgs
- buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
+ buildStructorSignature(GlobalDecl GD,
SmallVectorImpl<CanQualType> &ArgTys) = 0;
/// Returns true if the given destructor type should be emitted as a linkonce
@@ -383,7 +378,7 @@ public:
virtual void EmitDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *DD, CXXDtorType Type,
bool ForVirtualBase, bool Delegating,
- Address This) = 0;
+ Address This, QualType ThisTy) = 0;
/// Emits the VTable definitions required for the given record type.
virtual void emitVTableDefinitions(CodeGenVTables &CGVT,
@@ -426,11 +421,15 @@ public:
llvm::Type *Ty,
SourceLocation Loc) = 0;
+ using DeleteOrMemberCallExpr =
+ llvm::PointerUnion<const CXXDeleteExpr *, const CXXMemberCallExpr *>;
+
/// Emit the ABI-specific virtual destructor call.
- virtual llvm::Value *
- EmitVirtualDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *Dtor,
- CXXDtorType DtorType, Address This,
- const CXXMemberCallExpr *CE) = 0;
+ virtual llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
+ const CXXDestructorDecl *Dtor,
+ CXXDtorType DtorType,
+ Address This,
+ DeleteOrMemberCallExpr E) = 0;
virtual void adjustCallArgsForDestructorThunk(CodeGenFunction &CGF,
GlobalDecl GD,
@@ -557,7 +556,7 @@ public:
/// \param Dtor - a function taking a single pointer argument
/// \param Addr - a pointer to pass to the destructor function.
virtual void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
- llvm::Constant *Dtor,
+ llvm::FunctionCallee Dtor,
llvm::Constant *Addr) = 0;
/*************************** thread_local initialization ********************/
@@ -589,7 +588,7 @@ public:
/// Emit a single constructor/destructor with the given type from a C++
/// constructor Decl.
- virtual void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) = 0;
+ virtual void emitCXXStructor(GlobalDecl GD) = 0;
/// Load a vtable from This, an object of polymorphic type RD, or from one of
/// its virtual bases if it does not have its own vtable. Returns the vtable
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 7d494bb1f1c7..cf8024550eee 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -1,9 +1,8 @@
//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -32,7 +31,6 @@
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
@@ -69,12 +67,19 @@ unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
}
/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
-/// qualification.
-static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD,
- const CXXMethodDecl *MD) {
- QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
+/// qualification. Either or both of RD and MD may be null. A null RD indicates
+/// that there is no meaningful 'this' type, and a null MD can occur when
+/// calling a method pointer.
+CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
+ const CXXMethodDecl *MD) {
+ QualType RecTy;
+ if (RD)
+ RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
+ else
+ RecTy = Context.VoidTy;
+
if (MD)
- RecTy = Context.getAddrSpaceQualType(RecTy, MD->getTypeQualifiers().getAddressSpace());
+ RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace());
return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}
@@ -169,11 +174,9 @@ static void appendParameterTypes(const CodeGenTypes &CGT,
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
SmallVectorImpl<CanQualType> &prefix,
- CanQual<FunctionProtoType> FTP,
- const FunctionDecl *FD) {
+ CanQual<FunctionProtoType> FTP) {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
- RequiredArgs Required =
- RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
+ RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
// FIXME: Kill copy.
appendParameterTypes(CGT, prefix, paramInfos, FTP);
CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
@@ -187,11 +190,10 @@ arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
-CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
- const FunctionDecl *FD) {
+CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
SmallVector<CanQualType, 16> argTypes;
return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
- FTP, FD);
+ FTP);
}
static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
@@ -240,7 +242,7 @@ static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
-/// (Zero value of RD means we don't have any meaningful "this" argument type,
+/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
@@ -251,14 +253,11 @@ CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
SmallVector<CanQualType, 16> argTypes;
// Add the 'this' pointer.
- if (RD)
- argTypes.push_back(GetThisType(Context, RD, MD));
- else
- argTypes.push_back(Context.VoidPtrTy);
+ argTypes.push_back(DeriveThisType(RD, MD));
return ::arrangeLLVMFunctionInfo(
*this, true, argTypes,
- FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
+ FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}
/// Set calling convention for CUDA/HIP kernel.
@@ -290,7 +289,7 @@ CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
}
- return arrangeFreeFunctionType(prototype, MD);
+ return arrangeFreeFunctionType(prototype);
}
bool CodeGenTypes::inheritingCtorHasParams(
@@ -300,29 +299,23 @@ bool CodeGenTypes::inheritingCtorHasParams(
return Type == Ctor_Complete ||
!Inherited.getShadowDecl()->constructsVirtualBase() ||
!Target.getCXXABI().hasConstructorVariants();
- }
+}
const CGFunctionInfo &
-CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
- StructorType Type) {
+CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
+ auto *MD = cast<CXXMethodDecl>(GD.getDecl());
SmallVector<CanQualType, 16> argTypes;
SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
- argTypes.push_back(GetThisType(Context, MD->getParent(), MD));
+ argTypes.push_back(DeriveThisType(MD->getParent(), MD));
bool PassParams = true;
- GlobalDecl GD;
if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
- GD = GlobalDecl(CD, toCXXCtorType(Type));
-
// A base class inheriting constructor doesn't get forwarded arguments
// needed to construct a virtual base (or base class thereof).
if (auto Inherited = CD->getInheritedConstructor())
- PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
- } else {
- auto *DD = dyn_cast<CXXDestructorDecl>(MD);
- GD = GlobalDecl(DD, toCXXDtorType(Type));
+ PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
}
CanQual<FunctionProtoType> FTP = GetFormalType(MD);
@@ -332,7 +325,7 @@ CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
appendParameterTypes(*this, argTypes, paramInfos, FTP);
CGCXXABI::AddedStructorArgs AddedArgs =
- TheCXXABI.buildStructorSignature(MD, Type, argTypes);
+ TheCXXABI.buildStructorSignature(GD, argTypes);
if (!paramInfos.empty()) {
// Note: prefix implies after the first param.
if (AddedArgs.Prefix)
@@ -408,8 +401,11 @@ CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
CanQual<FunctionProtoType> FPT = GetFormalType(D);
- RequiredArgs Required =
- RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs, D);
+ RequiredArgs Required = PassProtoArgs
+ ? RequiredArgs::forPrototypePlus(
+ FPT, TotalPrefixArgs + ExtraSuffixArgs)
+ : RequiredArgs::All;
+
GlobalDecl GD(D, CtorKind);
CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
? ArgTypes.front()
@@ -452,7 +448,7 @@ CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
/*chainCall=*/false, None, noProto->getExtInfo(), {},RequiredArgs::All);
}
- return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>(), FD);
+ return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
}
/// Arrange the argument and result information for the declaration or
@@ -517,11 +513,9 @@ CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
// FIXME: Do we need to handle ObjCMethodDecl?
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
- if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
- return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));
-
- if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
- return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));
+ if (isa<CXXConstructorDecl>(GD.getDecl()) ||
+ isa<CXXDestructorDecl>(GD.getDecl()))
+ return arrangeCXXStructorDeclaration(GD);
return arrangeFunctionDeclaration(FD);
}
@@ -535,7 +529,7 @@ const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
assert(MD->isVirtual() && "only methods have thunks");
CanQual<FunctionProtoType> FTP = GetFormalType(MD);
- CanQualType ArgTys[] = { GetThisType(Context, MD->getParent(), MD) };
+ CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
/*chainCall=*/false, ArgTys,
FTP->getExtInfo(), {}, RequiredArgs(1));
@@ -549,7 +543,7 @@ CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
CanQual<FunctionProtoType> FTP = GetFormalType(CD);
SmallVector<CanQualType, 2> ArgTys;
const CXXRecordDecl *RD = CD->getParent();
- ArgTys.push_back(GetThisType(Context, RD, CD));
+ ArgTys.push_back(DeriveThisType(RD, CD));
if (CT == Ctor_CopyingClosure)
ArgTys.push_back(*FTP->param_type_begin());
if (RD->getNumVBases() > 0)
@@ -582,7 +576,7 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
// extra prefix plus the arguments in the prototype.
if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
if (proto->isVariadic())
- required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);
+ required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);
if (proto->hasExtParameterInfos())
addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
@@ -635,11 +629,10 @@ CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
auto argTypes = getArgTypesForDeclaration(Context, params);
- return arrangeLLVMFunctionInfo(
- GetReturnType(proto->getReturnType()),
- /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
- proto->getExtInfo(), paramInfos,
- RequiredArgs::forPrototypePlus(proto, 1, nullptr));
+ return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
+ /*instanceMethod*/ false, /*chainCall*/ false,
+ argTypes, proto->getExtInfo(), paramInfos,
+ RequiredArgs::forPrototypePlus(proto, 1));
}
const CGFunctionInfo &
@@ -808,6 +801,8 @@ CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
ArrayRef<CanQualType> argTypes,
RequiredArgs required) {
assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
+ assert(!required.allowsOptionalArgs() ||
+ required.getNumRequiredArgs() <= argTypes.size());
void *buffer =
operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
@@ -1148,7 +1143,7 @@ EnterStructPointerForCoercedAccess(Address SrcPtr,
return SrcPtr;
// GEP into the first element.
- SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");
+ SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");
// If the first element is a struct, recurse.
llvm::Type *SrcTy = SrcPtr.getElementType();
@@ -1276,12 +1271,8 @@ static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
// Prefer scalar stores to first-class aggregate stores.
if (llvm::StructType *STy =
dyn_cast<llvm::StructType>(Val->getType())) {
- const llvm::StructLayout *Layout =
- CGF.CGM.getDataLayout().getStructLayout(STy);
-
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
- Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
+ Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i);
llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
}
@@ -1682,13 +1673,7 @@ llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
if (!isFuncTypeConvertible(FPT))
return llvm::StructType::get(getLLVMContext());
- const CGFunctionInfo *Info;
- if (isa<CXXDestructorDecl>(MD))
- Info =
- &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
- else
- Info = &arrangeCXXMethodDeclaration(MD);
- return GetFunctionType(*Info);
+ return GetFunctionType(GD);
}
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
@@ -1793,8 +1778,6 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
if (CodeGenOpts.Backchain)
FuncAttrs.addAttribute("backchain");
- // FIXME: The interaction of this attribute with the SLH command line flag
- // has not been determined.
if (CodeGenOpts.SpeculativeLoadHardening)
FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
}
@@ -1826,9 +1809,8 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
llvm::AttrBuilder FuncAttrs;
- ConstructDefaultFnAttrList(F.getName(),
- F.hasFnAttribute(llvm::Attribute::OptimizeNone),
- /* AttrOnCallsite = */ false, FuncAttrs);
+ ConstructDefaultFnAttrList(F.getName(), F.hasOptNone(),
+ /* AttrOnCallSite = */ false, FuncAttrs);
F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
}
@@ -1864,8 +1846,6 @@ void CodeGenModule::ConstructAttributeList(
FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
if (TargetDecl->hasAttr<ConvergentAttr>())
FuncAttrs.addAttribute(llvm::Attribute::Convergent);
- if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
- FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
AddAttributesFromFunctionProtoType(
@@ -1910,6 +1890,16 @@ void CodeGenModule::ConstructAttributeList(
ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
+ // This must run after constructing the default function attribute list
+ // to ensure that the speculative load hardening attribute is removed
+ // in the case where the -mspeculative-load-hardening flag was passed.
+ if (TargetDecl) {
+ if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
+ FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
+ if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
+ FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
+ }
+
if (CodeGenOpts.EnableSegmentedStacks &&
!(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
FuncAttrs.addAttribute("split-stack");
@@ -2009,8 +1999,7 @@ void CodeGenModule::ConstructAttributeList(
// Attach attributes to sret.
if (IRFunctionArgs.hasSRetArg()) {
llvm::AttrBuilder SRETAttrs;
- if (!RetAI.getSuppressSRet())
- SRETAttrs.addAttribute(llvm::Attribute::StructRet);
+ SRETAttrs.addAttribute(llvm::Attribute::StructRet);
hasUsedSRet = true;
if (RetAI.getInReg())
SRETAttrs.addAttribute(llvm::Attribute::InReg);
@@ -2066,7 +2055,7 @@ void CodeGenModule::ConstructAttributeList(
Attrs.addAttribute(llvm::Attribute::InReg);
if (AI.getIndirectByVal())
- Attrs.addAttribute(llvm::Attribute::ByVal);
+ Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
CharUnits Align = AI.getIndirectAlign();
@@ -2262,9 +2251,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// If we're using inalloca, all the memory arguments are GEPs off of the last
// parameter, which is a pointer to the complete memory area.
Address ArgStruct = Address::invalid();
- const llvm::StructLayout *ArgStructLayout = nullptr;
if (IRFunctionArgs.hasInallocaArg()) {
- ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
FI.getArgStructAlignment());
@@ -2313,10 +2300,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::InAlloca: {
assert(NumIRArgs == 0);
auto FieldIndex = ArgI.getInAllocaFieldIndex();
- CharUnits FieldOffset =
- CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
- Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
- Arg->getName());
+ Address V =
+ Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
ArgVals.push_back(ParamValue::forIndirect(V));
break;
}
@@ -2476,7 +2461,6 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
STy->getNumElements() > 1) {
- auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
llvm::Type *DstTy = Ptr.getElementType();
uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
@@ -2493,9 +2477,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
auto AI = FnArgs[FirstIRArg + i];
AI->setName(Arg->getName() + ".coerce" + Twine(i));
- auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
- Address EltPtr =
- Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
+ Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
Builder.CreateStore(AI, EltPtr);
}
@@ -2508,7 +2490,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
assert(NumIRArgs == 1);
auto AI = FnArgs[FirstIRArg];
AI->setName(Arg->getName() + ".coerce");
- CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
+ CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
}
// Match to what EmitParmDecl is expecting for this type.
@@ -2531,7 +2513,6 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
auto coercionType = ArgI.getCoerceAndExpandType();
alloca = Builder.CreateElementBitCast(alloca, coercionType);
- auto layout = CGM.getDataLayout().getStructLayout(coercionType);
unsigned argIndex = FirstIRArg;
for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
@@ -2539,7 +2520,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
continue;
- auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
+ auto eltAddr = Builder.CreateStructGEP(alloca, i);
auto elt = FnArgs[argIndex++];
Builder.CreateStore(elt, eltAddr);
}
@@ -2891,15 +2872,6 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
RV = SI->getValueOperand();
SI->eraseFromParent();
- // If that was the only use of the return value, nuke it as well now.
- auto returnValueInst = ReturnValue.getPointer();
- if (returnValueInst->use_empty()) {
- if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
- alloca->eraseFromParent();
- ReturnValue = Address::invalid();
- }
- }
-
// Otherwise, we have to do a simple load.
} else {
RV = Builder.CreateLoad(ReturnValue);
@@ -2944,7 +2916,6 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
case ABIArgInfo::CoerceAndExpand: {
auto coercionType = RetAI.getCoerceAndExpandType();
- auto layout = CGM.getDataLayout().getStructLayout(coercionType);
// Load all of the coerced elements out into results.
llvm::SmallVector<llvm::Value*, 4> results;
@@ -2954,7 +2925,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
continue;
- auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
+ auto eltAddr = Builder.CreateStructGEP(addr, i);
auto elt = Builder.CreateLoad(eltAddr);
results.push_back(elt);
}
@@ -3368,7 +3339,7 @@ void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
if (StackBase) {
// Restore the stack after the call.
- llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
+ llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
CGF.Builder.CreateCall(F, StackBase);
}
}
@@ -3455,7 +3426,8 @@ void CodeGenFunction::EmitCallArgs(
auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
- EmittedArg.getScalarVal());
+ EmittedArg.getScalarVal(),
+ PS->isDynamic());
Args.add(RValue::get(V), SizeTy);
// If we're emitting args in reverse, be sure to do so with
// pass_object_size, as well.
@@ -3530,7 +3502,7 @@ struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
assert(!Dtor->isTrivial());
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
- /*Delegating=*/false, Addr);
+ /*Delegating=*/false, Addr, Ty);
} else {
CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
}
@@ -3565,7 +3537,7 @@ RValue CallArg::getRValue(CodeGenFunction &CGF) const {
void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
if (!HasLV && RV.isScalar())
- CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*init=*/true);
+ CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
else if (!HasLV && RV.isComplex())
CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
else {
@@ -3678,15 +3650,15 @@ CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
/// Emits a call to the given no-arguments nounwind runtime function.
llvm::CallInst *
-CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
+CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
const llvm::Twine &name) {
return EmitNounwindRuntimeCall(callee, None, name);
}
/// Emits a call to the given nounwind runtime function.
llvm::CallInst *
-CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
- ArrayRef<llvm::Value*> args,
+CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
+ ArrayRef<llvm::Value *> args,
const llvm::Twine &name) {
llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
call->setDoesNotThrow();
@@ -3695,9 +3667,8 @@ CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
/// Emits a simple call (never an invoke) to the given no-arguments
/// runtime function.
-llvm::CallInst *
-CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
- const llvm::Twine &name) {
+llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
+ const llvm::Twine &name) {
return EmitRuntimeCall(callee, None, name);
}
@@ -3721,21 +3692,20 @@ CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
}
/// Emits a simple call (never an invoke) to the given runtime function.
-llvm::CallInst *
-CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
- ArrayRef<llvm::Value*> args,
- const llvm::Twine &name) {
- llvm::CallInst *call =
- Builder.CreateCall(callee, args, getBundlesForFunclet(callee), name);
+llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
+ ArrayRef<llvm::Value *> args,
+ const llvm::Twine &name) {
+ llvm::CallInst *call = Builder.CreateCall(
+ callee, args, getBundlesForFunclet(callee.getCallee()), name);
call->setCallingConv(getRuntimeCC());
return call;
}
/// Emits a call or invoke to the given noreturn runtime function.
-void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
- ArrayRef<llvm::Value*> args) {
+void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
+ llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
SmallVector<llvm::OperandBundleDef, 1> BundleList =
- getBundlesForFunclet(callee);
+ getBundlesForFunclet(callee.getCallee());
if (getInvokeDest()) {
llvm::InvokeInst *invoke =
@@ -3755,33 +3725,32 @@ void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
}
/// Emits a call or invoke instruction to the given nullary runtime function.
-llvm::CallSite
-CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
+llvm::CallBase *
+CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
const Twine &name) {
return EmitRuntimeCallOrInvoke(callee, None, name);
}
/// Emits a call or invoke instruction to the given runtime function.
-llvm::CallSite
-CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
- ArrayRef<llvm::Value*> args,
+llvm::CallBase *
+CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
+ ArrayRef<llvm::Value *> args,
const Twine &name) {
- llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
- callSite.setCallingConv(getRuntimeCC());
- return callSite;
+ llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
+ call->setCallingConv(getRuntimeCC());
+ return call;
}
/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
-llvm::CallSite
-CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
- ArrayRef<llvm::Value *> Args,
- const Twine &Name) {
+llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
+ ArrayRef<llvm::Value *> Args,
+ const Twine &Name) {
llvm::BasicBlock *InvokeDest = getInvokeDest();
SmallVector<llvm::OperandBundleDef, 1> BundleList =
- getBundlesForFunclet(Callee);
+ getBundlesForFunclet(Callee.getCallee());
- llvm::Instruction *Inst;
+ llvm::CallBase *Inst;
if (!InvokeDest)
Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
else {
@@ -3796,7 +3765,7 @@ CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
if (CGM.getLangOpts().ObjCAutoRefCount)
AddObjCARCExceptionMetadata(Inst);
- return llvm::CallSite(Inst);
+ return Inst;
}
void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
@@ -3808,7 +3777,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
const CGCallee &Callee,
ReturnValueSlot ReturnValue,
const CallArgList &CallArgs,
- llvm::Instruction **callOrInvoke,
+ llvm::CallBase **callOrInvoke,
SourceLocation Loc) {
// FIXME: We no longer need the types from CallArgs; lift up and simplify.
@@ -3819,17 +3788,46 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
QualType RetTy = CallInfo.getReturnType();
const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
- llvm::FunctionType *IRFuncTy = Callee.getFunctionType();
+ llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
+
+ const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
+ // We can only guarantee that a function is called from the correct
+ // context/function based on the appropriate target attributes,
+ // so only check in the case where we have both always_inline and target
+ // since otherwise we could be making a conditional call after a check for
+ // the proper cpu features (and it won't cause code generation issues due to
+ // function based code generation).
+ if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
+ TargetDecl->hasAttr<TargetAttr>())
+ checkTargetFeatures(Loc, FD);
+
+#ifndef NDEBUG
+ if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
+ // For an inalloca varargs function, we don't expect CallInfo to match the
+ // function pointer's type, because the inalloca struct a will have extra
+ // fields in it for the varargs parameters. Code later in this function
+ // bitcasts the function pointer to the type derived from CallInfo.
+ //
+ // In other cases, we assert that the types match up (until pointers stop
+ // having pointee types).
+ llvm::Type *TypeFromVal;
+ if (Callee.isVirtual())
+ TypeFromVal = Callee.getVirtualFunctionType();
+ else
+ TypeFromVal =
+ Callee.getFunctionPointer()->getType()->getPointerElementType();
+ assert(IRFuncTy == TypeFromVal);
+ }
+#endif
// 1. Set up the arguments.
// If we're using inalloca, insert the allocation after the stack save.
// FIXME: Do this earlier rather than hacking it in here!
Address ArgMemory = Address::invalid();
- const llvm::StructLayout *ArgMemoryLayout = nullptr;
if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
const llvm::DataLayout &DL = CGM.getDataLayout();
- ArgMemoryLayout = DL.getStructLayout(ArgStruct);
llvm::Instruction *IP = CallArgs.getStackBase();
llvm::AllocaInst *AI;
if (IP) {
@@ -3846,13 +3844,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
ArgMemory = Address(AI, Align);
}
- // Helper function to drill into the inalloca allocation.
- auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
- auto FieldOffset =
- CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
- return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
- };
-
ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
@@ -3875,7 +3866,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (IRFunctionArgs.hasSRetArg()) {
IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
} else if (RetAI.isInAlloca()) {
- Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
+ Address Addr =
+ Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
Builder.CreateStore(SRetPtr.getPointer(), Addr);
}
}
@@ -3913,12 +3905,14 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
cast<llvm::Instruction>(Addr.getPointer());
CGBuilderTy::InsertPoint IP = Builder.saveIP();
Builder.SetInsertPoint(Placeholder);
- Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
+ Addr =
+ Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
Builder.restoreIP(IP);
deferPlaceholderReplacement(Placeholder, Addr.getPointer());
} else {
// Store the RValue into the argument struct.
- Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
+ Address Addr =
+ Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
unsigned AS = Addr.getType()->getPointerAddressSpace();
llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
// There are some cases where a trivial bitcast is not avoidable. The
@@ -4099,11 +4093,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
STy->getPointerTo(Src.getAddressSpace()));
}
- auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
assert(NumIRArgs == STy->getNumElements());
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
- Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
+ Address EltPtr = Builder.CreateStructGEP(Src, i);
llvm::Value *LI = Builder.CreateLoad(EltPtr);
IRCallArgs[FirstIRArg + i] = LI;
}
@@ -4153,7 +4145,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
llvm::Type *eltType = coercionType->getElementType(i);
if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
- Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
+ Address eltAddr = Builder.CreateStructGEP(addr, i);
llvm::Value *elt = Builder.CreateLoad(eltAddr);
IRCallArgs[IRArgPos++] = elt;
}
@@ -4186,8 +4178,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// cases, we can't do any parameter mismatch checks. Give up and bitcast
// the callee.
unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
- auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS);
- CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy);
+ CalleePtr =
+ Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
} else {
llvm::Type *LastParamTy =
IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
@@ -4219,19 +4211,20 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
//
// This makes the IR nicer, but more importantly it ensures that we
// can inline the function at -O0 if it is marked always_inline.
- auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* {
- llvm::FunctionType *CalleeFT =
- cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType());
+ auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
+ llvm::Value *Ptr) -> llvm::Function * {
if (!CalleeFT->isVarArg())
- return Ptr;
+ return nullptr;
- llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr);
- if (!CE || CE->getOpcode() != llvm::Instruction::BitCast)
- return Ptr;
+ // Get underlying value if it's a bitcast
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
+ if (CE->getOpcode() == llvm::Instruction::BitCast)
+ Ptr = CE->getOperand(0);
+ }
- llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0));
+ llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
if (!OrigFn)
- return Ptr;
+ return nullptr;
llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
@@ -4240,15 +4233,19 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (OrigFT->isVarArg() ||
OrigFT->getNumParams() != CalleeFT->getNumParams() ||
OrigFT->getReturnType() != CalleeFT->getReturnType())
- return Ptr;
+ return nullptr;
for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
- return Ptr;
+ return nullptr;
return OrigFn;
};
- CalleePtr = simplifyVariadicCallee(CalleePtr);
+
+ if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
+ CalleePtr = OrigFn;
+ IRFuncTy = OrigFn->getFunctionType();
+ }
// 3. Perform the actual call.
@@ -4293,11 +4290,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Apply always_inline to all calls within flatten functions.
// FIXME: should this really take priority over __try, below?
if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
- !(Callee.getAbstractInfo().getCalleeDecl().getDecl() &&
- Callee.getAbstractInfo()
- .getCalleeDecl()
- .getDecl()
- ->hasAttr<NoInlineAttr>())) {
+ !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
Attrs =
Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
llvm::Attribute::AlwaysInline);
@@ -4341,22 +4334,21 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
getBundlesForFunclet(CalleePtr);
// Emit the actual call/invoke instruction.
- llvm::CallSite CS;
+ llvm::CallBase *CI;
if (!InvokeDest) {
- CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
+ CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
} else {
llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
+ CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
BundleList);
EmitBlock(Cont);
}
- llvm::Instruction *CI = CS.getInstruction();
if (callOrInvoke)
*callOrInvoke = CI;
// Apply the attributes and calling convention.
- CS.setAttributes(Attrs);
- CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
+ CI->setAttributes(Attrs);
+ CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
// Apply various metadata.
@@ -4371,7 +4363,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Insert instrumentation or attach profile metadata at indirect call sites.
// For more details, see the comment before the definition of
// IPVK_IndirectCallTarget in InstrProfData.inc.
- if (!CS.getCalledFunction())
+ if (!CI->getCalledFunction())
PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
CI, CalleePtr);
@@ -4382,26 +4374,45 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Suppress tail calls if requested.
if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
- const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
}
+ // Add metadata for calls to MSAllocator functions
+ if (getDebugInfo() && TargetDecl &&
+ TargetDecl->hasAttr<MSAllocatorAttr>())
+ getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy, Loc);
+
// 4. Finish the call.
// If the call doesn't return, finish the basic block and clear the
// insertion point; this allows the rest of IRGen to discard
// unreachable code.
- if (CS.doesNotReturn()) {
+ if (CI->doesNotReturn()) {
if (UnusedReturnSizePtr)
PopCleanupBlock();
// Strip away the noreturn attribute to better diagnose unreachable UB.
if (SanOpts.has(SanitizerKind::Unreachable)) {
- if (auto *F = CS.getCalledFunction())
+ // Also remove from function since CallBase::hasFnAttr additionally checks
+ // attributes of the called function.
+ if (auto *F = CI->getCalledFunction())
F->removeFnAttr(llvm::Attribute::NoReturn);
- CS.removeAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::NoReturn);
+ CI->removeAttribute(llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::NoReturn);
+
+ // Avoid incompatibility with ASan which relies on the `noreturn`
+ // attribute to insert handler calls.
+ if (SanOpts.hasOneOf(SanitizerKind::Address |
+ SanitizerKind::KernelAddress)) {
+ SanitizerScope SanScope(this);
+ llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
+ Builder.SetInsertPoint(CI);
+ auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ llvm::FunctionCallee Fn =
+ CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
+ EmitNounwindRuntimeCall(Fn);
+ }
}
EmitUnreachable(Loc);
@@ -4436,7 +4447,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
switch (RetAI.getKind()) {
case ABIArgInfo::CoerceAndExpand: {
auto coercionType = RetAI.getCoerceAndExpandType();
- auto layout = CGM.getDataLayout().getStructLayout(coercionType);
Address addr = SRetPtr;
addr = Builder.CreateElementBitCast(addr, coercionType);
@@ -4448,7 +4458,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
llvm::Type *eltType = coercionType->getElementType(i);
if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
- Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
+ Address eltAddr = Builder.CreateStructGEP(addr, i);
llvm::Value *elt = CI;
if (requiresExtract)
elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
@@ -4529,7 +4539,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
} ();
// Emit the assume_aligned check on the return value.
- const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
if (Ret.isScalar() && TargetDecl) {
if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
llvm::Value *OffsetValue = nullptr;
@@ -4556,7 +4565,7 @@ CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
if (isVirtual()) {
const CallExpr *CE = getVirtualCallExpr();
return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
- CGF, getVirtualMethodDecl(), getThisAddress(), getFunctionType(),
+ CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
CE ? CE->getBeginLoc() : SourceLocation());
}
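Throughout CGCall.cpp the patch moves from raw llvm::Value* / llvm::CallSite to llvm::FunctionCallee and llvm::CallBase, so a callee's function type always travels with its pointer. A condensed restatement of the ASan no-return hunk above, assuming it sits inside lib/CodeGen with its usual includes:

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
using namespace clang;
using namespace clang::CodeGen;

// Build the runtime helper as an llvm::FunctionCallee (type + pointer) and
// hand it straight to the nounwind-call helper, as the hunk above does.
static void emitAsanNoReturnHandler(CodeGenFunction &CGF, CodeGenModule &CGM) {
  auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
  llvm::FunctionCallee Fn =
      CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
  CGF.EmitNounwindRuntimeCall(Fn);
}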
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index c300808bea28..cc11ded704ab 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -1,9 +1,8 @@
//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -136,6 +135,12 @@ public:
return CGCallee(abstractInfo, functionPtr);
}
+ static CGCallee
+ forDirect(llvm::FunctionCallee functionPtr,
+ const CGCalleeInfo &abstractInfo = CGCalleeInfo()) {
+ return CGCallee(abstractInfo, functionPtr.getCallee());
+ }
+
static CGCallee forVirtual(const CallExpr *CE, GlobalDecl MD, Address Addr,
llvm::FunctionType *FTy) {
CGCallee result(SpecialKind::Virtual);
@@ -199,12 +204,9 @@ public:
assert(isVirtual());
return VirtualInfo.Addr;
}
-
- llvm::FunctionType *getFunctionType() const {
- if (isVirtual())
- return VirtualInfo.FTy;
- return cast<llvm::FunctionType>(
- getFunctionPointer()->getType()->getPointerElementType());
+ llvm::FunctionType *getVirtualFunctionType() const {
+ assert(isVirtual());
+ return VirtualInfo.FTy;
}
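The new forDirect overload lets a FunctionCallee returned by CreateRuntimeFunction be wrapped without manually splitting it into type and pointer. A minimal sketch; the runtime function name "__cg_example_helper" is made up purely for illustration:

#include "CGCall.h"
#include "CodeGenModule.h"
using namespace clang;
using namespace clang::CodeGen;

// Wrap a FunctionCallee directly as a direct CGCallee.
static CGCallee makeExampleCallee(CodeGenModule &CGM,
                                  llvm::FunctionType *FnTy) {
  llvm::FunctionCallee Fn =
      CGM.CreateRuntimeFunction(FnTy, "__cg_example_helper");
  return CGCallee::forDirect(Fn);
}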
/// If this is a delayed callee computation of some sort, prepare
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index ee150a792b76..c8bb63c5c4b1 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -1,9 +1,8 @@
//===--- CGClass.cpp - Emit LLVM Code for C++ classes -----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -303,7 +302,8 @@ Address CodeGenFunction::GetAddressOfBaseClass(
// Get the base pointer type.
llvm::Type *BasePtrTy =
- ConvertType((PathEnd[-1])->getType())->getPointerTo();
+ ConvertType((PathEnd[-1])->getType())
+ ->getPointerTo(Value.getType()->getPointerAddressSpace());
QualType DerivedTy = getContext().getRecordType(Derived);
CharUnits DerivedAlign = CGM.getClassPointerAlignment(Derived);
@@ -491,12 +491,15 @@ namespace {
cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();
const CXXDestructorDecl *D = BaseClass->getDestructor();
+ // We are already inside a destructor, so presumably the object being
+ // destroyed should have the expected type.
+ QualType ThisTy = D->getThisObjectType();
Address Addr =
CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThisAddress(),
DerivedClass, BaseClass,
BaseIsVirtual);
CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual,
- /*Delegating=*/false, Addr);
+ /*Delegating=*/false, Addr, ThisTy);
}
};
@@ -526,8 +529,7 @@ static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
static void EmitBaseInitializer(CodeGenFunction &CGF,
const CXXRecordDecl *ClassDecl,
- CXXCtorInitializer *BaseInit,
- CXXCtorType CtorType) {
+ CXXCtorInitializer *BaseInit) {
assert(BaseInit->isBaseInitializer() &&
"Must have base initializer!");
@@ -539,10 +541,6 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
bool isBaseVirtual = BaseInit->isBaseVirtual();
- // The base constructor doesn't construct virtual bases.
- if (CtorType == Ctor_Base && isBaseVirtual)
- return;
-
// If the initializer for the base (other than the constructor
// itself) accesses 'this' in any way, we need to initialize the
// vtables.
@@ -561,7 +559,7 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased,
- CGF.overlapForBaseInit(ClassDecl, BaseClassDecl, isBaseVirtual));
+ CGF.getOverlapForBaseInit(ClassDecl, BaseClassDecl, isBaseVirtual));
CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);
@@ -650,7 +648,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field);
// Copy the aggregate.
- CGF.EmitAggregateCopy(LHS, Src, FieldType, CGF.overlapForFieldInit(Field),
+ CGF.EmitAggregateCopy(LHS, Src, FieldType, CGF.getOverlapForFieldInit(Field),
LHS.isVolatileQualified());
// Ensure that we destroy the objects if an exception is thrown later in
// the constructor.
@@ -686,7 +684,7 @@ void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS,
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased,
- overlapForFieldInit(Field),
+ getOverlapForFieldInit(Field),
AggValueSlot::IsNotZeroed,
// Checks are made by the code that calls constructor.
AggValueSlot::IsSanitizerChecked);
@@ -793,7 +791,7 @@ void CodeGenFunction::EmitAsanPrologueOrEpilogue(bool Prologue) {
llvm::Type *Args[2] = {IntPtrTy, IntPtrTy};
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, Args, false);
- llvm::Constant *F = CGM.CreateRuntimeFunction(
+ llvm::FunctionCallee F = CGM.CreateRuntimeFunction(
FTy, Prologue ? "__asan_poison_intra_object_redzone"
: "__asan_unpoison_intra_object_redzone");
@@ -1013,7 +1011,7 @@ namespace {
if (FOffset < FirstFieldOffset) {
FirstField = F;
FirstFieldOffset = FOffset;
- } else if (FOffset > LastFieldOffset) {
+ } else if (FOffset >= LastFieldOffset) {
LastField = F;
LastFieldOffset = FOffset;
}
@@ -1264,24 +1262,37 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
E = CD->init_end();
+ // Virtual base initializers first, if any. They aren't needed if:
+ // - This is a base ctor variant
+ // - There are no vbases
+ // - The class is abstract, so a complete object of it cannot be constructed
+ //
+ // The check for an abstract class is necessary because sema may not have
+ // marked virtual base destructors referenced.
+ bool ConstructVBases = CtorType != Ctor_Base &&
+ ClassDecl->getNumVBases() != 0 &&
+ !ClassDecl->isAbstract();
+
+ // In the Microsoft C++ ABI, there are no constructor variants. Instead, the
+ // constructor of a class with virtual bases takes an additional parameter to
+ // conditionally construct the virtual bases. Emit that check here.
llvm::BasicBlock *BaseCtorContinueBB = nullptr;
- if (ClassDecl->getNumVBases() &&
+ if (ConstructVBases &&
!CGM.getTarget().getCXXABI().hasConstructorVariants()) {
- // The ABIs that don't have constructor variants need to put a branch
- // before the virtual base initialization code.
BaseCtorContinueBB =
- CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this, ClassDecl);
+ CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this, ClassDecl);
assert(BaseCtorContinueBB);
}
llvm::Value *const OldThis = CXXThisValue;
- // Virtual base initializers first.
for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) {
+ if (!ConstructVBases)
+ continue;
if (CGM.getCodeGenOpts().StrictVTablePointers &&
CGM.getCodeGenOpts().OptimizationLevel > 0 &&
isInitializerOfDynamicClass(*B))
CXXThisValue = Builder.CreateLaunderInvariantGroup(LoadCXXThis());
- EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
+ EmitBaseInitializer(*this, ClassDecl, *B);
}
if (BaseCtorContinueBB) {
@@ -1298,7 +1309,7 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
CGM.getCodeGenOpts().OptimizationLevel > 0 &&
isInitializerOfDynamicClass(*B))
CXXThisValue = Builder.CreateLaunderInvariantGroup(LoadCXXThis());
- EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
+ EmitBaseInitializer(*this, ClassDecl, *B);
}
CXXThisValue = OldThis;
@@ -1432,9 +1443,11 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
if (DtorType == Dtor_Deleting) {
RunCleanupsScope DtorEpilogue(*this);
EnterDtorCleanups(Dtor, Dtor_Deleting);
- if (HaveInsertPoint())
+ if (HaveInsertPoint()) {
+ QualType ThisTy = Dtor->getThisObjectType();
EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
- /*Delegating=*/false, LoadCXXThisAddress());
+ /*Delegating=*/false, LoadCXXThisAddress(), ThisTy);
+ }
return;
}
@@ -1465,8 +1478,9 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
EnterDtorCleanups(Dtor, Dtor_Complete);
if (!isTryBody) {
+ QualType ThisTy = Dtor->getThisObjectType();
EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
- /*Delegating=*/false, LoadCXXThisAddress());
+ /*Delegating=*/false, LoadCXXThisAddress(), ThisTy);
break;
}
@@ -1627,7 +1641,7 @@ namespace {
llvm::FunctionType *FnType =
llvm::FunctionType::get(CGF.VoidTy, ArgTypes, false);
- llvm::Value *Fn =
+ llvm::FunctionCallee Fn =
CGF.CGM.CreateRuntimeFunction(FnType, "__sanitizer_dtor_callback");
CGF.EmitNounwindRuntimeCall(Fn, Args);
}
@@ -1970,10 +1984,14 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
pushRegularPartialArrayCleanup(arrayBegin, cur, type, eltAlignment,
*destroyer);
}
-
+ auto currAVS = AggValueSlot::forAddr(
+ curAddr, type.getQualifiers(), AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
+ AggValueSlot::DoesNotOverlap, AggValueSlot::IsNotZeroed,
+ NewPointerIsChecked ? AggValueSlot::IsSanitizerChecked
+ : AggValueSlot::IsNotSanitizerChecked);
EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/false,
- /*Delegating=*/false, curAddr, E,
- AggValueSlot::DoesNotOverlap, NewPointerIsChecked);
+ /*Delegating=*/false, currAVS, E);
}
// Go to the next element.
@@ -2001,22 +2019,22 @@ void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
const CXXDestructorDecl *dtor = record->getDestructor();
assert(!dtor->isTrivial());
CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
- /*Delegating=*/false, addr);
+ /*Delegating=*/false, addr, type);
}
void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
CXXCtorType Type,
bool ForVirtualBase,
- bool Delegating, Address This,
- const CXXConstructExpr *E,
- AggValueSlot::Overlap_t Overlap,
- bool NewPointerIsChecked) {
+ bool Delegating,
+ AggValueSlot ThisAVS,
+ const CXXConstructExpr *E) {
CallArgList Args;
-
- LangAS SlotAS = E->getType().getAddressSpace();
+ Address This = ThisAVS.getAddress();
+ LangAS SlotAS = ThisAVS.getQualifiers().getAddressSpace();
QualType ThisType = D->getThisType();
LangAS ThisAS = ThisType.getTypePtr()->getPointeeType().getAddressSpace();
llvm::Value *ThisPtr = This.getPointer();
+
if (SlotAS != ThisAS) {
unsigned TargetThisAS = getContext().getTargetAddressSpace(ThisAS);
llvm::Type *NewType =
@@ -2024,6 +2042,7 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
ThisPtr = getTargetHooks().performAddrSpaceCast(*this, This.getPointer(),
ThisAS, SlotAS, NewType);
}
+
// Push the this ptr.
Args.add(RValue::get(ThisPtr), D->getThisType());
@@ -2037,7 +2056,7 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
LValue Src = EmitLValue(Arg);
QualType DestTy = getContext().getTypeDeclType(D->getParent());
LValue Dest = MakeAddrLValue(This, DestTy);
- EmitAggregateCopyCtor(Dest, Src, Overlap);
+ EmitAggregateCopyCtor(Dest, Src, ThisAVS.mayOverlap());
return;
}
@@ -2050,7 +2069,8 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
/*ParamsToSkip*/ 0, Order);
EmitCXXConstructorCall(D, Type, ForVirtualBase, Delegating, This, Args,
- Overlap, E->getExprLoc(), NewPointerIsChecked);
+ ThisAVS.mayOverlap(), E->getExprLoc(),
+ ThisAVS.isSanitizerChecked());
}
static bool canEmitDelegateCallArgs(CodeGenFunction &CGF,
@@ -2130,8 +2150,7 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
Delegating, Args);
// Emit the call.
- llvm::Constant *CalleePtr =
- CGM.getAddrOfCXXStructor(D, getFromCtorType(Type));
+ llvm::Constant *CalleePtr = CGM.getAddrOfCXXStructor(GlobalDecl(D, Type));
const CGFunctionInfo &Info = CGM.getTypes().arrangeCXXConstructorCall(
Args, D, Type, ExtraArgs.Prefix, ExtraArgs.Suffix, PassPrototypeArgs);
CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(D, Type));
@@ -2350,8 +2369,11 @@ namespace {
: Dtor(D), Addr(Addr), Type(Type) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
+ // We are calling the destructor from within the constructor.
+ // Therefore, "this" should have the expected type.
+ QualType ThisTy = Dtor->getThisObjectType();
CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
- /*Delegating=*/true, Addr);
+ /*Delegating=*/true, Addr, ThisTy);
}
};
} // end anonymous namespace
@@ -2389,31 +2411,32 @@ CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor
void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
CXXDtorType Type,
bool ForVirtualBase,
- bool Delegating,
- Address This) {
+ bool Delegating, Address This,
+ QualType ThisTy) {
CGM.getCXXABI().EmitDestructorCall(*this, DD, Type, ForVirtualBase,
- Delegating, This);
+ Delegating, This, ThisTy);
}
namespace {
struct CallLocalDtor final : EHScopeStack::Cleanup {
const CXXDestructorDecl *Dtor;
Address Addr;
+ QualType Ty;
- CallLocalDtor(const CXXDestructorDecl *D, Address Addr)
- : Dtor(D), Addr(Addr) {}
+ CallLocalDtor(const CXXDestructorDecl *D, Address Addr, QualType Ty)
+ : Dtor(D), Addr(Addr), Ty(Ty) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
/*ForVirtualBase=*/false,
- /*Delegating=*/false, Addr);
+ /*Delegating=*/false, Addr, Ty);
}
};
} // end anonymous namespace
void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
- Address Addr) {
- EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
+ QualType T, Address Addr) {
+ EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr, T);
}
void CodeGenFunction::PushDestructorCleanup(QualType T, Address Addr) {
@@ -2423,7 +2446,7 @@ void CodeGenFunction::PushDestructorCleanup(QualType T, Address Addr) {
const CXXDestructorDecl *D = ClassDecl->getDestructor();
assert(D && D->isUsed() && "destructor not marked as used!");
- PushDestructorCleanup(D, Addr);
+ PushDestructorCleanup(D, T, Addr);
}
void CodeGenFunction::InitializeVTablePointer(const VPtr &Vptr) {
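Destructor calls in CGClass.cpp now carry the static type of the object being destroyed (typically Dtor->getThisObjectType()) alongside its address. Condensed from the CallLocalDtor cleanup above, as an illustrative sketch only:

#include "CodeGenFunction.h"
using namespace clang;
using namespace clang::CodeGen;

// Destroy a complete object at Addr whose static type is Ty; the type now
// rides along so the ABI layer no longer digs it out of the pointer.
static void emitCompleteDtorCall(CodeGenFunction &CGF,
                                 const CXXDestructorDecl *DD, Address Addr,
                                 QualType Ty) {
  CGF.EmitCXXDestructorCall(DD, Dtor_Complete, /*ForVirtualBase=*/false,
                            /*Delegating=*/false, Addr, Ty);
}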
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index 3743d24f11fc..5594f3030229 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -1,9 +1,8 @@
//===--- CGCleanup.cpp - Bookkeeping and code emission for cleanups -------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -53,12 +52,8 @@ DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
llvm::Type *ComplexTy =
llvm::StructType::get(V.first->getType(), V.second->getType());
Address addr = CGF.CreateDefaultAlignTempAlloca(ComplexTy, "saved-complex");
- CGF.Builder.CreateStore(V.first,
- CGF.Builder.CreateStructGEP(addr, 0, CharUnits()));
- CharUnits offset = CharUnits::fromQuantity(
- CGF.CGM.getDataLayout().getTypeAllocSize(V.first->getType()));
- CGF.Builder.CreateStore(V.second,
- CGF.Builder.CreateStructGEP(addr, 1, offset));
+ CGF.Builder.CreateStore(V.first, CGF.Builder.CreateStructGEP(addr, 0));
+ CGF.Builder.CreateStore(V.second, CGF.Builder.CreateStructGEP(addr, 1));
return saved_type(addr.getPointer(), ComplexAddress);
}
@@ -96,12 +91,10 @@ RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
}
case ComplexAddress: {
Address address = getSavingAddress(Value);
- llvm::Value *real = CGF.Builder.CreateLoad(
- CGF.Builder.CreateStructGEP(address, 0, CharUnits()));
- CharUnits offset = CharUnits::fromQuantity(
- CGF.CGM.getDataLayout().getTypeAllocSize(real->getType()));
- llvm::Value *imag = CGF.Builder.CreateLoad(
- CGF.Builder.CreateStructGEP(address, 1, offset));
+ llvm::Value *real =
+ CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(address, 0));
+ llvm::Value *imag =
+ CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(address, 1));
return RValue::getComplex(real, imag);
}
}
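The simplification above leans on CGBuilderTy::CreateStructGEP now looking up field offsets from the DataLayout itself. A minimal sketch, assuming local names Real, Imag and CGF (hypothetical; only the builder calls mirror the hunk):

// Sketch: spilling a complex value as a two-field struct.
llvm::StructType *ComplexTy =
    llvm::StructType::get(Real->getType(), Imag->getType());
Address Tmp = CGF.CreateDefaultAlignTempAlloca(ComplexTy, "saved-complex");
// No CharUnits offset argument any more; field offsets come from the layout.
CGF.Builder.CreateStore(Real, CGF.Builder.CreateStructGEP(Tmp, 0));
CGF.Builder.CreateStore(Imag, CGF.Builder.CreateStructGEP(Tmp, 1));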
diff --git a/lib/CodeGen/CGCleanup.h b/lib/CodeGen/CGCleanup.h
index 15d6f46dcb56..ffe0f9d9dd20 100644
--- a/lib/CodeGen/CGCleanup.h
+++ b/lib/CodeGen/CGCleanup.h
@@ -1,9 +1,8 @@
//===-- CGCleanup.h - Classes for cleanups IR generation --------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CodeGen/CGCoroutine.cpp b/lib/CodeGen/CGCoroutine.cpp
index 80fa7c873631..aee5a927a055 100644
--- a/lib/CodeGen/CGCoroutine.cpp
+++ b/lib/CodeGen/CGCoroutine.cpp
@@ -1,9 +1,8 @@
//===----- CGCoroutine.cpp - Emit LLVM Code for C++ coroutines ------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -205,7 +204,6 @@ static LValueOrRValue emitSuspendExpression(CodeGenFunction &CGF, CGCoroData &Co
BasicBlock *RealSuspendBlock =
CGF.createBasicBlock(Prefix + Twine(".suspend.bool"));
CGF.Builder.CreateCondBr(SuspendRet, RealSuspendBlock, ReadyBlock);
- SuspendBlock = RealSuspendBlock;
CGF.EmitBlock(RealSuspendBlock);
}
@@ -407,7 +405,7 @@ struct CallCoroEnd final : public EHScopeStack::Cleanup {
if (Bundles.empty()) {
// Otherwise, (landingpad model), create a conditional branch that leads
// either to a cleanup block or a block with EH resume instruction.
- auto *ResumeBB = CGF.getEHResumeBlock(/*cleanup=*/true);
+ auto *ResumeBB = CGF.getEHResumeBlock(/*isCleanup=*/true);
auto *CleanupContBB = CGF.createBasicBlock("cleanup.cont");
CGF.Builder.CreateCondBr(CoroEnd, ResumeBB, CleanupContBB);
CGF.EmitBlock(CleanupContBB);
@@ -733,10 +731,10 @@ RValue CodeGenFunction::EmitCoroutineIntrinsic(const CallExpr *E,
Args.push_back(llvm::ConstantTokenNone::get(getLLVMContext()));
break;
}
- for (auto &Arg : E->arguments())
+ for (const Expr *Arg : E->arguments())
Args.push_back(EmitScalarExpr(Arg));
- llvm::Value *F = CGM.getIntrinsic(IID);
+ llvm::Function *F = CGM.getIntrinsic(IID);
llvm::CallInst *Call = Builder.CreateCall(F, Args);
// Note: The following code is to enable emitting coro.id and coro.begin by
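The type tightening above (llvm::Function * rather than llvm::Value * for the intrinsic callee, const Expr * for the arguments) matches what CodeGenModule::getIntrinsic actually returns. A minimal sketch of the same pattern, with the intrinsic ID chosen only for illustration:

// Sketch: emitting a call to a coroutine intrinsic from scalar arguments.
llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::coro_free);
llvm::SmallVector<llvm::Value *, 2> Args;
for (const Expr *Arg : E->arguments())
  Args.push_back(EmitScalarExpr(Arg));
llvm::CallInst *Call = Builder.CreateCall(F, Args);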
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index 41f8721468a3..f6ee7ee26d4b 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -1,9 +1,8 @@
//===--- CGDebugInfo.cpp - Emit Debug Information for a Module ------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -19,6 +18,7 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
+#include "clang/Analysis/Analyses/ExprMutationAnalyzer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
@@ -373,7 +373,7 @@ CGDebugInfo::computeChecksum(FileID FID, SmallString<32> &Checksum) const {
SourceManager &SM = CGM.getContext().getSourceManager();
bool Invalid;
- llvm::MemoryBuffer *MemBuffer = SM.getBuffer(FID, &Invalid);
+ const llvm::MemoryBuffer *MemBuffer = SM.getBuffer(FID, &Invalid);
if (Invalid)
return None;
@@ -423,8 +423,12 @@ llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
}
SmallString<32> Checksum;
+
+ // Compute the checksum if possible. If the location is affected by a #line
+ // directive that refers to a file, PLoc will have an invalid FileID, and we
+ // will correctly get no checksum.
Optional<llvm::DIFile::ChecksumKind> CSKind =
- computeChecksum(SM.getFileID(Loc), Checksum);
+ computeChecksum(PLoc.getFileID(), Checksum);
Optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo;
if (CSKind)
CSInfo.emplace(*CSKind, Checksum);
@@ -451,8 +455,8 @@ CGDebugInfo::createFile(StringRef FileName,
for (; CurDirIt != CurDirE && *CurDirIt == *FileIt; ++CurDirIt, ++FileIt)
llvm::sys::path::append(DirBuf, *CurDirIt);
if (std::distance(llvm::sys::path::begin(CurDir), CurDirIt) == 1) {
- // The common prefix only the root; stripping it would cause
- // LLVM diagnostic locations to be more confusing.
+ // Don't strip the common prefix if it is only the root "/"
+ // since that would make LLVM diagnostic locations confusing.
Dir = {};
File = RemappedFile;
} else {
@@ -610,12 +614,8 @@ void CGDebugInfo::CreateCompileUnit() {
TheCU = DBuilder.createCompileUnit(
LangTag, CUFile, CGOpts.EmitVersionIdentMetadata ? Producer : "",
LO.Optimize || CGOpts.PrepareForLTO || CGOpts.PrepareForThinLTO,
- CGOpts.DwarfDebugFlags, RuntimeVers,
- (CGOpts.getSplitDwarfMode() != CodeGenOptions::NoFission)
- ? ""
- : CGOpts.SplitDwarfFile,
- EmissionKind, DwoId, CGOpts.SplitDwarfInlining,
- CGOpts.DebugInfoForProfiling,
+ CGOpts.DwarfDebugFlags, RuntimeVers, CGOpts.SplitDwarfFile, EmissionKind,
+ DwoId, CGOpts.SplitDwarfInlining, CGOpts.DebugInfoForProfiling,
CGM.getTarget().getTriple().isNVPTX()
? llvm::DICompileUnit::DebugNameTableKind::None
: static_cast<llvm::DICompileUnit::DebugNameTableKind>(
@@ -916,6 +916,11 @@ static SmallString<256> getTypeIdentifier(const TagType *Ty, CodeGenModule &CGM,
if (!needsTypeIdentifier(TD, CGM, TheCU))
return Identifier;
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(TD))
+ if (RD->getDefinition())
+ if (RD->isDynamicClass() &&
+ CGM.getVTableLinkage(RD) == llvm::GlobalValue::ExternalLinkage)
+ return Identifier;
// TODO: This is using the RTTI name. Is there a better way to get
// a unique string for a type?
@@ -1083,15 +1088,18 @@ llvm::DIType *CGDebugInfo::CreateType(const TemplateSpecializationType *Ty,
assert(Ty->isTypeAlias());
llvm::DIType *Src = getOrCreateType(Ty->getAliasedType(), Unit);
+ auto *AliasDecl =
+ cast<TypeAliasTemplateDecl>(Ty->getTemplateName().getAsTemplateDecl())
+ ->getTemplatedDecl();
+
+ if (AliasDecl->hasAttr<NoDebugAttr>())
+ return Src;
+
SmallString<128> NS;
llvm::raw_svector_ostream OS(NS);
Ty->getTemplateName().print(OS, getPrintingPolicy(), /*qualified*/ false);
printTemplateArgumentList(OS, Ty->template_arguments(), getPrintingPolicy());
- auto *AliasDecl =
- cast<TypeAliasTemplateDecl>(Ty->getTemplateName().getAsTemplateDecl())
- ->getTemplatedDecl();
-
SourceLocation Loc = AliasDecl->getLocation();
return DBuilder.createTypedef(Src, OS.str(), getOrCreateFile(Loc),
getLineNumber(Loc),
@@ -1100,15 +1108,20 @@ llvm::DIType *CGDebugInfo::CreateType(const TemplateSpecializationType *Ty,
llvm::DIType *CGDebugInfo::CreateType(const TypedefType *Ty,
llvm::DIFile *Unit) {
+ llvm::DIType *Underlying =
+ getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit);
+
+ if (Ty->getDecl()->hasAttr<NoDebugAttr>())
+ return Underlying;
+
// We don't set size information, but do specify where the typedef was
// declared.
SourceLocation Loc = Ty->getDecl()->getLocation();
// Typedefs are derived from some other type.
- return DBuilder.createTypedef(
- getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit),
- Ty->getDecl()->getName(), getOrCreateFile(Loc), getLineNumber(Loc),
- getDeclContextDescriptor(Ty->getDecl()));
+ return DBuilder.createTypedef(Underlying, Ty->getDecl()->getName(),
+ getOrCreateFile(Loc), getLineNumber(Loc),
+ getDeclContextDescriptor(Ty->getDecl()));
}
static unsigned getDwarfCC(CallingConv CC) {
@@ -1394,6 +1407,9 @@ void CGDebugInfo::CollectRecordFields(
isa<VarTemplateSpecializationDecl>(V))
continue;
+ if (isa<VarTemplatePartialSpecializationDecl>(V))
+ continue;
+
// Reuse the existing static member declaration if one exists
auto MI = StaticDataMemberCache.find(V->getCanonicalDecl());
if (MI != StaticDataMemberCache.end()) {
@@ -1726,31 +1742,37 @@ CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList,
QualType T = TA.getParamTypeForDecl().getDesugaredType(CGM.getContext());
llvm::DIType *TTy = getOrCreateType(T, Unit);
llvm::Constant *V = nullptr;
- const CXXMethodDecl *MD;
- // Variable pointer template parameters have a value that is the address
- // of the variable.
- if (const auto *VD = dyn_cast<VarDecl>(D))
- V = CGM.GetAddrOfGlobalVar(VD);
- // Member function pointers have special support for building them, though
- // this is currently unsupported in LLVM CodeGen.
- else if ((MD = dyn_cast<CXXMethodDecl>(D)) && MD->isInstance())
- V = CGM.getCXXABI().EmitMemberFunctionPointer(MD);
- else if (const auto *FD = dyn_cast<FunctionDecl>(D))
- V = CGM.GetAddrOfFunction(FD);
- // Member data pointers have special handling too to compute the fixed
- // offset within the object.
- else if (const auto *MPT = dyn_cast<MemberPointerType>(T.getTypePtr())) {
- // These five lines (& possibly the above member function pointer
- // handling) might be able to be refactored to use similar code in
- // CodeGenModule::getMemberPointerConstant
- uint64_t fieldOffset = CGM.getContext().getFieldOffset(D);
- CharUnits chars =
- CGM.getContext().toCharUnitsFromBits((int64_t)fieldOffset);
- V = CGM.getCXXABI().EmitMemberDataPointer(MPT, chars);
+ // Skip retrieving the value if the template parameter has the CUDA device
+ // attribute, i.e. the value is not available on the host side.
+ if (!CGM.getLangOpts().CUDA || CGM.getLangOpts().CUDAIsDevice ||
+ !D->hasAttr<CUDADeviceAttr>()) {
+ const CXXMethodDecl *MD;
+ // Variable pointer template parameters have a value that is the address
+ // of the variable.
+ if (const auto *VD = dyn_cast<VarDecl>(D))
+ V = CGM.GetAddrOfGlobalVar(VD);
+ // Member function pointers have special support for building them,
+ // though this is currently unsupported in LLVM CodeGen.
+ else if ((MD = dyn_cast<CXXMethodDecl>(D)) && MD->isInstance())
+ V = CGM.getCXXABI().EmitMemberFunctionPointer(MD);
+ else if (const auto *FD = dyn_cast<FunctionDecl>(D))
+ V = CGM.GetAddrOfFunction(FD);
+ // Member data pointers have special handling too to compute the fixed
+ // offset within the object.
+ else if (const auto *MPT =
+ dyn_cast<MemberPointerType>(T.getTypePtr())) {
+ // These five lines (& possibly the above member function pointer
+ // handling) might be able to be refactored to use similar code in
+ // CodeGenModule::getMemberPointerConstant
+ uint64_t fieldOffset = CGM.getContext().getFieldOffset(D);
+ CharUnits chars =
+ CGM.getContext().toCharUnitsFromBits((int64_t)fieldOffset);
+ V = CGM.getCXXABI().EmitMemberDataPointer(MPT, chars);
+ }
+ V = V->stripPointerCasts();
}
TemplateParams.push_back(DBuilder.createTemplateValueParameter(
- TheCU, Name, TTy,
- cast_or_null<llvm::Constant>(V->stripPointerCasts())));
+ TheCU, Name, TTy, cast_or_null<llvm::Constant>(V)));
} break;
case TemplateArgument::NullPtr: {
QualType T = TA.getNullPtrType();
@@ -1817,32 +1839,24 @@ CGDebugInfo::CollectFunctionTemplateParams(const FunctionDecl *FD,
}
llvm::DINodeArray CGDebugInfo::CollectVarTemplateParams(const VarDecl *VL,
- llvm::DIFile *Unit) {
- if (auto *TS = dyn_cast<VarTemplateSpecializationDecl>(VL)) {
- auto T = TS->getSpecializedTemplateOrPartial();
- auto TA = TS->getTemplateArgs().asArray();
- // Collect parameters for a partial specialization
- if (T.is<VarTemplatePartialSpecializationDecl *>()) {
- const TemplateParameterList *TList =
- T.get<VarTemplatePartialSpecializationDecl *>()
- ->getTemplateParameters();
- return CollectTemplateParams(TList, TA, Unit);
- }
-
- // Collect parameters for an explicit specialization
- if (T.is<VarTemplateDecl *>()) {
- const TemplateParameterList *TList = T.get<VarTemplateDecl *>()
- ->getTemplateParameters();
- return CollectTemplateParams(TList, TA, Unit);
- }
- }
- return llvm::DINodeArray();
+ llvm::DIFile *Unit) {
+ // Always get the full list of parameters, not just the ones from the
+ // specialization. A partial specialization may have fewer parameters than
+ // there are arguments.
+ auto *TS = dyn_cast<VarTemplateSpecializationDecl>(VL);
+ if (!TS)
+ return llvm::DINodeArray();
+ VarTemplateDecl *T = TS->getSpecializedTemplate();
+ const TemplateParameterList *TList = T->getTemplateParameters();
+ auto TA = TS->getTemplateArgs().asArray();
+ return CollectTemplateParams(TList, TA, Unit);
}
llvm::DINodeArray CGDebugInfo::CollectCXXTemplateParams(
const ClassTemplateSpecializationDecl *TSpecial, llvm::DIFile *Unit) {
- // Always get the full list of parameters, not just the ones from
- // the specialization.
+ // Always get the full list of parameters, not just the ones from the
+ // specialization. A partial specialization may have fewer parameters than
+ // there are arguments.
TemplateParameterList *TPList =
TSpecial->getSpecializedTemplate()->getTemplateParameters();
const TemplateArgumentList &TAList = TSpecial->getTemplateArgs();
@@ -1875,6 +1889,58 @@ StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) {
return internString("_vptr$", RD->getNameAsString());
}
+StringRef CGDebugInfo::getDynamicInitializerName(const VarDecl *VD,
+ DynamicInitKind StubKind,
+ llvm::Function *InitFn) {
+ // If we're not emitting codeview, use the mangled name. For Itanium, this is
+ // arbitrary.
+ if (!CGM.getCodeGenOpts().EmitCodeView)
+ return InitFn->getName();
+
+ // Print the normal qualified name for the variable, then break off the last
+ // NNS, and add the appropriate other text. Clang always prints the global
+ // variable name without template arguments, so we can use rsplit("::") and
+ // then recombine the pieces.
+ SmallString<128> QualifiedGV;
+ StringRef Quals;
+ StringRef GVName;
+ {
+ llvm::raw_svector_ostream OS(QualifiedGV);
+ VD->printQualifiedName(OS, getPrintingPolicy());
+ std::tie(Quals, GVName) = OS.str().rsplit("::");
+ if (GVName.empty())
+ std::swap(Quals, GVName);
+ }
+
+ SmallString<128> InitName;
+ llvm::raw_svector_ostream OS(InitName);
+ if (!Quals.empty())
+ OS << Quals << "::";
+
+ switch (StubKind) {
+ case DynamicInitKind::NoStub:
+ llvm_unreachable("not an initializer");
+ case DynamicInitKind::Initializer:
+ OS << "`dynamic initializer for '";
+ break;
+ case DynamicInitKind::AtExit:
+ OS << "`dynamic atexit destructor for '";
+ break;
+ }
+
+ OS << GVName;
+
+ // Add any template specialization args.
+ if (const auto *VTpl = dyn_cast<VarTemplateSpecializationDecl>(VD)) {
+ printTemplateArgumentList(OS, VTpl->getTemplateArgs().asArray(),
+ getPrintingPolicy());
+ }
+
+ OS << '\'';
+
+ return internString(OS.str());
+}
+
void CGDebugInfo::CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile *Unit,
SmallVectorImpl<llvm::Metadata *> &EltTys,
llvm::DICompositeType *RecordTy) {
@@ -1954,6 +2020,20 @@ llvm::DIType *CGDebugInfo::getOrCreateStandaloneType(QualType D,
return T;
}
+void CGDebugInfo::addHeapAllocSiteMetadata(llvm::Instruction *CI,
+ QualType D,
+ SourceLocation Loc) {
+ llvm::MDNode *node;
+ if (D.getTypePtr()->isVoidPointerType()) {
+ node = llvm::MDNode::get(CGM.getLLVMContext(), None);
+ } else {
+ QualType PointeeTy = D.getTypePtr()->getPointeeType();
+ node = getOrCreateType(PointeeTy, getOrCreateFile(Loc));
+ }
+
+ CI->setMetadata("heapallocsite", node);
+}
+
void CGDebugInfo::completeType(const EnumDecl *ED) {
if (DebugKind <= codegenoptions::DebugLineTablesOnly)
return;
@@ -2297,7 +2377,14 @@ CGDebugInfo::getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod,
}
bool IsRootModule = M ? !M->Parent : true;
- if (CreateSkeletonCU && IsRootModule) {
+ // When a module name is specified as -fmodule-name, that module gets a
+ // clang::Module object, but it won't actually be built or imported; it will
+ // be textual.
+ if (CreateSkeletonCU && IsRootModule && Mod.getASTFile().empty() && M)
+ assert(StringRef(M->Name).startswith(CGM.getLangOpts().ModuleName) &&
+ "clang module without ASTFile must be specified by -fmodule-name");
+
+ if (CreateSkeletonCU && IsRootModule && !Mod.getASTFile().empty()) {
// PCH files don't have a signature field in the control block,
// but LLVM detects skeleton CUs by looking for a non-zero DWO id.
// We use the lower 64 bits for debug info.
@@ -2314,6 +2401,7 @@ CGDebugInfo::getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod,
Signature);
DIB.finalize();
}
+
llvm::DIModule *Parent =
IsRootModule ? nullptr
: getOrCreateModuleRef(
@@ -2768,6 +2856,9 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
case Type::Paren:
T = cast<ParenType>(T)->getInnerType();
break;
+ case Type::MacroQualified:
+ T = cast<MacroQualifiedType>(T)->getUnderlyingType();
+ break;
case Type::SubstTemplateTypeParm:
T = cast<SubstTemplateTypeParmType>(T)->getReplacementType();
break;
@@ -2947,6 +3038,7 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
case Type::DeducedTemplateSpecialization:
case Type::Elaborated:
case Type::Paren:
+ case Type::MacroQualified:
case Type::SubstTemplateTypeParm:
case Type::TypeOfExpr:
case Type::TypeOf:
@@ -3021,9 +3113,9 @@ llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
else
Flags |= llvm::DINode::FlagTypePassByValue;
- // Record if a C++ record is trivial type.
- if (CXXRD->isTrivial())
- Flags |= llvm::DINode::FlagTrivial;
+ // Record if a C++ record is non-trivial type.
+ if (!CXXRD->isTrivial())
+ Flags |= llvm::DINode::FlagNonTrivial;
}
llvm::DICompositeType *RealDecl = DBuilder.createReplaceableCompositeType(
@@ -3443,6 +3535,11 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
} else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(D)) {
Name = getObjCMethodName(OMD);
Flags |= llvm::DINode::FlagPrototyped;
+ } else if (isa<VarDecl>(D) &&
+ GD.getDynamicInitKind() != DynamicInitKind::NoStub) {
+ // This is a global initializer or atexit destructor for a global variable.
+ Name = getDynamicInitializerName(cast<VarDecl>(D), GD.getDynamicInitKind(),
+ Fn);
} else {
// Use llvm function name.
Name = Fn->getName();
@@ -3488,6 +3585,15 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
if (HasDecl && isa<FunctionDecl>(D))
DeclCache[D->getCanonicalDecl()].reset(SP);
+ // We use the SPDefCache only when the debug entry values option is set,
+ // in order to speed up the parameter modification analysis.
+ //
+ // FIXME: Use AbstractCallee here to support ObjCMethodDecl.
+ if (CGM.getCodeGenOpts().EnableDebugEntryValues && HasDecl)
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->hasBody() && !FD->param_empty())
+ SPDefCache[FD].reset(SP);
+
if (CGM.getCodeGenOpts().DwarfVersion >= 5) {
// Starting with DWARF V5 method declarations are emitted as children of
// the interface type.
@@ -3516,7 +3622,7 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
}
void CGDebugInfo::EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc,
- QualType FnType) {
+ QualType FnType, llvm::Function *Fn) {
StringRef Name;
StringRef LinkageName;
@@ -3526,7 +3632,9 @@ void CGDebugInfo::EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc,
llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
llvm::DIFile *Unit = getOrCreateFile(Loc);
- llvm::DIScope *FDContext = getDeclContextDescriptor(D);
+ bool IsDeclForCallSite = Fn ? true : false;
+ llvm::DIScope *FDContext =
+ IsDeclForCallSite ? Unit : getDeclContextDescriptor(D);
llvm::DINodeArray TParamsArray;
if (isa<FunctionDecl>(D)) {
// If there is a DISubprogram for this function available then use it.
@@ -3553,10 +3661,38 @@ void CGDebugInfo::EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc,
if (CGM.getLangOpts().Optimize)
SPFlags |= llvm::DISubprogram::SPFlagOptimized;
- DBuilder.retainType(DBuilder.createFunction(
+ llvm::DISubprogram *SP = DBuilder.createFunction(
FDContext, Name, LinkageName, Unit, LineNo,
getOrCreateFunctionType(D, FnType, Unit), ScopeLine, Flags, SPFlags,
- TParamsArray.get(), getFunctionDeclaration(D)));
+ TParamsArray.get(), getFunctionDeclaration(D));
+
+ if (IsDeclForCallSite)
+ Fn->setSubprogram(SP);
+
+ DBuilder.retainType(SP);
+}
+
+void CGDebugInfo::EmitFuncDeclForCallSite(llvm::CallBase *CallOrInvoke,
+ QualType CalleeType,
+ const FunctionDecl *CalleeDecl) {
+ auto &CGOpts = CGM.getCodeGenOpts();
+ if (!CGOpts.EnableDebugEntryValues || !CGM.getLangOpts().Optimize ||
+ !CallOrInvoke ||
+ CGM.getCodeGenOpts().getDebugInfo() < codegenoptions::LimitedDebugInfo)
+ return;
+
+ auto *Func = CallOrInvoke->getCalledFunction();
+ if (!Func)
+ return;
+
+ // If there is no DISubprogram attached to the function being called,
+ // create the one describing the function in order to have complete
+ // call site debug info.
+ if (Func->getSubprogram())
+ return;
+
+ if (!CalleeDecl->isStatic() && !CalleeDecl->isInlined())
+ EmitFunctionDecl(CalleeDecl, CalleeDecl->getLocation(), CalleeType, Func);
}
void CGDebugInfo::EmitInlineFunctionStart(CGBuilderTy &Builder, GlobalDecl GD) {
@@ -3735,7 +3871,8 @@ CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
llvm::Value *Storage,
llvm::Optional<unsigned> ArgNo,
- CGBuilderTy &Builder) {
+ CGBuilderTy &Builder,
+ const bool UsePointerValue) {
assert(DebugKind >= codegenoptions::LimitedDebugInfo);
assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
if (VD->hasAttr<NoDebugAttr>())
@@ -3840,6 +3977,16 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
}
}
+ // Clang stores the sret pointer provided by the caller in a static alloca.
+ // Use DW_OP_deref to tell the debugger to load the pointer and treat it as
+ // the address of the variable.
+ if (UsePointerValue) {
+ assert(std::find(Expr.begin(), Expr.end(), llvm::dwarf::DW_OP_deref) ==
+ Expr.end() &&
+ "Debug info already contains DW_OP_deref.");
+ Expr.push_back(llvm::dwarf::DW_OP_deref);
+ }
+
// Create the descriptor for the variable.
auto *D = ArgNo ? DBuilder.createParameterVariable(
Scope, Name, *ArgNo, Unit, Line, Ty,
@@ -3853,14 +4000,46 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
llvm::DebugLoc::get(Line, Column, Scope, CurInlinedAt),
Builder.GetInsertBlock());
+ if (CGM.getCodeGenOpts().EnableDebugEntryValues && ArgNo) {
+ if (auto *PD = dyn_cast<ParmVarDecl>(VD))
+ ParamCache[PD].reset(D);
+ }
+
return D;
}
llvm::DILocalVariable *
CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD, llvm::Value *Storage,
- CGBuilderTy &Builder) {
+ CGBuilderTy &Builder,
+ const bool UsePointerValue) {
assert(DebugKind >= codegenoptions::LimitedDebugInfo);
- return EmitDeclare(VD, Storage, llvm::None, Builder);
+ return EmitDeclare(VD, Storage, llvm::None, Builder, UsePointerValue);
+}
+
+void CGDebugInfo::EmitLabel(const LabelDecl *D, CGBuilderTy &Builder) {
+ assert(DebugKind >= codegenoptions::LimitedDebugInfo);
+ assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
+
+ if (D->hasAttr<NoDebugAttr>())
+ return;
+
+ auto *Scope = cast<llvm::DIScope>(LexicalBlockStack.back());
+ llvm::DIFile *Unit = getOrCreateFile(D->getLocation());
+
+ // Get location information.
+ unsigned Line = getLineNumber(D->getLocation());
+ unsigned Column = getColumnNumber(D->getLocation());
+
+ StringRef Name = D->getName();
+
+ // Create the descriptor for the label.
+ auto *L =
+ DBuilder.createLabel(Scope, Name, Unit, Line, CGM.getLangOpts().Optimize);
+
+ // Insert an llvm.dbg.label into the current block.
+ DBuilder.insertLabel(L,
+ llvm::DebugLoc::get(Line, Column, Scope, CurInlinedAt),
+ Builder.GetInsertBlock());
}
llvm::DIType *CGDebugInfo::CreateSelfType(const QualType &QualTy,
@@ -4125,7 +4304,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
llvm::DIDerivedType *
CGDebugInfo::getOrCreateStaticDataMemberDeclarationOrNull(const VarDecl *D) {
- if (!D->isStaticDataMember())
+ if (!D || !D->isStaticDataMember())
return nullptr;
auto MI = StaticDataMemberCache.find(D->getCanonicalDecl());
@@ -4207,6 +4386,14 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
SmallVector<int64_t, 4> Expr;
unsigned AddressSpace =
CGM.getContext().getTargetAddressSpace(D->getType());
+ if (CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) {
+ if (D->hasAttr<CUDASharedAttr>())
+ AddressSpace =
+ CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared);
+ else if (D->hasAttr<CUDAConstantAttr>())
+ AddressSpace =
+ CGM.getContext().getTargetAddressSpace(LangAS::cuda_constant);
+ }
AppendAddressSpaceXDeref(AddressSpace, Expr);
GVE = DBuilder.createGlobalVariableExpression(
@@ -4229,22 +4416,32 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, const APValue &Init) {
llvm::DIFile *Unit = getOrCreateFile(VD->getLocation());
StringRef Name = VD->getName();
llvm::DIType *Ty = getOrCreateType(VD->getType(), Unit);
+
+ // Do not use global variables for enums, unless in CodeView.
if (const auto *ECD = dyn_cast<EnumConstantDecl>(VD)) {
const auto *ED = cast<EnumDecl>(ECD->getDeclContext());
assert(isa<EnumType>(ED->getTypeForDecl()) && "Enum without EnumType?");
- Ty = getOrCreateType(QualType(ED->getTypeForDecl(), 0), Unit);
+ (void)ED;
+
+ // If CodeView, emit enums as global variables, unless they are defined
+ // inside a class. We do this because MSVC doesn't emit S_CONSTANTs for
+ // enums in classes, and because it is difficult to attach this scope
+ // information to the global variable.
+ if (!CGM.getCodeGenOpts().EmitCodeView ||
+ isa<RecordDecl>(ED->getDeclContext()))
+ return;
}
- // Do not use global variables for enums.
- //
- // FIXME: why not?
- if (Ty->getTag() == llvm::dwarf::DW_TAG_enumeration_type)
- return;
- // Do not emit separate definitions for function local const/statics.
+
+ llvm::DIScope *DContext = nullptr;
+
+ // Do not emit separate definitions for function local consts.
if (isa<FunctionDecl>(VD->getDeclContext()))
return;
+
+ // Emit definition for static members in CodeView.
VD = cast<ValueDecl>(VD->getCanonicalDecl());
- auto *VarD = cast<VarDecl>(VD);
- if (VarD->isStaticDataMember()) {
+ auto *VarD = dyn_cast<VarDecl>(VD);
+ if (VarD && VarD->isStaticDataMember()) {
auto *RD = cast<RecordDecl>(VarD->getDeclContext());
getDeclContextDescriptor(VarD);
// Ensure that the type is retained even though it's otherwise unreferenced.
@@ -4253,10 +4450,16 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, const APValue &Init) {
// through its scope.
RetainedTypes.push_back(
CGM.getContext().getRecordType(RD).getAsOpaquePtr());
- return;
- }
- llvm::DIScope *DContext = getDeclContextDescriptor(VD);
+ if (!CGM.getCodeGenOpts().EmitCodeView)
+ return;
+
+ // Use the global scope for static members.
+ DContext = getContextDescriptor(
+ cast<Decl>(CGM.getContext().getTranslationUnitDecl()), TheCU);
+ } else {
+ DContext = getDeclContextDescriptor(VD);
+ }
auto &GV = DeclCache[VD];
if (GV)
@@ -4393,6 +4596,29 @@ void CGDebugInfo::setDwoId(uint64_t Signature) {
TheCU->setDWOId(Signature);
}
+/// Analyzes each function parameter to determine whether it is constant
+/// throughout the function body.
+static void analyzeParametersModification(
+ ASTContext &Ctx,
+ llvm::DenseMap<const FunctionDecl *, llvm::TrackingMDRef> &SPDefCache,
+ llvm::DenseMap<const ParmVarDecl *, llvm::TrackingMDRef> &ParamCache) {
+ for (auto &SP : SPDefCache) {
+ auto *FD = SP.first;
+ assert(FD->hasBody() && "Functions must have body here");
+ const Stmt *FuncBody = (*FD).getBody();
+ for (auto Parm : FD->parameters()) {
+ ExprMutationAnalyzer FuncAnalyzer(*FuncBody, Ctx);
+ if (FuncAnalyzer.isMutated(Parm))
+ continue;
+
+ auto I = ParamCache.find(Parm);
+ assert(I != ParamCache.end() && "Parameters should be already cached");
+ auto *DIParm = cast<llvm::DILocalVariable>(I->second);
+ DIParm->setIsNotModified();
+ }
+ }
+}
+
void CGDebugInfo::finalize() {
// Creating types might create further types - invalidating the current
// element and the size(), so don't cache/reference them.
@@ -4465,6 +4691,10 @@ void CGDebugInfo::finalize() {
if (auto MD = TypeCache[RT])
DBuilder.retainType(cast<llvm::DIType>(MD));
+ if (CGM.getCodeGenOpts().EnableDebugEntryValues)
+ // This will be used to emit debug entry values.
+ analyzeParametersModification(CGM.getContext(), SPDefCache, ParamCache);
+
DBuilder.finalize();
}
@@ -4497,7 +4727,10 @@ llvm::DINode::DIFlags CGDebugInfo::getCallSiteRelatedAttrs() const {
// were part of DWARF v4.
bool SupportsDWARFv4Ext =
CGM.getCodeGenOpts().DwarfVersion == 4 &&
- CGM.getCodeGenOpts().getDebuggerTuning() == llvm::DebuggerKind::LLDB;
+ (CGM.getCodeGenOpts().getDebuggerTuning() == llvm::DebuggerKind::LLDB ||
+ (CGM.getCodeGenOpts().EnableDebugEntryValues &&
+ CGM.getCodeGenOpts().getDebuggerTuning() == llvm::DebuggerKind::GDB));
+
if (!SupportsDWARFv4Ext && CGM.getCodeGenOpts().DwarfVersion < 5)
return llvm::DINode::FlagZero;
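For readers unfamiliar with the CodeView convention implemented by getDynamicInitializerName above, a hypothetical translation unit and the display names it would roughly produce (illustrative only):

// Sketch: a namespace-scope object with a dynamic initializer.
namespace N { struct S { S(); ~S(); }; S gv; }
// With CodeView debug info, the initializer stub for N::gv is described as
//   N::`dynamic initializer for 'gv'
// and the registered atexit destructor stub as
//   N::`dynamic atexit destructor for 'gv'
// Without -gcodeview the stub keeps its plain mangled name.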
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index 031e40b9dde9..7edbea86633a 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -1,9 +1,8 @@
//===--- CGDebugInfo.h - DebugInfo for LLVM CodeGen -------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -42,6 +41,7 @@ class ObjCInterfaceDecl;
class ObjCIvarDecl;
class UsingDecl;
class VarDecl;
+enum class DynamicInitKind : unsigned;
namespace CodeGen {
class CodeGenModule;
@@ -134,6 +134,10 @@ class CGDebugInfo {
llvm::DenseMap<const char *, llvm::TrackingMDRef> DIFileCache;
llvm::DenseMap<const FunctionDecl *, llvm::TrackingMDRef> SPCache;
+ /// Cache of function definitions relevant to the parameter mutation
+ /// analysis.
+ llvm::DenseMap<const FunctionDecl *, llvm::TrackingMDRef> SPDefCache;
+ llvm::DenseMap<const ParmVarDecl *, llvm::TrackingMDRef> ParamCache;
/// Cache declarations relevant to DW_TAG_imported_declarations (C++
/// using declarations) that aren't covered by other more specific caches.
llvm::DenseMap<const Decl *, llvm::TrackingMDRef> DeclCache;
@@ -405,7 +409,15 @@ public:
void EmitInlineFunctionEnd(CGBuilderTy &Builder);
/// Emit debug info for a function declaration.
- void EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc, QualType FnType);
+ /// \p Fn is set only when a declaration for a debug call site gets created.
+ void EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc,
+ QualType FnType, llvm::Function *Fn = nullptr);
+
+ /// Emit debug info for an extern function being called.
+ /// This is needed for call site debug info.
+ void EmitFuncDeclForCallSite(llvm::CallBase *CallOrInvoke,
+ QualType CalleeType,
+ const FunctionDecl *CalleeDecl);
/// Constructs the debug code for exiting a function.
void EmitFunctionEnd(CGBuilderTy &Builder, llvm::Function *Fn);
@@ -422,9 +434,13 @@ public:
/// declaration.
/// Returns a pointer to the DILocalVariable associated with the
/// llvm.dbg.declare, or nullptr otherwise.
- llvm::DILocalVariable *EmitDeclareOfAutoVariable(const VarDecl *Decl,
- llvm::Value *AI,
- CGBuilderTy &Builder);
+ llvm::DILocalVariable *
+ EmitDeclareOfAutoVariable(const VarDecl *Decl, llvm::Value *AI,
+ CGBuilderTy &Builder,
+ const bool UsePointerValue = false);
+
+ /// Emit call to \c llvm.dbg.label for a label.
+ void EmitLabel(const LabelDecl *D, CGBuilderTy &Builder);
/// Emit call to \c llvm.dbg.declare for an imported variable
/// declaration in a block.
@@ -474,6 +490,10 @@ public:
/// Emit standalone debug info for a type.
llvm::DIType *getOrCreateStandaloneType(QualType Ty, SourceLocation Loc);
+ /// Add heapallocsite metadata for MSAllocator calls.
+ void addHeapAllocSiteMetadata(llvm::Instruction *CallSite, QualType Ty,
+ SourceLocation Loc);
+
void completeType(const EnumDecl *ED);
void completeType(const RecordDecl *RD);
void completeRequiredType(const RecordDecl *RD);
@@ -500,7 +520,8 @@ private:
/// llvm.dbg.declare, or nullptr otherwise.
llvm::DILocalVariable *EmitDeclare(const VarDecl *decl, llvm::Value *AI,
llvm::Optional<unsigned> ArgNo,
- CGBuilderTy &Builder);
+ CGBuilderTy &Builder,
+ const bool UsePointerValue = false);
struct BlockByRefType {
/// The wrapper struct used inside the __block_literal struct.
@@ -642,6 +663,12 @@ private:
/// Get the vtable name for the given class.
StringRef getVTableName(const CXXRecordDecl *Decl);
+ /// Get the name to use in the debug info for a dynamic initializer or atexit
+ /// stub function.
+ StringRef getDynamicInitializerName(const VarDecl *VD,
+ DynamicInitKind StubKind,
+ llvm::Function *InitFn);
+
/// Get line number for the location. If location is invalid
/// then use current location.
unsigned getLineNumber(SourceLocation Loc);
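The new EmitLabel entry point gives source-level labels their own debug descriptors. A small hypothetical input that would exercise it when built with -g:

// Sketch: the label "retry" gets a DILabel plus an llvm.dbg.label call,
// so a debugger can place a breakpoint directly on the label.
int wait_ready(volatile int *flag) {
retry:
  if (*flag == 0)
    goto retry;
  return *flag;
}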
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 5959d889b455..6ad43cefc4d2 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -1,9 +1,8 @@
//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -20,6 +19,7 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
+#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
@@ -104,9 +104,11 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::Label: // __label__ x;
case Decl::Import:
case Decl::OMPThreadPrivate:
+ case Decl::OMPAllocate:
case Decl::OMPCapturedExpr:
case Decl::OMPRequires:
case Decl::Empty:
+ case Decl::Concept:
// None of these decls require codegen support.
return;
@@ -142,6 +144,9 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::OMPDeclareReduction:
return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this);
+ case Decl::OMPDeclareMapper:
+ return CGM.EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(&D), this);
+
case Decl::Typedef: // typedef int X;
case Decl::TypeAlias: { // using X = int; [C++0x]
const TypedefNameDecl &TD = cast<TypedefNameDecl>(D);
@@ -149,6 +154,8 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
if (Ty->isVariablyModifiedType())
EmitVariablyModifiedType(Ty);
+
+ return;
}
}
}
@@ -169,7 +176,7 @@ void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
return;
llvm::GlobalValue::LinkageTypes Linkage =
- CGM.getLLVMLinkageVarDefinition(&D, /*isConstant=*/false);
+ CGM.getLLVMLinkageVarDefinition(&D, /*IsConstant=*/false);
// FIXME: We need to force the emission/use of a guard variable for
// some variables even if we can constant-evaluate them because
@@ -473,11 +480,12 @@ namespace {
template <class Derived>
struct DestroyNRVOVariable : EHScopeStack::Cleanup {
- DestroyNRVOVariable(Address addr, llvm::Value *NRVOFlag)
- : NRVOFlag(NRVOFlag), Loc(addr) {}
+ DestroyNRVOVariable(Address addr, QualType type, llvm::Value *NRVOFlag)
+ : NRVOFlag(NRVOFlag), Loc(addr), Ty(type) {}
llvm::Value *NRVOFlag;
Address Loc;
+ QualType Ty;
void Emit(CodeGenFunction &CGF, Flags flags) override {
// Along the exceptions path we always execute the dtor.
@@ -504,26 +512,24 @@ namespace {
struct DestroyNRVOVariableCXX final
: DestroyNRVOVariable<DestroyNRVOVariableCXX> {
- DestroyNRVOVariableCXX(Address addr, const CXXDestructorDecl *Dtor,
- llvm::Value *NRVOFlag)
- : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, NRVOFlag),
- Dtor(Dtor) {}
+ DestroyNRVOVariableCXX(Address addr, QualType type,
+ const CXXDestructorDecl *Dtor, llvm::Value *NRVOFlag)
+ : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, type, NRVOFlag),
+ Dtor(Dtor) {}
const CXXDestructorDecl *Dtor;
void emitDestructorCall(CodeGenFunction &CGF) {
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
/*ForVirtualBase=*/false,
- /*Delegating=*/false, Loc);
+ /*Delegating=*/false, Loc, Ty);
}
};
struct DestroyNRVOVariableC final
: DestroyNRVOVariable<DestroyNRVOVariableC> {
DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
- : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, NRVOFlag), Ty(Ty) {}
-
- QualType Ty;
+ : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, Ty, NRVOFlag) {}
void emitDestructorCall(CodeGenFunction &CGF) {
CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
@@ -535,7 +541,7 @@ namespace {
CallStackRestore(Address Stack) : Stack(Stack) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
llvm::Value *V = CGF.Builder.CreateLoad(Stack);
- llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
+ llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
CGF.Builder.CreateCall(F, V);
}
};
@@ -915,9 +921,8 @@ static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
// If necessary, get a pointer to the element and emit it.
if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
emitStoresForInitAfterBZero(
- CGM, Elt,
- Builder.CreateConstInBoundsGEP2_32(Loc, 0, i, CGM.getDataLayout()),
- isVolatile, Builder);
+ CGM, Elt, Builder.CreateConstInBoundsGEP2_32(Loc, 0, i), isVolatile,
+ Builder);
}
return;
}
@@ -930,10 +935,9 @@ static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
// If necessary, get a pointer to the element and emit it.
if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
- emitStoresForInitAfterBZero(
- CGM, Elt,
- Builder.CreateConstInBoundsGEP2_32(Loc, 0, i, CGM.getDataLayout()),
- isVolatile, Builder);
+ emitStoresForInitAfterBZero(CGM, Elt,
+ Builder.CreateConstInBoundsGEP2_32(Loc, 0, i),
+ isVolatile, Builder);
}
}
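As background for the helpers above: when a local aggregate's initializer is mostly zero, Clang prefers a memset of the whole object followed by a handful of scalar stores over copying a large constant. A hypothetical example (the consumer function is made up):

extern void use(int *);
void init() {
  int buf[100] = {0, 0, 5}; // mostly-zero initializer
  use(buf);                 // roughly: memset(buf, 0, sizeof(buf)) plus one store of 5
}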
@@ -962,103 +966,130 @@ static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
/// FIXME We could be more clever, as we are for bzero above, and generate
/// memset followed by stores. It's unclear that's worth the effort.
static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
- uint64_t GlobalSize) {
+ uint64_t GlobalSize,
+ const llvm::DataLayout &DL) {
uint64_t SizeLimit = 32;
if (GlobalSize <= SizeLimit)
return nullptr;
- return llvm::isBytewiseValue(Init);
+ return llvm::isBytewiseValue(Init, DL);
}
-static llvm::Constant *patternFor(CodeGenModule &CGM, llvm::Type *Ty) {
- // The following value is a guaranteed unmappable pointer value and has a
- // repeated byte-pattern which makes it easier to synthesize. We use it for
- // pointers as well as integers so that aggregates are likely to be
- // initialized with this repeated value.
- constexpr uint64_t LargeValue = 0xAAAAAAAAAAAAAAAAull;
- // For 32-bit platforms it's a bit trickier because, across systems, only the
- // zero page can reasonably be expected to be unmapped, and even then we need
- // a very low address. We use a smaller value, and that value sadly doesn't
- // have a repeated byte-pattern. We don't use it for integers.
- constexpr uint32_t SmallValue = 0x000000AA;
- // Floating-point values are initialized as NaNs because they propagate. Using
- // a repeated byte pattern means that it will be easier to initialize
- // all-floating-point aggregates and arrays with memset. Further, aggregates
- // which mix integral and a few floats might also initialize with memset
- // followed by a handful of stores for the floats. Using fairly unique NaNs
- // also means they'll be easier to distinguish in a crash.
- constexpr bool NegativeNaN = true;
- constexpr uint64_t NaNPayload = 0xFFFFFFFFFFFFFFFFull;
- if (Ty->isIntOrIntVectorTy()) {
- unsigned BitWidth = cast<llvm::IntegerType>(
- Ty->isVectorTy() ? Ty->getVectorElementType() : Ty)
- ->getBitWidth();
- if (BitWidth <= 64)
- return llvm::ConstantInt::get(Ty, LargeValue);
- return llvm::ConstantInt::get(
- Ty, llvm::APInt::getSplat(BitWidth, llvm::APInt(64, LargeValue)));
- }
- if (Ty->isPtrOrPtrVectorTy()) {
- auto *PtrTy = cast<llvm::PointerType>(
- Ty->isVectorTy() ? Ty->getVectorElementType() : Ty);
- unsigned PtrWidth = CGM.getContext().getTargetInfo().getPointerWidth(
- PtrTy->getAddressSpace());
- llvm::Type *IntTy = llvm::IntegerType::get(CGM.getLLVMContext(), PtrWidth);
- uint64_t IntValue;
- switch (PtrWidth) {
- default:
- llvm_unreachable("pattern initialization of unsupported pointer width");
- case 64:
- IntValue = LargeValue;
- break;
- case 32:
- IntValue = SmallValue;
- break;
+/// Decide whether we want to split a constant structure or array store into a
+/// sequence of its fields' stores. This may cost us code size and compilation
+/// speed, but plays better with store optimizations.
+static bool shouldSplitConstantStore(CodeGenModule &CGM,
+ uint64_t GlobalByteSize) {
+ // Don't break things that occupy more than one cacheline.
+ uint64_t ByteSizeLimit = 64;
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0)
+ return false;
+ if (GlobalByteSize <= ByteSizeLimit)
+ return true;
+ return false;
+}
+
+enum class IsPattern { No, Yes };
+
+/// Generate a constant filled with either a pattern or zeroes.
+static llvm::Constant *patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern,
+ llvm::Type *Ty) {
+ if (isPattern == IsPattern::Yes)
+ return initializationPatternFor(CGM, Ty);
+ else
+ return llvm::Constant::getNullValue(Ty);
+}
+
+static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
+ llvm::Constant *constant);
+
+/// Helper function for constWithPadding() to deal with padding in structures.
+static llvm::Constant *constStructWithPadding(CodeGenModule &CGM,
+ IsPattern isPattern,
+ llvm::StructType *STy,
+ llvm::Constant *constant) {
+ const llvm::DataLayout &DL = CGM.getDataLayout();
+ const llvm::StructLayout *Layout = DL.getStructLayout(STy);
+ llvm::Type *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
+ unsigned SizeSoFar = 0;
+ SmallVector<llvm::Constant *, 8> Values;
+ bool NestedIntact = true;
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
+ unsigned CurOff = Layout->getElementOffset(i);
+ if (SizeSoFar < CurOff) {
+ assert(!STy->isPacked());
+ auto *PadTy = llvm::ArrayType::get(Int8Ty, CurOff - SizeSoFar);
+ Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
}
- auto *Int = llvm::ConstantInt::get(IntTy, IntValue);
- return llvm::ConstantExpr::getIntToPtr(Int, PtrTy);
- }
- if (Ty->isFPOrFPVectorTy()) {
- unsigned BitWidth = llvm::APFloat::semanticsSizeInBits(
- (Ty->isVectorTy() ? Ty->getVectorElementType() : Ty)
- ->getFltSemantics());
- llvm::APInt Payload(64, NaNPayload);
- if (BitWidth >= 64)
- Payload = llvm::APInt::getSplat(BitWidth, Payload);
- return llvm::ConstantFP::getQNaN(Ty, NegativeNaN, &Payload);
- }
- if (Ty->isArrayTy()) {
- // Note: this doesn't touch tail padding (at the end of an object, before
- // the next array object). It is instead handled by replaceUndef.
- auto *ArrTy = cast<llvm::ArrayType>(Ty);
- llvm::SmallVector<llvm::Constant *, 8> Element(
- ArrTy->getNumElements(), patternFor(CGM, ArrTy->getElementType()));
- return llvm::ConstantArray::get(ArrTy, Element);
- }
-
- // Note: this doesn't touch struct padding. It will initialize as much union
- // padding as is required for the largest type in the union. Padding is
- // instead handled by replaceUndef. Stores to structs with volatile members
- // don't have a volatile qualifier when initialized according to C++. This is
- // fine because stack-based volatiles don't really have volatile semantics
- // anyways, and the initialization shouldn't be observable.
- auto *StructTy = cast<llvm::StructType>(Ty);
- llvm::SmallVector<llvm::Constant *, 8> Struct(StructTy->getNumElements());
- for (unsigned El = 0; El != Struct.size(); ++El)
- Struct[El] = patternFor(CGM, StructTy->getElementType(El));
- return llvm::ConstantStruct::get(StructTy, Struct);
+ llvm::Constant *CurOp;
+ if (constant->isZeroValue())
+ CurOp = llvm::Constant::getNullValue(STy->getElementType(i));
+ else
+ CurOp = cast<llvm::Constant>(constant->getAggregateElement(i));
+ auto *NewOp = constWithPadding(CGM, isPattern, CurOp);
+ if (CurOp != NewOp)
+ NestedIntact = false;
+ Values.push_back(NewOp);
+ SizeSoFar = CurOff + DL.getTypeAllocSize(CurOp->getType());
+ }
+ unsigned TotalSize = Layout->getSizeInBytes();
+ if (SizeSoFar < TotalSize) {
+ auto *PadTy = llvm::ArrayType::get(Int8Ty, TotalSize - SizeSoFar);
+ Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
+ }
+ if (NestedIntact && Values.size() == STy->getNumElements())
+ return constant;
+ return llvm::ConstantStruct::getAnon(Values, STy->isPacked());
}
-static Address createUnnamedGlobalFrom(CodeGenModule &CGM, const VarDecl &D,
- CGBuilderTy &Builder,
- llvm::Constant *Constant,
- CharUnits Align) {
+/// Replace all padding bytes in a given constant with either a pattern byte or
+/// 0x00.
+static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
+ llvm::Constant *constant) {
+ llvm::Type *OrigTy = constant->getType();
+ if (const auto STy = dyn_cast<llvm::StructType>(OrigTy))
+ return constStructWithPadding(CGM, isPattern, STy, constant);
+ if (auto *STy = dyn_cast<llvm::SequentialType>(OrigTy)) {
+ llvm::SmallVector<llvm::Constant *, 8> Values;
+ unsigned Size = STy->getNumElements();
+ if (!Size)
+ return constant;
+ llvm::Type *ElemTy = STy->getElementType();
+ bool ZeroInitializer = constant->isZeroValue();
+ llvm::Constant *OpValue, *PaddedOp;
+ if (ZeroInitializer) {
+ OpValue = llvm::Constant::getNullValue(ElemTy);
+ PaddedOp = constWithPadding(CGM, isPattern, OpValue);
+ }
+ for (unsigned Op = 0; Op != Size; ++Op) {
+ if (!ZeroInitializer) {
+ OpValue = constant->getAggregateElement(Op);
+ PaddedOp = constWithPadding(CGM, isPattern, OpValue);
+ }
+ Values.push_back(PaddedOp);
+ }
+ auto *NewElemTy = Values[0]->getType();
+ if (NewElemTy == ElemTy)
+ return constant;
+ if (OrigTy->isArrayTy()) {
+ auto *ArrayTy = llvm::ArrayType::get(NewElemTy, Size);
+ return llvm::ConstantArray::get(ArrayTy, Values);
+ } else {
+ return llvm::ConstantVector::get(Values);
+ }
+ }
+ return constant;
+}
+
+Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
+ llvm::Constant *Constant,
+ CharUnits Align) {
auto FunctionName = [&](const DeclContext *DC) -> std::string {
if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD))
return CC->getNameAsString();
if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
return CD->getNameAsString();
- return CGM.getMangledName(FD);
+ return getMangledName(FD);
} else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
return OM->getNameAsString();
} else if (isa<BlockDecl>(DC)) {
@@ -1066,26 +1097,47 @@ static Address createUnnamedGlobalFrom(CodeGenModule &CGM, const VarDecl &D,
} else if (isa<CapturedDecl>(DC)) {
return "<captured>";
} else {
- llvm::llvm_unreachable_internal("expected a function or method");
+ llvm_unreachable("expected a function or method");
}
};
- auto *Ty = Constant->getType();
- bool isConstant = true;
- llvm::GlobalVariable *InsertBefore = nullptr;
- unsigned AS = CGM.getContext().getTargetAddressSpace(
- CGM.getStringLiteralAddressSpace());
- llvm::GlobalVariable *GV = new llvm::GlobalVariable(
- CGM.getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
- Constant,
- "__const." + FunctionName(D.getParentFunctionOrMethod()) + "." +
- D.getName(),
- InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
- GV->setAlignment(Align.getQuantity());
- GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
-
- Address SrcPtr = Address(GV, Align);
- llvm::Type *BP = llvm::PointerType::getInt8PtrTy(CGM.getLLVMContext(), AS);
+ // Form a simple per-variable cache of these values in case we find we
+ // want to reuse them.
+ llvm::GlobalVariable *&CacheEntry = InitializerConstants[&D];
+ if (!CacheEntry || CacheEntry->getInitializer() != Constant) {
+ auto *Ty = Constant->getType();
+ bool isConstant = true;
+ llvm::GlobalVariable *InsertBefore = nullptr;
+ unsigned AS =
+ getContext().getTargetAddressSpace(getStringLiteralAddressSpace());
+ std::string Name;
+ if (D.hasGlobalStorage())
+ Name = getMangledName(&D).str() + ".const";
+ else if (const DeclContext *DC = D.getParentFunctionOrMethod())
+ Name = ("__const." + FunctionName(DC) + "." + D.getName()).str();
+ else
+ llvm_unreachable("local variable has no parent function or method");
+ llvm::GlobalVariable *GV = new llvm::GlobalVariable(
+ getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
+ Constant, Name, InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
+ GV->setAlignment(Align.getQuantity());
+ GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ CacheEntry = GV;
+ } else if (CacheEntry->getAlignment() < Align.getQuantity()) {
+ CacheEntry->setAlignment(Align.getQuantity());
+ }
+
+ return Address(CacheEntry, Align);
+}
+
+static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
+ const VarDecl &D,
+ CGBuilderTy &Builder,
+ llvm::Constant *Constant,
+ CharUnits Align) {
+ Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align);
+ llvm::Type *BP = llvm::PointerType::getInt8PtrTy(CGM.getLLVMContext(),
+ SrcPtr.getAddressSpace());
if (SrcPtr.getType() != BP)
SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
return SrcPtr;
@@ -1096,22 +1148,23 @@ static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
CGBuilderTy &Builder,
llvm::Constant *constant) {
auto *Ty = constant->getType();
- bool isScalar = Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy() ||
- Ty->isFPOrFPVectorTy();
- if (isScalar) {
+ uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
+ if (!ConstantSize)
+ return;
+
+ bool canDoSingleStore = Ty->isIntOrIntVectorTy() ||
+ Ty->isPtrOrPtrVectorTy() || Ty->isFPOrFPVectorTy();
+ if (canDoSingleStore) {
Builder.CreateStore(constant, Loc, isVolatile);
return;
}
- auto *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
- auto *IntPtrTy = CGM.getDataLayout().getIntPtrType(CGM.getLLVMContext());
+ auto *SizeVal = llvm::ConstantInt::get(CGM.IntPtrTy, ConstantSize);
// If the initializer is all or mostly the same, codegen with bzero / memset
// then do a few stores afterward.
- uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
- auto *SizeVal = llvm::ConstantInt::get(IntPtrTy, ConstantSize);
if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
- Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
+ Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, 0), SizeVal,
isVolatile);
bool valueAlreadyCorrect =
@@ -1123,7 +1176,9 @@ static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
return;
}
- llvm::Value *Pattern = shouldUseMemSetToInitialize(constant, ConstantSize);
+ // If the initializer is a repeated byte pattern, use memset.
+ llvm::Value *Pattern =
+ shouldUseMemSetToInitialize(constant, ConstantSize, CGM.getDataLayout());
if (Pattern) {
uint64_t Value = 0x00;
if (!isa<llvm::UndefValue>(Pattern)) {
@@ -1131,22 +1186,51 @@ static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
assert(AP.getBitWidth() <= 8);
Value = AP.getLimitedValue();
}
- Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, Value), SizeVal,
+ Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, Value), SizeVal,
isVolatile);
return;
}
- Builder.CreateMemCpy(
- Loc,
- createUnnamedGlobalFrom(CGM, D, Builder, constant, Loc.getAlignment()),
- SizeVal, isVolatile);
+ // If the initializer is small, use a handful of stores.
+ if (shouldSplitConstantStore(CGM, ConstantSize)) {
+ if (auto *STy = dyn_cast<llvm::StructType>(Ty)) {
+ // FIXME: handle the case when STy != Loc.getElementType().
+ if (STy == Loc.getElementType()) {
+ for (unsigned i = 0; i != constant->getNumOperands(); i++) {
+ Address EltPtr = Builder.CreateStructGEP(Loc, i);
+ emitStoresForConstant(
+ CGM, D, EltPtr, isVolatile, Builder,
+ cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)));
+ }
+ return;
+ }
+ } else if (auto *ATy = dyn_cast<llvm::ArrayType>(Ty)) {
+ // FIXME: handle the case when ATy != Loc.getElementType().
+ if (ATy == Loc.getElementType()) {
+ for (unsigned i = 0; i != ATy->getNumElements(); i++) {
+ Address EltPtr = Builder.CreateConstArrayGEP(Loc, i);
+ emitStoresForConstant(
+ CGM, D, EltPtr, isVolatile, Builder,
+ cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)));
+ }
+ return;
+ }
+ }
+ }
+
+ // Copy from a global.
+ Builder.CreateMemCpy(Loc,
+ createUnnamedGlobalForMemcpyFrom(
+ CGM, D, Builder, constant, Loc.getAlignment()),
+ SizeVal, isVolatile);
}
static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D,
Address Loc, bool isVolatile,
CGBuilderTy &Builder) {
llvm::Type *ElTy = Loc.getElementType();
- llvm::Constant *constant = llvm::Constant::getNullValue(ElTy);
+ llvm::Constant *constant =
+ constWithPadding(CGM, IsPattern::No, llvm::Constant::getNullValue(ElTy));
emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
}
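constWithPadding matters here because interior struct padding would otherwise be left undef by a field-by-field initializer. A hypothetical illustration of the effect under -ftrivial-auto-var-init:

// Sketch: 'Padded' has three bytes of padding between c and i on common ABIs.
struct Padded { char c; int i; };
extern void consume(Padded *);
void f() {
  Padded p; // no explicit initializer
  // With =pattern the stores emitted above fill c, the padding bytes and i
  // with the repeated 0xAA byte; with =zero everything, padding included,
  // becomes 0x00 instead of staying undef.
  consume(&p);
}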
@@ -1154,7 +1238,8 @@ static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D,
Address Loc, bool isVolatile,
CGBuilderTy &Builder) {
llvm::Type *ElTy = Loc.getElementType();
- llvm::Constant *constant = patternFor(CGM, ElTy);
+ llvm::Constant *constant = constWithPadding(
+ CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
assert(!isa<llvm::UndefValue>(constant));
emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
}
@@ -1170,13 +1255,11 @@ static bool containsUndef(llvm::Constant *constant) {
return false;
}
-static llvm::Constant *replaceUndef(llvm::Constant *constant) {
- // FIXME: when doing pattern initialization, replace undef with 0xAA instead.
- // FIXME: also replace padding between values by creating a new struct type
- // which has no padding.
+static llvm::Constant *replaceUndef(CodeGenModule &CGM, IsPattern isPattern,
+ llvm::Constant *constant) {
auto *Ty = constant->getType();
if (isa<llvm::UndefValue>(constant))
- return llvm::Constant::getNullValue(Ty);
+ return patternOrZeroFor(CGM, isPattern, Ty);
if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
return constant;
if (!containsUndef(constant))
@@ -1184,7 +1267,7 @@ static llvm::Constant *replaceUndef(llvm::Constant *constant) {
llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
- Values[Op] = replaceUndef(OpValue);
+ Values[Op] = replaceUndef(CGM, isPattern, OpValue);
}
if (Ty->isStructTy())
return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
@@ -1318,10 +1401,15 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
Address address = Address::invalid();
Address AllocaAddr = Address::invalid();
- if (Ty->isConstantSizeType()) {
- bool NRVO = getLangOpts().ElideConstructors &&
- D.isNRVOVariable();
-
+ Address OpenMPLocalAddr =
+ getLangOpts().OpenMP
+ ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
+ : Address::invalid();
+ bool NRVO = getLangOpts().ElideConstructors && D.isNRVOVariable();
+
+ if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
+ address = OpenMPLocalAddr;
+ } else if (Ty->isConstantSizeType()) {
// If this value is an array or struct with a statically determinable
// constant initializer, there are optimizations we can do.
//
@@ -1361,14 +1449,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// unless:
// - it's an NRVO variable.
// - we are compiling OpenMP and it's an OpenMP local variable.
-
- Address OpenMPLocalAddr =
- getLangOpts().OpenMP
- ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
- : Address::invalid();
- if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
- address = OpenMPLocalAddr;
- } else if (NRVO) {
+ if (NRVO) {
// The named return value optimization: allocate this variable in the
// return slot, so that we can elide the copy when returning this
// variable (C++0x [class.copy]p34).
@@ -1451,7 +1532,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
Address Stack =
CreateTempAlloca(Int8PtrTy, getPointerAlign(), "saved_stack");
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
+ llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
llvm::Value *V = Builder.CreateCall(F);
Builder.CreateStore(V, Stack);
@@ -1481,11 +1562,19 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// Emit debug info for local var declaration.
if (EmitDebugInfo && HaveInsertPoint()) {
+ Address DebugAddr = address;
+ bool UsePointerValue = NRVO && ReturnValuePointer.isValid();
DI->setLocation(D.getLocation());
- (void)DI->EmitDeclareOfAutoVariable(&D, address.getPointer(), Builder);
+
+ // If NRVO, use a pointer to the return address.
+ if (UsePointerValue)
+ DebugAddr = ReturnValuePointer;
+
+ (void)DI->EmitDeclareOfAutoVariable(&D, DebugAddr.getPointer(), Builder,
+ UsePointerValue);
}
- if (D.hasAttr<AnnotateAttr>())
+ if (D.hasAttr<AnnotateAttr>() && HaveInsertPoint())
EmitVarAnnotations(&D, address.getPointer());
// Make sure we call @llvm.lifetime.end.
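The debug-info change just above routes an NRVO variable's declaration through the return-value pointer. A minimal source-level sketch (not part of the patch; names are made up) of the situation it targets:

// Hypothetical example: with the named return value optimization, 'result'
// is constructed directly in the caller's return slot, so its debug location
// has to be reached through ReturnValuePointer rather than a local alloca.
struct Big { int data[64]; };

Big makeBig() {
  Big result = {};       // lives in the return slot (NRVO)
  result.data[0] = 1;    // the debugger still needs to show this write
  return result;         // no copy or move is emitted
}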
@@ -1575,6 +1664,87 @@ bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
return false;
}
+void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
+ const VarDecl &D,
+ Address Loc) {
+ auto trivialAutoVarInit = getContext().getLangOpts().getTrivialAutoVarInit();
+ CharUnits Size = getContext().getTypeSizeInChars(type);
+ bool isVolatile = type.isVolatileQualified();
+ if (!Size.isZero()) {
+ switch (trivialAutoVarInit) {
+ case LangOptions::TrivialAutoVarInitKind::Uninitialized:
+ llvm_unreachable("Uninitialized handled by caller");
+ case LangOptions::TrivialAutoVarInitKind::Zero:
+ emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
+ break;
+ case LangOptions::TrivialAutoVarInitKind::Pattern:
+ emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
+ break;
+ }
+ return;
+ }
+
+ // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
+ // them, so emit a memcpy with the VLA size to initialize each element.
+ // Technically zero-sized or negative-sized VLAs are undefined, and UBSan
+ // will catch that code, but there exists code which generates zero-sized
+ // VLAs. Be nice and initialize whatever they requested.
+ const auto *VlaType = getContext().getAsVariableArrayType(type);
+ if (!VlaType)
+ return;
+ auto VlaSize = getVLASize(VlaType);
+ auto SizeVal = VlaSize.NumElts;
+ CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type);
+ switch (trivialAutoVarInit) {
+ case LangOptions::TrivialAutoVarInitKind::Uninitialized:
+ llvm_unreachable("Uninitialized handled by caller");
+
+ case LangOptions::TrivialAutoVarInitKind::Zero:
+ if (!EltSize.isOne())
+ SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
+ Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
+ isVolatile);
+ break;
+
+ case LangOptions::TrivialAutoVarInitKind::Pattern: {
+ llvm::Type *ElTy = Loc.getElementType();
+ llvm::Constant *Constant = constWithPadding(
+ CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
+ CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
+ llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop");
+ llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop");
+ llvm::BasicBlock *ContBB = createBasicBlock("vla-init.cont");
+ llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
+ SizeVal, llvm::ConstantInt::get(SizeVal->getType(), 0),
+ "vla.iszerosized");
+ Builder.CreateCondBr(IsZeroSizedVLA, ContBB, SetupBB);
+ EmitBlock(SetupBB);
+ if (!EltSize.isOne())
+ SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
+ llvm::Value *BaseSizeInChars =
+ llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
+ Address Begin = Builder.CreateElementBitCast(Loc, Int8Ty, "vla.begin");
+ llvm::Value *End =
+ Builder.CreateInBoundsGEP(Begin.getPointer(), SizeVal, "vla.end");
+ llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
+ EmitBlock(LoopBB);
+ llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
+ Cur->addIncoming(Begin.getPointer(), OriginBB);
+ CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
+ Builder.CreateMemCpy(Address(Cur, CurAlign),
+ createUnnamedGlobalForMemcpyFrom(
+ CGM, D, Builder, Constant, ConstantAlign),
+ BaseSizeInChars, isVolatile);
+ llvm::Value *Next =
+ Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next");
+ llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone");
+ Builder.CreateCondBr(Done, ContBB, LoopBB);
+ Cur->addIncoming(Next, LoopBB);
+ EmitBlock(ContBB);
+ } break;
+ }
+}
+
void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
assert(emission.Variable && "emission was not valid!");
@@ -1585,8 +1755,6 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation());
QualType type = D.getType();
- bool isVolatile = type.isVolatileQualified();
-
// If this local has an initializer, emit it now.
const Expr *Init = D.getInit();
@@ -1620,8 +1788,9 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
bool capturedByInit =
Init && emission.IsEscapingByRef && isCapturedBy(D, Init);
- Address Loc =
- capturedByInit ? emission.Addr : emission.getObjectAddress(*this);
+ bool locIsByrefHeader = !capturedByInit;
+ const Address Loc =
+ locIsByrefHeader ? emission.getObjectAddress(*this) : emission.Addr;
// Note: constexpr already initializes everything correctly.
LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
@@ -1631,103 +1800,46 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
? LangOptions::TrivialAutoVarInitKind::Uninitialized
: getContext().getLangOpts().getTrivialAutoVarInit()));
- auto initializeWhatIsTechnicallyUninitialized = [&]() {
+ auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) {
if (trivialAutoVarInit ==
LangOptions::TrivialAutoVarInitKind::Uninitialized)
return;
- CharUnits Size = getContext().getTypeSizeInChars(type);
- if (!Size.isZero()) {
- switch (trivialAutoVarInit) {
- case LangOptions::TrivialAutoVarInitKind::Uninitialized:
- llvm_unreachable("Uninitialized handled above");
- case LangOptions::TrivialAutoVarInitKind::Zero:
- emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
- break;
- case LangOptions::TrivialAutoVarInitKind::Pattern:
- emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
- break;
- }
- return;
- }
-
- // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
- // them, so emit a memcpy with the VLA size to initialize each element.
- // Technically zero-sized or negative-sized VLAs are undefined, and UBSan
- // will catch that code, but there exists code which generates zero-sized
- // VLAs. Be nice and initialize whatever they requested.
- const VariableArrayType *VlaType =
- dyn_cast_or_null<VariableArrayType>(getContext().getAsArrayType(type));
- if (!VlaType)
- return;
- auto VlaSize = getVLASize(VlaType);
- auto SizeVal = VlaSize.NumElts;
- CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type);
- switch (trivialAutoVarInit) {
- case LangOptions::TrivialAutoVarInitKind::Uninitialized:
- llvm_unreachable("Uninitialized handled above");
-
- case LangOptions::TrivialAutoVarInitKind::Zero:
- if (!EltSize.isOne())
- SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
- Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
- isVolatile);
- break;
+ // Only initialize a __block's storage: we always initialize the header.
+ if (emission.IsEscapingByRef && !locIsByrefHeader)
+ Loc = emitBlockByrefAddress(Loc, &D, /*follow=*/false);
- case LangOptions::TrivialAutoVarInitKind::Pattern: {
- llvm::Type *ElTy = Loc.getElementType();
- llvm::Constant *Constant = patternFor(CGM, ElTy);
- CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
- llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop");
- llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop");
- llvm::BasicBlock *ContBB = createBasicBlock("vla-init.cont");
- llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
- SizeVal, llvm::ConstantInt::get(SizeVal->getType(), 0),
- "vla.iszerosized");
- Builder.CreateCondBr(IsZeroSizedVLA, ContBB, SetupBB);
- EmitBlock(SetupBB);
- if (!EltSize.isOne())
- SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
- llvm::Value *BaseSizeInChars =
- llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
- Address Begin = Builder.CreateElementBitCast(Loc, Int8Ty, "vla.begin");
- llvm::Value *End =
- Builder.CreateInBoundsGEP(Begin.getPointer(), SizeVal, "vla.end");
- llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
- EmitBlock(LoopBB);
- llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
- Cur->addIncoming(Begin.getPointer(), OriginBB);
- CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
- Builder.CreateMemCpy(
- Address(Cur, CurAlign),
- createUnnamedGlobalFrom(CGM, D, Builder, Constant, ConstantAlign),
- BaseSizeInChars, isVolatile);
- llvm::Value *Next =
- Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next");
- llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone");
- Builder.CreateCondBr(Done, ContBB, LoopBB);
- Cur->addIncoming(Next, LoopBB);
- EmitBlock(ContBB);
- } break;
- }
+ return emitZeroOrPatternForAutoVarInit(type, D, Loc);
};
- if (isTrivialInitializer(Init)) {
- initializeWhatIsTechnicallyUninitialized();
- return;
- }
+ if (isTrivialInitializer(Init))
+ return initializeWhatIsTechnicallyUninitialized(Loc);
llvm::Constant *constant = nullptr;
- if (emission.IsConstantAggregate || D.isConstexpr()) {
+ if (emission.IsConstantAggregate ||
+ D.mightBeUsableInConstantExpressions(getContext())) {
assert(!capturedByInit && "constant init contains a capturing block?");
constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
- if (constant && trivialAutoVarInit !=
- LangOptions::TrivialAutoVarInitKind::Uninitialized)
- constant = replaceUndef(constant);
+ if (constant && !constant->isZeroValue() &&
+ (trivialAutoVarInit !=
+ LangOptions::TrivialAutoVarInitKind::Uninitialized)) {
+ IsPattern isPattern =
+ (trivialAutoVarInit == LangOptions::TrivialAutoVarInitKind::Pattern)
+ ? IsPattern::Yes
+ : IsPattern::No;
+ // C guarantees that brace-init with fewer initializers than members in
+ // the aggregate will initialize the rest of the aggregate as-if it were
+ // static initialization. In turn static initialization guarantees that
+ // padding is initialized to zero bits. We could instead pattern-init if D
+ // has any ImplicitValueInitExpr, but that seems to be unintuitive
+ // behavior.
+ constant = constWithPadding(CGM, IsPattern::No,
+ replaceUndef(CGM, isPattern, constant));
+ }
}
if (!constant) {
- initializeWhatIsTechnicallyUninitialized();
+ initializeWhatIsTechnicallyUninitialized(Loc);
LValue lv = MakeAddrLValue(Loc, type);
lv.setNonGC(true);
return EmitExprAsInit(Init, &D, lv, capturedByInit);
@@ -1741,10 +1853,9 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
}
llvm::Type *BP = CGM.Int8Ty->getPointerTo(Loc.getAddressSpace());
- if (Loc.getType() != BP)
- Loc = Builder.CreateBitCast(Loc, BP);
-
- emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
+ emitStoresForConstant(
+ CGM, D, (Loc.getType() == BP) ? Loc : Builder.CreateBitCast(Loc, BP),
+ type.isVolatileQualified(), Builder, constant);
}
/// Emit an expression as an initializer for an object (variable, field, etc.)
@@ -1789,7 +1900,7 @@ void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
if (isa<VarDecl>(D))
Overlap = AggValueSlot::DoesNotOverlap;
else if (auto *FD = dyn_cast<FieldDecl>(D))
- Overlap = overlapForFieldInit(FD);
+ Overlap = getOverlapForFieldInit(FD);
// TODO: how can we delay here if D is captured by its initializer?
EmitAggExpr(init, AggValueSlot::forLValue(lvalue,
AggValueSlot::IsDestructed,
@@ -1828,7 +1939,7 @@ void CodeGenFunction::emitAutoVarTypeCleanup(
if (emission.NRVOFlag) {
assert(!type->isArrayType());
CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
- EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, dtor,
+ EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, type, dtor,
emission.NRVOFlag);
return;
}
@@ -2199,7 +2310,7 @@ void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
}
/// Lazily declare the @llvm.lifetime.start intrinsic.
-llvm::Constant *CodeGenModule::getLLVMLifetimeStartFn() {
+llvm::Function *CodeGenModule::getLLVMLifetimeStartFn() {
if (LifetimeStartFn)
return LifetimeStartFn;
LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
@@ -2208,7 +2319,7 @@ llvm::Constant *CodeGenModule::getLLVMLifetimeStartFn() {
}
/// Lazily declare the @llvm.lifetime.end intrinsic.
-llvm::Constant *CodeGenModule::getLLVMLifetimeEndFn() {
+llvm::Function *CodeGenModule::getLLVMLifetimeEndFn() {
if (LifetimeEndFn)
return LifetimeEndFn;
LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
@@ -2417,6 +2528,13 @@ void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
getOpenMPRuntime().emitUserDefinedReduction(CGF, D);
}
+void CodeGenModule::EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D,
+ CodeGenFunction *CGF) {
+ if (!LangOpts.OpenMP || (!LangOpts.EmitAllDecls && !D->isUsed()))
+ return;
+ // FIXME: need to implement mapper code generation
+}
+
void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
- getOpenMPRuntime().checkArchForUnifiedAddressing(*this, D);
+ getOpenMPRuntime().checkArchForUnifiedAddressing(D);
}
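Taken together, the CGDecl.cpp hunks above thread -ftrivial-auto-var-init=zero|pattern through padded constants (constWithPadding), VLAs, and __block storage. A rough source-level sketch of the locals these paths cover (illustrative only; the variable names are invented):

struct S {
  char c;                     // followed by padding that constWithPadding fills
  int  i;
};

int demo(int n) {
  S s;                        // indeterminate in standard C++; zero- or
                              // pattern-filled under -ftrivial-auto-var-init
  S partial[2] = {{'a', 1}};  // brace-init with fewer initializers: the rest
                              // (and its padding) is zeroed as in static init
  int vla[n];                 // GNU VLA: initialized by the memcpy loop above,
                              // guarded against the zero-sized case
  return s.i + partial[1].i + vla[0];
}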
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 9aa31f181e99..7a0605b8450a 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -1,9 +1,8 @@
//===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -15,6 +14,7 @@
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
+#include "TargetInfo.h"
#include "clang/Basic/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Intrinsics.h"
@@ -75,7 +75,7 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
// bails even if the attribute is not present.
if (D.isNoDestroy(CGF.getContext()))
return;
-
+
CodeGenModule &CGM = CGF.CGM;
// FIXME: __attribute__((cleanup)) ?
@@ -98,7 +98,7 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
return;
}
- llvm::Constant *Func;
+ llvm::FunctionCallee Func;
llvm::Constant *Argument;
// Special-case non-array C++ destructors, if they have the right signature.
@@ -118,10 +118,23 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
assert(!Record->hasTrivialDestructor());
CXXDestructorDecl *Dtor = Record->getDestructor();
- Func = CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete);
- Argument = llvm::ConstantExpr::getBitCast(
- Addr.getPointer(), CGF.getTypes().ConvertType(Type)->getPointerTo());
-
+ Func = CGM.getAddrAndTypeOfCXXStructor(GlobalDecl(Dtor, Dtor_Complete));
+ if (CGF.getContext().getLangOpts().OpenCL) {
+ auto DestAS =
+ CGM.getTargetCodeGenInfo().getAddrSpaceOfCxaAtexitPtrParam();
+ auto DestTy = CGF.getTypes().ConvertType(Type)->getPointerTo(
+ CGM.getContext().getTargetAddressSpace(DestAS));
+ auto SrcAS = D.getType().getQualifiers().getAddressSpace();
+ if (DestAS == SrcAS)
+ Argument = llvm::ConstantExpr::getBitCast(Addr.getPointer(), DestTy);
+ else
+ // FIXME: On addr space mismatch we are passing NULL. The generation
+ // of the global destructor function should be adjusted accordingly.
+ Argument = llvm::ConstantPointerNull::get(DestTy);
+ } else {
+ Argument = llvm::ConstantExpr::getBitCast(
+ Addr.getPointer(), CGF.getTypes().ConvertType(Type)->getPointerTo());
+ }
// Otherwise, the standard logic requires a helper function.
} else {
Func = CodeGenFunction(CGM)
@@ -150,7 +163,7 @@ void CodeGenFunction::EmitInvariantStart(llvm::Constant *Addr, CharUnits Size) {
llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
// Overloaded address space type.
llvm::Type *ObjectPtr[1] = {Int8PtrTy};
- llvm::Constant *InvariantStart = CGM.getIntrinsic(InvStartID, ObjectPtr);
+ llvm::Function *InvariantStart = CGM.getIntrinsic(InvStartID, ObjectPtr);
// Emit a call with the size in bytes of the object.
uint64_t Width = Size.getQuantity();
@@ -215,8 +228,8 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
/// Create a stub function, suitable for being passed to atexit,
/// which passes the given address to the given destructor function.
-llvm::Constant *CodeGenFunction::createAtExitStub(const VarDecl &VD,
- llvm::Constant *dtor,
+llvm::Function *CodeGenFunction::createAtExitStub(const VarDecl &VD,
+ llvm::FunctionCallee dtor,
llvm::Constant *addr) {
// Get the destructor function type, void(*)(void).
llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
@@ -227,19 +240,19 @@ llvm::Constant *CodeGenFunction::createAtExitStub(const VarDecl &VD,
}
const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
- llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(ty, FnName.str(),
- FI,
- VD.getLocation());
+ llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(
+ ty, FnName.str(), FI, VD.getLocation());
CodeGenFunction CGF(CGM);
- CGF.StartFunction(&VD, CGM.getContext().VoidTy, fn, FI, FunctionArgList());
+ CGF.StartFunction(GlobalDecl(&VD, DynamicInitKind::AtExit),
+ CGM.getContext().VoidTy, fn, FI, FunctionArgList());
llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);
// Make sure the call and the callee agree on calling convention.
if (llvm::Function *dtorFn =
- dyn_cast<llvm::Function>(dtor->stripPointerCasts()))
+ dyn_cast<llvm::Function>(dtor.getCallee()->stripPointerCasts()))
call->setCallingConv(dtorFn->getCallingConv());
CGF.FinishFunction();
@@ -249,7 +262,7 @@ llvm::Constant *CodeGenFunction::createAtExitStub(const VarDecl &VD,
/// Register a global destructor using the C atexit runtime function.
void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
- llvm::Constant *dtor,
+ llvm::FunctionCallee dtor,
llvm::Constant *addr) {
// Create a function which calls the destructor.
llvm::Constant *dtorStub = createAtExitStub(VD, dtor, addr);
@@ -261,10 +274,10 @@ void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) {
llvm::FunctionType *atexitTy =
llvm::FunctionType::get(IntTy, dtorStub->getType(), false);
- llvm::Constant *atexit =
+ llvm::FunctionCallee atexit =
CGM.CreateRuntimeFunction(atexitTy, "atexit", llvm::AttributeList(),
/*Local=*/true);
- if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit))
+ if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit.getCallee()))
atexitFn->setDoesNotThrow();
EmitNounwindRuntimeCall(atexit, dtorStub);
@@ -356,6 +369,10 @@ llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
!isInSanitizerBlacklist(SanitizerKind::KernelHWAddress, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
+ if (getLangOpts().Sanitize.has(SanitizerKind::MemTag) &&
+ !isInSanitizerBlacklist(SanitizerKind::MemTag, Fn, Loc))
+ Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
+
if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
!isInSanitizerBlacklist(SanitizerKind::Thread, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SanitizeThread);
@@ -468,7 +485,8 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
} else if (auto *IPA = D->getAttr<InitPriorityAttr>()) {
OrderGlobalInits Key(IPA->getPriority(), PrioritizedCXXGlobalInits.size());
PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
- } else if (isTemplateInstantiation(D->getTemplateSpecializationKind())) {
+ } else if (isTemplateInstantiation(D->getTemplateSpecializationKind()) ||
+ getContext().GetGVALinkageForVariable(D) == GVA_DiscardableODR) {
// C++ [basic.start.init]p2:
// Definitions of explicitly specialized class template static data
// members have ordered initialization. Other class template static data
@@ -482,6 +500,11 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
// minor startup time optimization. In the MS C++ ABI, there are no guard
// variables, so this COMDAT key is required for correctness.
AddGlobalCtor(Fn, 65535, COMDATKey);
+ if (getTarget().getCXXABI().isMicrosoft() && COMDATKey) {
+      // In the MS C++ ABI, template static data members are added to the
+      // linker directive to keep them alive.
+ addUsedGlobal(COMDATKey);
+ }
} else if (D->hasAttr<SelectAnyAttr>()) {
// SelectAny globals will be comdat-folded. Put the initializer into a
// COMDAT group associated with the global, so the initializers get folded
@@ -575,6 +598,19 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXGlobalInits);
AddGlobalCtor(Fn);
+  // In OpenCL, global init functions must be converted to kernels so that
+  // they can be launched from the host.
+  // FIXME: Some more work might be needed to handle destructors correctly.
+  // The current initialization function relies on function-pointer callbacks,
+  // which we can't support, especially between host and device.
+  // However, global destruction has little meaning without dynamic resource
+  // allocation on the device, and program-scope variables are destroyed by
+  // the runtime when the program is released.
+ if (getLangOpts().OpenCL) {
+ GenOpenCLArgMetadata(Fn);
+ Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
+ }
+
CXXGlobalInits.clear();
}
@@ -604,15 +640,21 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
CurEHLocation = D->getBeginLoc();
- StartFunction(GlobalDecl(D), getContext().VoidTy, Fn,
- getTypes().arrangeNullaryFunction(),
+ StartFunction(GlobalDecl(D, DynamicInitKind::Initializer),
+ getContext().VoidTy, Fn, getTypes().arrangeNullaryFunction(),
FunctionArgList(), D->getLocation(),
D->getInit()->getExprLoc());
// Use guarded initialization if the global variable is weak. This
// occurs for, e.g., instantiated static data members and
// definitions explicitly marked weak.
- if (Addr->hasWeakLinkage() || Addr->hasLinkOnceLinkage()) {
+ //
+ // Also use guarded initialization for a variable with dynamic TLS and
+ // unordered initialization. (If the initialization is ordered, the ABI
+ // layer will guard the whole-TU initialization for us.)
+ if (Addr->hasWeakLinkage() || Addr->hasLinkOnceLinkage() ||
+ (D->getTLSKind() == VarDecl::TLS_Dynamic &&
+ isTemplateInstantiation(D->getTemplateSpecializationKind()))) {
EmitCXXGuardedInit(*D, Addr, PerformInit);
} else {
EmitCXXGlobalVarDeclInit(*D, Addr, PerformInit);
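The extra condition above opts instantiated thread_local variables with dynamic, unordered initialization into per-variable guarded initialization. A sketch of such a variable (compute() is a hypothetical, non-constant initializer):

int compute();   // hypothetical runtime initializer

template <typename T> struct Holder {
  static thread_local int slot;
};
// An instantiated static data member has unordered initialization; with
// dynamic TLS this now gets a per-variable guard instead of relying on the
// ABI layer's whole-TU ordering.
template <typename T> thread_local int Holder<T>::slot = compute();

int use() { return Holder<int>::slot; }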
@@ -682,8 +724,8 @@ CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
void CodeGenFunction::GenerateCXXGlobalDtorsFunc(
llvm::Function *Fn,
- const std::vector<std::pair<llvm::WeakTrackingVH, llvm::Constant *>>
- &DtorsAndObjects) {
+ const std::vector<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
+ llvm::Constant *>> &DtorsAndObjects) {
{
auto NL = ApplyDebugLocation::CreateEmpty(*this);
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
@@ -693,9 +735,11 @@ void CodeGenFunction::GenerateCXXGlobalDtorsFunc(
// Emit the dtors, in reverse order from construction.
for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) {
- llvm::Value *Callee = DtorsAndObjects[e - i - 1].first;
- llvm::CallInst *CI = Builder.CreateCall(Callee,
- DtorsAndObjects[e - i - 1].second);
+ llvm::FunctionType *CalleeTy;
+ llvm::Value *Callee;
+ llvm::Constant *Arg;
+ std::tie(CalleeTy, Callee, Arg) = DtorsAndObjects[e - i - 1];
+ llvm::CallInst *CI = Builder.CreateCall(CalleeTy, Callee, Arg);
// Make sure the call and the callee agree on calling convention.
if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
CI->setCallingConv(F->getCallingConv());
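Most of the CGDeclCXX.cpp changes swap llvm::Constant* for llvm::FunctionCallee, which carries the callee value together with its FunctionType. A minimal sketch of the pattern, assuming the LLVM 9-era API (getOrInsertFunction returning FunctionCallee, CreateCall accepting it directly); the helper name is invented:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"

static llvm::CallInst *emitAtExitCall(llvm::Module &M, llvm::IRBuilder<> &B,
                                      llvm::Value *DtorStub) {
  llvm::LLVMContext &Ctx = M.getContext();
  llvm::FunctionType *AtExitTy = llvm::FunctionType::get(
      llvm::Type::getInt32Ty(Ctx), DtorStub->getType(), /*isVarArg=*/false);
  // FunctionCallee bundles {FunctionType*, Value*}, so call sites no longer
  // dig the function type out of a pointer's pointee type.
  llvm::FunctionCallee AtExit = M.getOrInsertFunction("atexit", AtExitTy);
  if (auto *F = llvm::dyn_cast<llvm::Function>(AtExit.getCallee()))
    F->setDoesNotThrow();
  return B.CreateCall(AtExit, DtorStub);  // CreateCall takes the callee as-is
}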
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 5756e13d2623..3b7a88a0b769 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -1,9 +1,8 @@
//===--- CGException.cpp - Emit LLVM Code for C++ exceptions ----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -22,7 +21,6 @@
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/TargetBuiltins.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/SaveAndRestore.h"
@@ -30,29 +28,29 @@
using namespace clang;
using namespace CodeGen;
-static llvm::Constant *getFreeExceptionFn(CodeGenModule &CGM) {
+static llvm::FunctionCallee getFreeExceptionFn(CodeGenModule &CGM) {
// void __cxa_free_exception(void *thrown_exception);
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_free_exception");
}
-static llvm::Constant *getUnexpectedFn(CodeGenModule &CGM) {
+static llvm::FunctionCallee getUnexpectedFn(CodeGenModule &CGM) {
// void __cxa_call_unexpected(void *thrown_exception);
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_call_unexpected");
}
-llvm::Constant *CodeGenModule::getTerminateFn() {
+llvm::FunctionCallee CodeGenModule::getTerminateFn() {
// void __terminate();
llvm::FunctionType *FTy =
- llvm::FunctionType::get(VoidTy, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(VoidTy, /*isVarArg=*/false);
StringRef name;
@@ -74,10 +72,10 @@ llvm::Constant *CodeGenModule::getTerminateFn() {
return CreateRuntimeFunction(FTy, name);
}
-static llvm::Constant *getCatchallRethrowFn(CodeGenModule &CGM,
- StringRef Name) {
+static llvm::FunctionCallee getCatchallRethrowFn(CodeGenModule &CGM,
+ StringRef Name) {
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FTy, Name);
}
@@ -240,8 +238,8 @@ const EHPersonality &EHPersonality::get(CodeGenFunction &CGF) {
return get(CGF.CGM, dyn_cast_or_null<FunctionDecl>(FD));
}
-static llvm::Constant *getPersonalityFn(CodeGenModule &CGM,
- const EHPersonality &Personality) {
+static llvm::FunctionCallee getPersonalityFn(CodeGenModule &CGM,
+ const EHPersonality &Personality) {
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty, true),
Personality.PersonalityFn,
llvm::AttributeList(), /*Local=*/true);
@@ -249,12 +247,13 @@ static llvm::Constant *getPersonalityFn(CodeGenModule &CGM,
static llvm::Constant *getOpaquePersonalityFn(CodeGenModule &CGM,
const EHPersonality &Personality) {
- llvm::Constant *Fn = getPersonalityFn(CGM, Personality);
+ llvm::FunctionCallee Fn = getPersonalityFn(CGM, Personality);
llvm::PointerType* Int8PtrTy = llvm::PointerType::get(
llvm::Type::getInt8Ty(CGM.getLLVMContext()),
CGM.getDataLayout().getProgramAddressSpace());
- return llvm::ConstantExpr::getBitCast(Fn, Int8PtrTy);
+ return llvm::ConstantExpr::getBitCast(cast<llvm::Constant>(Fn.getCallee()),
+ Int8PtrTy);
}
/// Check whether a landingpad instruction only uses C++ features.
@@ -345,12 +344,13 @@ void CodeGenModule::SimplifyPersonality() {
// Create the C++ personality function and kill off the old
// function.
- llvm::Constant *CXXFn = getPersonalityFn(*this, CXX);
+ llvm::FunctionCallee CXXFn = getPersonalityFn(*this, CXX);
// This can happen if the user is screwing with us.
- if (Fn->getType() != CXXFn->getType()) return;
+ if (Fn->getType() != CXXFn.getCallee()->getType())
+ return;
- Fn->replaceAllUsesWith(CXXFn);
+ Fn->replaceAllUsesWith(CXXFn.getCallee());
Fn->eraseFromParent();
}
@@ -977,15 +977,15 @@ static void emitWasmCatchPadBlock(CodeGenFunction &CGF,
// Create calls to wasm.get.exception and wasm.get.ehselector intrinsics.
// Before they are lowered appropriately later, they provide values for the
// exception and selector.
- llvm::Value *GetExnFn =
+ llvm::Function *GetExnFn =
CGF.CGM.getIntrinsic(llvm::Intrinsic::wasm_get_exception);
- llvm::Value *GetSelectorFn =
+ llvm::Function *GetSelectorFn =
CGF.CGM.getIntrinsic(llvm::Intrinsic::wasm_get_ehselector);
llvm::CallInst *Exn = CGF.Builder.CreateCall(GetExnFn, CPI);
CGF.Builder.CreateStore(Exn, CGF.getExceptionSlot());
llvm::CallInst *Selector = CGF.Builder.CreateCall(GetSelectorFn, CPI);
- llvm::Value *TypeIDFn = CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
+ llvm::Function *TypeIDFn = CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
// If there's only a single catch-all, branch directly to its handler.
if (CatchScope.getNumHandlers() == 1 &&
@@ -1069,7 +1069,7 @@ static void emitCatchDispatchBlock(CodeGenFunction &CGF,
CGF.EmitBlockAfterUses(dispatchBlock);
// Select the right handler.
- llvm::Value *llvm_eh_typeid_for =
+ llvm::Function *llvm_eh_typeid_for =
CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
// Load the selector value.
@@ -1259,7 +1259,9 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
}
assert(RethrowBlock != WasmCatchStartBlock && RethrowBlock->empty());
Builder.SetInsertPoint(RethrowBlock);
- CGM.getCXXABI().emitRethrow(*this, /*isNoReturn=*/true);
+ llvm::Function *RethrowInCatchFn =
+ CGM.getIntrinsic(llvm::Intrinsic::wasm_rethrow_in_catch);
+ EmitNoreturnRuntimeCallOrInvoke(RethrowInCatchFn, {});
}
EmitBlock(ContBB);
@@ -1269,9 +1271,10 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
namespace {
struct CallEndCatchForFinally final : EHScopeStack::Cleanup {
llvm::Value *ForEHVar;
- llvm::Value *EndCatchFn;
- CallEndCatchForFinally(llvm::Value *ForEHVar, llvm::Value *EndCatchFn)
- : ForEHVar(ForEHVar), EndCatchFn(EndCatchFn) {}
+ llvm::FunctionCallee EndCatchFn;
+ CallEndCatchForFinally(llvm::Value *ForEHVar,
+ llvm::FunctionCallee EndCatchFn)
+ : ForEHVar(ForEHVar), EndCatchFn(EndCatchFn) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
llvm::BasicBlock *EndCatchBB = CGF.createBasicBlock("finally.endcatch");
@@ -1290,15 +1293,15 @@ namespace {
struct PerformFinally final : EHScopeStack::Cleanup {
const Stmt *Body;
llvm::Value *ForEHVar;
- llvm::Value *EndCatchFn;
- llvm::Value *RethrowFn;
+ llvm::FunctionCallee EndCatchFn;
+ llvm::FunctionCallee RethrowFn;
llvm::Value *SavedExnVar;
PerformFinally(const Stmt *Body, llvm::Value *ForEHVar,
- llvm::Value *EndCatchFn,
- llvm::Value *RethrowFn, llvm::Value *SavedExnVar)
- : Body(Body), ForEHVar(ForEHVar), EndCatchFn(EndCatchFn),
- RethrowFn(RethrowFn), SavedExnVar(SavedExnVar) {}
+ llvm::FunctionCallee EndCatchFn,
+ llvm::FunctionCallee RethrowFn, llvm::Value *SavedExnVar)
+ : Body(Body), ForEHVar(ForEHVar), EndCatchFn(EndCatchFn),
+ RethrowFn(RethrowFn), SavedExnVar(SavedExnVar) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
// Enter a cleanup to call the end-catch function if one was provided.
@@ -1360,12 +1363,11 @@ namespace {
/// Enters a finally block for an implementation using zero-cost
/// exceptions. This is mostly general, but hard-codes some
/// language/ABI-specific behavior in the catch-all sections.
-void CodeGenFunction::FinallyInfo::enter(CodeGenFunction &CGF,
- const Stmt *body,
- llvm::Constant *beginCatchFn,
- llvm::Constant *endCatchFn,
- llvm::Constant *rethrowFn) {
- assert((beginCatchFn != nullptr) == (endCatchFn != nullptr) &&
+void CodeGenFunction::FinallyInfo::enter(CodeGenFunction &CGF, const Stmt *body,
+ llvm::FunctionCallee beginCatchFn,
+ llvm::FunctionCallee endCatchFn,
+ llvm::FunctionCallee rethrowFn) {
+ assert((!!beginCatchFn) == (!!endCatchFn) &&
"begin/end catch functions not paired");
assert(rethrowFn && "rethrow function is required");
@@ -1377,9 +1379,7 @@ void CodeGenFunction::FinallyInfo::enter(CodeGenFunction &CGF,
// In the latter case we need to pass it the exception object.
// But we can't use the exception slot because the @finally might
// have a landing pad (which would overwrite the exception slot).
- llvm::FunctionType *rethrowFnTy =
- cast<llvm::FunctionType>(
- cast<llvm::PointerType>(rethrowFn->getType())->getElementType());
+ llvm::FunctionType *rethrowFnTy = rethrowFn.getFunctionType();
SavedExnVar = nullptr;
if (rethrowFnTy->getNumParams())
SavedExnVar = CGF.CreateTempAlloca(CGF.Int8PtrTy, "finally.exn");
@@ -1545,7 +1545,7 @@ llvm::BasicBlock *CodeGenFunction::getTerminateFunclet() {
// __clang_call_terminate function.
if (getLangOpts().CPlusPlus &&
EHPersonality::get(*this).isWasmPersonality()) {
- llvm::Value *GetExnFn =
+ llvm::Function *GetExnFn =
CGM.getIntrinsic(llvm::Intrinsic::wasm_get_exception);
Exn = Builder.CreateCall(GetExnFn, CurrentFuncletPad);
}
@@ -1632,7 +1632,7 @@ struct PerformSEHFinally final : EHScopeStack::Cleanup {
if (CGF.IsOutlinedSEHHelper) {
FP = &CGF.CurFn->arg_begin()[1];
} else {
- llvm::Value *LocalAddrFn =
+ llvm::Function *LocalAddrFn =
CGM.getIntrinsic(llvm::Intrinsic::localaddress);
FP = CGF.Builder.CreateCall(LocalAddrFn);
}
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 34a921e2dc00..5a4b1188b711 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -1,9 +1,8 @@
//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -26,6 +25,7 @@
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/NSAPI.h"
+#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringExtras.h"
@@ -331,7 +331,7 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
switch (M->getStorageDuration()) {
case SD_Static:
case SD_Thread: {
- llvm::Constant *CleanupFn;
+ llvm::FunctionCallee CleanupFn;
llvm::Constant *CleanupArg;
if (E->getType()->isArrayType()) {
CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
@@ -340,8 +340,8 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
} else {
- CleanupFn = CGF.CGM.getAddrOfCXXStructor(ReferenceTemporaryDtor,
- StructorType::Complete);
+ CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
+ GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
CleanupArg = cast<llvm::Constant>(ReferenceTemporary.getPointer());
}
CGF.CGM.getCXXABI().registerGlobalDtor(
@@ -653,7 +653,8 @@ bool CodeGenFunction::sanitizePerformTypeCheck() const {
void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
llvm::Value *Ptr, QualType Ty,
CharUnits Alignment,
- SanitizerSet SkippedChecks) {
+ SanitizerSet SkippedChecks,
+ llvm::Value *ArraySize) {
if (!sanitizePerformTypeCheck())
return;
@@ -711,21 +712,28 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
if (SanOpts.has(SanitizerKind::ObjectSize) &&
!SkippedChecks.has(SanitizerKind::ObjectSize) &&
!Ty->isIncompleteType()) {
- uint64_t Size = getContext().getTypeSizeInChars(Ty).getQuantity();
-
- // The glvalue must refer to a large enough storage region.
- // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
- // to check this.
- // FIXME: Get object address space
- llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
- llvm::Value *Min = Builder.getFalse();
- llvm::Value *NullIsUnknown = Builder.getFalse();
- llvm::Value *CastAddr = Builder.CreateBitCast(Ptr, Int8PtrTy);
- llvm::Value *LargeEnough = Builder.CreateICmpUGE(
- Builder.CreateCall(F, {CastAddr, Min, NullIsUnknown}),
- llvm::ConstantInt::get(IntPtrTy, Size));
- Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
+ uint64_t TySize = getContext().getTypeSizeInChars(Ty).getQuantity();
+ llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
+ if (ArraySize)
+ Size = Builder.CreateMul(Size, ArraySize);
+
+ // Degenerate case: new X[0] does not need an objectsize check.
+ llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
+ if (!ConstantSize || !ConstantSize->isNullValue()) {
+ // The glvalue must refer to a large enough storage region.
+ // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
+ // to check this.
+ // FIXME: Get object address space
+ llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
+ llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
+ llvm::Value *Min = Builder.getFalse();
+ llvm::Value *NullIsUnknown = Builder.getFalse();
+ llvm::Value *Dynamic = Builder.getFalse();
+ llvm::Value *CastAddr = Builder.CreateBitCast(Ptr, Int8PtrTy);
+ llvm::Value *LargeEnough = Builder.CreateICmpUGE(
+ Builder.CreateCall(F, {CastAddr, Min, NullIsUnknown, Dynamic}), Size);
+ Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
+ }
}
uint64_t AlignVal = 0;
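The new ArraySize parameter lets the -fsanitize=object-size path scale the checked size by a dynamic element count, with a zero count as the degenerate case that now skips the check. A hypothetical example of the source pattern involved (struct and function names are made up):

struct Widget { int payload[4]; };

Widget *make(unsigned n) {
  // The produced glvalue is checked against sizeof(Widget) * n rather than a
  // single sizeof(Widget); 'new Widget[0]' no longer emits an objectsize check.
  return new Widget[n];
}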
@@ -1288,7 +1296,7 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::CXXUuidofExprClass:
return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
case Expr::LambdaExprClass:
- return EmitLambdaLValue(cast<LambdaExpr>(E));
+ return EmitAggExprToLValue(E);
case Expr::ExprWithCleanupsClass: {
const auto *cleanups = cast<ExprWithCleanups>(E);
@@ -1308,11 +1316,15 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return LV;
}
- case Expr::CXXDefaultArgExprClass:
- return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
+ case Expr::CXXDefaultArgExprClass: {
+ auto *DAE = cast<CXXDefaultArgExpr>(E);
+ CXXDefaultArgExprScope Scope(*this, DAE);
+ return EmitLValue(DAE->getExpr());
+ }
case Expr::CXXDefaultInitExprClass: {
- CXXDefaultInitExprScope Scope(*this);
- return EmitLValue(cast<CXXDefaultInitExpr>(E)->getExpr());
+ auto *DIE = cast<CXXDefaultInitExpr>(E);
+ CXXDefaultInitExprScope Scope(*this, DIE);
+ return EmitLValue(DIE->getExpr());
}
case Expr::CXXTypeidExprClass:
return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
@@ -1387,7 +1399,7 @@ static bool isConstantEmittableObjectType(QualType type) {
/// Can we constant-emit a load of a reference to a variable of the
/// given type? This is different from predicates like
-/// Decl::isUsableInConstantExpressions because we do want it to apply
+/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules). For example, we want to able
/// to do this with const float variables even if those variables
@@ -1411,10 +1423,11 @@ static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
}
/// Try to emit a reference to the given value without producing it as
-/// an l-value. This is actually more than an optimization: we can't
-/// produce an l-value for variables that we never actually captured
-/// in a block or lambda, which means const int variables or constexpr
-/// literals or similar.
+/// an l-value. This is just an optimization, but it avoids us needing
+/// to emit global copies of variables if they're named without triggering
+/// a formal use in a context where we can't emit a direct reference to them,
+/// for instance if a block or lambda or a member of a local class uses a
+/// const int variable or constexpr variable from an enclosing function.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
ValueDecl *value = refExpr->getDecl();
@@ -1485,7 +1498,7 @@ static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
return DeclRefExpr::Create(
CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
/*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
- ME->getType(), ME->getValueKind());
+ ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
}
return nullptr;
}
@@ -1879,7 +1892,6 @@ Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
Address VectorBasePtrPlusIx =
Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
- getContext().getTypeSizeInChars(EQT),
"vector.elt");
return VectorBasePtrPlusIx;
@@ -1899,7 +1911,7 @@ RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
llvm::Type *Types[] = { Ty };
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
+ llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
llvm::Value *Call = Builder.CreateCall(
F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
if (OrigTy->isPointerTy())
@@ -2019,7 +2031,7 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Cast the source to the storage type and shift it into place.
SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
- /*IsSigned=*/false);
+ /*isSigned=*/false);
llvm::Value *MaskedVal = SrcVal;
// See if there are other bits in the bitfield's storage we'll need to load
@@ -2160,7 +2172,7 @@ void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
llvm::Type *Types[] = { Ty };
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
+ llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
llvm::Value *Value = Src.getScalarVal();
if (OrigTy->isPointerTy())
Value = Builder.CreatePtrToInt(Value, Ty);
@@ -2284,15 +2296,22 @@ static LValue EmitThreadPrivateVarDeclLValue(
return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
-static Address emitDeclTargetLinkVarDeclLValue(CodeGenFunction &CGF,
- const VarDecl *VD, QualType T) {
+static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF,
+ const VarDecl *VD, QualType T) {
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
- if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_To)
+  // Return an invalid address if the variable is MT_To and unified
+  // memory is not enabled. For all other cases (MT_Link, or MT_To with
+  // unified memory enabled), return a valid address.
+ if (!Res || (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory()))
return Address::invalid();
- assert(*Res == OMPDeclareTargetDeclAttr::MT_Link && "Expected link clause");
+ assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
+ (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) &&
+ "Expected link clause OR to clause with unified memory enabled.");
QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
- Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetLink(VD);
+ Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
}
@@ -2348,7 +2367,7 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
// Check if the variable is marked as declare target with link clause in
// device codegen.
if (CGF.getLangOpts().OpenMPIsDevice) {
- Address Addr = emitDeclTargetLinkVarDeclLValue(CGF, VD, T);
+ Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
if (Addr.isValid())
return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
@@ -2440,45 +2459,101 @@ static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
return LValue::MakeGlobalReg(Address(Ptr, Alignment), VD->getType());
}
+/// Determine whether we can emit a reference to \p VD from the current
+/// context, despite not necessarily having seen an odr-use of the variable in
+/// this context.
+static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
+ const DeclRefExpr *E,
+ const VarDecl *VD,
+ bool IsConstant) {
+ // For a variable declared in an enclosing scope, do not emit a spurious
+ // reference even if we have a capture, as that will emit an unwarranted
+ // reference to our capture state, and will likely generate worse code than
+ // emitting a local copy.
+ if (E->refersToEnclosingVariableOrCapture())
+ return false;
+
+ // For a local declaration declared in this function, we can always reference
+ // it even if we don't have an odr-use.
+ if (VD->hasLocalStorage()) {
+ return VD->getDeclContext() ==
+ dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
+ }
+
+ // For a global declaration, we can emit a reference to it if we know
+ // for sure that we are able to emit a definition of it.
+ VD = VD->getDefinition(CGF.getContext());
+ if (!VD)
+ return false;
+
+ // Don't emit a spurious reference if it might be to a variable that only
+ // exists on a different device / target.
+ // FIXME: This is unnecessarily broad. Check whether this would actually be a
+ // cross-target reference.
+ if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
+ CGF.getLangOpts().OpenCL) {
+ return false;
+ }
+
+ // We can emit a spurious reference only if the linkage implies that we'll
+ // be emitting a non-interposable symbol that will be retained until link
+ // time.
+ switch (CGF.CGM.getLLVMLinkageVarDefinition(VD, IsConstant)) {
+ case llvm::GlobalValue::ExternalLinkage:
+ case llvm::GlobalValue::LinkOnceODRLinkage:
+ case llvm::GlobalValue::WeakODRLinkage:
+ case llvm::GlobalValue::InternalLinkage:
+ case llvm::GlobalValue::PrivateLinkage:
+ return true;
+ default:
+ return false;
+ }
+}
+
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
const NamedDecl *ND = E->getDecl();
QualType T = E->getType();
+ assert(E->isNonOdrUse() != NOUR_Unevaluated &&
+ "should not emit an unevaluated operand");
+
if (const auto *VD = dyn_cast<VarDecl>(ND)) {
// Global Named registers access via intrinsics only
if (VD->getStorageClass() == SC_Register &&
VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
return EmitGlobalNamedRegister(VD, CGM);
- // A DeclRefExpr for a reference initialized by a constant expression can
- // appear without being odr-used. Directly emit the constant initializer.
- const Expr *Init = VD->getAnyInitializer(VD);
- const auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl);
- if (Init && !isa<ParmVarDecl>(VD) && VD->getType()->isReferenceType() &&
- VD->isUsableInConstantExpressions(getContext()) &&
- VD->checkInitIsICE() &&
- // Do not emit if it is private OpenMP variable.
- !(E->refersToEnclosingVariableOrCapture() &&
- ((CapturedStmtInfo &&
- (LocalDeclMap.count(VD->getCanonicalDecl()) ||
- CapturedStmtInfo->lookup(VD->getCanonicalDecl()))) ||
- LambdaCaptureFields.lookup(VD->getCanonicalDecl()) ||
- (BD && BD->capturesVariable(VD))))) {
- llvm::Constant *Val =
- ConstantEmitter(*this).emitAbstract(E->getLocation(),
- *VD->evaluateValue(),
- VD->getType());
- assert(Val && "failed to emit reference constant expression");
- // FIXME: Eventually we will want to emit vector element references.
-
- // Should we be using the alignment of the constant pointer we emitted?
- CharUnits Alignment = getNaturalTypeAlignment(E->getType(),
- /* BaseInfo= */ nullptr,
- /* TBAAInfo= */ nullptr,
- /* forPointeeType= */ true);
- return MakeAddrLValue(Address(Val, Alignment), T, AlignmentSource::Decl);
+ // If this DeclRefExpr does not constitute an odr-use of the variable,
+ // we're not permitted to emit a reference to it in general, and it might
+ // not be captured if capture would be necessary for a use. Emit the
+ // constant value directly instead.
+ if (E->isNonOdrUse() == NOUR_Constant &&
+ (VD->getType()->isReferenceType() ||
+ !canEmitSpuriousReferenceToVariable(*this, E, VD, true))) {
+ VD->getAnyInitializer(VD);
+ llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
+ E->getLocation(), *VD->evaluateValue(), VD->getType());
+ assert(Val && "failed to emit constant expression");
+
+ Address Addr = Address::invalid();
+ if (!VD->getType()->isReferenceType()) {
+ // Spill the constant value to a global.
+ Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
+ getContext().getDeclAlign(VD));
+ } else {
+ // Should we be using the alignment of the constant pointer we emitted?
+ CharUnits Alignment =
+ getNaturalTypeAlignment(E->getType(),
+ /* BaseInfo= */ nullptr,
+ /* TBAAInfo= */ nullptr,
+ /* forPointeeType= */ true);
+ Addr = Address(Val, Alignment);
+ }
+ return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
+ // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
+
// Check for captured variables.
if (E->refersToEnclosingVariableOrCapture()) {
VD = VD->getCanonicalDecl();
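The rewritten branch above handles DeclRefExprs marked NOUR_Constant: reads that are not odr-uses are satisfied by emitting the constant value (spilled to an unnamed global for non-references) instead of referencing storage that may never exist. A small sketch of such a read:

void consume(int);

void f() {
  constexpr int limit = 42;
  auto g = [] {        // 'limit' is not captured by the lambda...
    consume(limit);    // ...because this read is not an odr-use, so codegen
  };                   // emits the constant 42 directly (NOUR_Constant).
  g();
}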
@@ -2510,7 +2585,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
// FIXME: We should be able to assert this for FunctionDecls as well!
// FIXME: We should be able to assert this for all DeclRefExprs, not just
// those with a valid source location.
- assert((ND->isUsed(false) || !isa<VarDecl>(ND) ||
+ assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
!E->getLocation().isValid()) &&
"Should not use decl without marking it used!");
@@ -2536,7 +2611,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
// some reason; most likely, because it's in an outer function.
} else if (VD->isStaticLocal()) {
addr = Address(CGM.getOrCreateStaticVarDecl(
- *VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false)),
+ *VD, CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false)),
getContext().getDeclAlign(VD));
// No other cases for now.
@@ -2851,16 +2926,13 @@ enum class CheckRecoverableKind {
}
static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
- assert(llvm::countPopulation(Kind) == 1);
- switch (Kind) {
- case SanitizerKind::Vptr:
+ assert(Kind.countPopulation() == 1);
+ if (Kind == SanitizerKind::Function || Kind == SanitizerKind::Vptr)
return CheckRecoverableKind::AlwaysRecoverable;
- case SanitizerKind::Return:
- case SanitizerKind::Unreachable:
+ else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable)
return CheckRecoverableKind::Unrecoverable;
- default:
+ else
return CheckRecoverableKind::Recoverable;
- }
}
namespace {
@@ -2910,7 +2982,7 @@ static void emitCheckHandlerCall(CodeGenFunction &CGF,
}
B.addAttribute(llvm::Attribute::UWTable);
- llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(
+ llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
FnType, FnName,
llvm::AttributeList::get(CGF.getLLVMContext(),
llvm::AttributeList::FunctionIndex, B),
@@ -3051,7 +3123,7 @@ void CodeGenFunction::EmitCfiSlowPathCheck(
bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind);
llvm::CallInst *CheckCall;
- llvm::Constant *SlowPathFn;
+ llvm::FunctionCallee SlowPathFn;
if (WithDiag) {
llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
auto *InfoPtr =
@@ -3073,7 +3145,8 @@ void CodeGenFunction::EmitCfiSlowPathCheck(
CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
}
- CGM.setDSOLocal(cast<llvm::GlobalValue>(SlowPathFn->stripPointerCasts()));
+ CGM.setDSOLocal(
+ cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
CheckCall->setDoesNotThrow();
EmitBlock(Cont);
@@ -3252,7 +3325,7 @@ Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
if (!E->getType()->isVariableArrayType()) {
assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
"Expected pointer to array");
- Addr = Builder.CreateStructGEP(Addr, 0, CharUnits::Zero(), "arraydecay");
+ Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
}
// The result of this decay conversion points to an array element within the
@@ -3346,8 +3419,20 @@ static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
CharUnits eltAlign =
getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
- llvm::Value *eltPtr = emitArraySubscriptGEP(
- CGF, addr.getPointer(), indices, inbounds, signedIndices, loc, name);
+ llvm::Value *eltPtr;
+ auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
+ if (!CGF.IsInPreservedAIRegion || !LastIndex) {
+ eltPtr = emitArraySubscriptGEP(
+ CGF, addr.getPointer(), indices, inbounds, signedIndices,
+ loc, name);
+ } else {
+ // Remember the original array subscript for bpf target
+ unsigned idx = LastIndex->getZExtValue();
+ eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(addr.getPointer(),
+ indices.size() - 1,
+ idx);
+ }
+
return Address(eltPtr, eltAlign);
}
@@ -3529,8 +3614,7 @@ static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
if (!BaseTy->isVariableArrayType()) {
assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
"Expected pointer to array");
- Addr = CGF.Builder.CreateStructGEP(Addr, 0, CharUnits::Zero(),
- "arraydecay");
+ Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
}
return CGF.Builder.CreateElementBitCast(Addr,
@@ -3665,7 +3749,7 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
Idx = Builder.CreateNSWMul(Idx, NumElements);
EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
!getLangOpts().isSignedOverflowDefined(),
- /*SignedIndices=*/false, E->getExprLoc());
+ /*signedIndices=*/false, E->getExprLoc());
} else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
// If this is A[i] where A is an array, the frontend will have decayed the
// base to be a ArrayToPointerDecay implicit cast. While correct, it is
@@ -3685,7 +3769,7 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
EltPtr = emitArraySubscriptGEP(
*this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
- /*SignedIndices=*/false, E->getExprLoc());
+ /*signedIndices=*/false, E->getExprLoc());
BaseInfo = ArrayLV.getBaseInfo();
TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
} else {
@@ -3694,7 +3778,7 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
IsLowerBound);
EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
!getLangOpts().isSignedOverflowDefined(),
- /*SignedIndices=*/false, E->getExprLoc());
+ /*signedIndices=*/false, E->getExprLoc());
}
return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
@@ -3808,31 +3892,63 @@ LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
return EmitLValueForField(LambdaLV, Field);
}
+/// Get the field index in the debug info. The debug-info description of a
+/// struct or union does not include unnamed bit-fields.
+unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
+ unsigned FieldIndex) {
+ unsigned I = 0, Skipped = 0;
+
+ for (auto F : Rec->getDefinition()->fields()) {
+ if (I == FieldIndex)
+ break;
+ if (F->isUnnamedBitfield())
+ Skipped++;
+ I++;
+ }
+
+ return FieldIndex - Skipped;
+}
+
+/// Get the address of a zero-sized field within a record. The resulting
+/// address doesn't necessarily have the right type.
+static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
+ const FieldDecl *Field) {
+ CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
+ CGF.getContext().getFieldOffset(Field));
+ if (Offset.isZero())
+ return Base;
+ Base = CGF.Builder.CreateElementBitCast(Base, CGF.Int8Ty);
+ return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
+}
+
/// Drill down to the storage of a field without walking into
/// reference types.
///
/// The resulting address doesn't necessarily have the right type.
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
const FieldDecl *field) {
+ if (field->isZeroSize(CGF.getContext()))
+ return emitAddrOfZeroSizeField(CGF, base, field);
+
const RecordDecl *rec = field->getParent();
unsigned idx =
CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
- CharUnits offset;
- // Adjust the alignment down to the given offset.
- // As a special case, if the LLVM field index is 0, we know that this
- // is zero.
- assert((idx != 0 || CGF.getContext().getASTRecordLayout(rec)
- .getFieldOffset(field->getFieldIndex()) == 0) &&
- "LLVM field at index zero had non-zero offset?");
- if (idx != 0) {
- auto &recLayout = CGF.getContext().getASTRecordLayout(rec);
- auto offsetInBits = recLayout.getFieldOffset(field->getFieldIndex());
- offset = CGF.getContext().toCharUnitsFromBits(offsetInBits);
- }
+ return CGF.Builder.CreateStructGEP(base, idx, field->getName());
+}
+
+static Address emitPreserveStructAccess(CodeGenFunction &CGF, Address base,
+ const FieldDecl *field) {
+ const RecordDecl *rec = field->getParent();
+ llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateRecordType(
+ CGF.getContext().getRecordType(rec), rec->getLocation());
+
+ unsigned idx =
+ CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
- return CGF.Builder.CreateStructGEP(base, idx, offset, field->getName());
+ return CGF.Builder.CreatePreserveStructAccessIndex(
+ base, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
}
static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
@@ -3866,8 +3982,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
unsigned Idx = RL.getLLVMFieldNo(field);
if (Idx != 0)
// For structs, we GEP to the field that the record layout suggests.
- Addr = Builder.CreateStructGEP(Addr, Idx, Info.StorageOffset,
- field->getName());
+ Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
// Get the access type.
llvm::Type *FieldIntTy =
llvm::Type::getIntNTy(getLLVMContext(), Info.StorageSize);
@@ -3943,9 +4058,24 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
// a barrier every time CXXRecord field with vptr is referenced.
addr = Address(Builder.CreateLaunderInvariantGroup(addr.getPointer()),
addr.getAlignment());
+
+ if (IsInPreservedAIRegion) {
+ // Remember the original union field index
+ llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
+ getContext().getRecordType(rec), rec->getLocation());
+ addr = Address(
+ Builder.CreatePreserveUnionAccessIndex(
+ addr.getPointer(), getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
+ addr.getAlignment());
+ }
} else {
- // For structs, we GEP to the field that the record layout suggests.
- addr = emitAddrOfFieldStorage(*this, addr, field);
+
+ if (!IsInPreservedAIRegion)
+ // For structs, we GEP to the field that the record layout suggests.
+ addr = emitAddrOfFieldStorage(*this, addr, field);
+ else
+ // Remember the original struct field index
+ addr = emitPreserveStructAccess(*this, addr, field);
// If this is a reference field, load the reference right now.
if (FieldType->isReferenceType()) {
@@ -4137,6 +4267,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
switch (E->getCastKind()) {
case CK_ToVoid:
case CK_BitCast:
+ case CK_LValueToRValueBitCast:
case CK_ArrayToPointerDecay:
case CK_FunctionToPointerDecay:
case CK_NullToMemberPointer:
@@ -4175,6 +4306,8 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_IntToOCLSampler:
case CK_FixedPointCast:
case CK_FixedPointToBoolean:
+ case CK_FixedPointToIntegral:
+ case CK_IntegralToFixedPoint:
return EmitUnsupportedLValue(E, "unexpected cast lvalue");
case CK_Dependent:
@@ -4548,13 +4681,6 @@ CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
}
-LValue
-CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) {
- AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
- EmitLambdaExpr(E, Slot);
- return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
-}
-
LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
RValue RV = EmitObjCMessageExpr(E);
@@ -4630,17 +4756,6 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
const Decl *TargetDecl =
OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
- // We can only guarantee that a function is called from the correct
- // context/function based on the appropriate target attributes,
- // so only check in the case where we have both always_inline and target
- // since otherwise we could be making a conditional call after a check for
- // the proper cpu features (and it won't cause code generation issues due to
- // function based code generation).
- if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
- TargetDecl->hasAttr<TargetAttr>())
- checkTargetFeatures(E, FD);
-
CalleeType = getContext().getCanonicalType(CalleeType);
auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
@@ -4688,7 +4803,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
EmitCheckTypeDescriptor(CalleeType)};
EmitCheck(std::make_pair(CalleeRTTIMatch, SanitizerKind::Function),
- SanitizerHandler::FunctionTypeMismatch, StaticData, CalleePtr);
+ SanitizerHandler::FunctionTypeMismatch, StaticData,
+ {CalleePtr, CalleeRTTI, FTRTTIConst});
Builder.CreateBr(Cont);
EmitBlock(Cont);
@@ -4768,7 +4884,7 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
E->getDirectCallee(), /*ParamsToSkip*/ 0, Order);
const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
- Args, FnType, /*isChainCall=*/Chain);
+ Args, FnType, /*ChainCall=*/Chain);
// C99 6.5.2.2p6:
// If the expression that denotes the called function has a type
@@ -4799,7 +4915,19 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
Callee.setFunctionPointer(CalleePtr);
}
- return EmitCall(FnInfo, Callee, ReturnValue, Args, nullptr, E->getExprLoc());
+ llvm::CallBase *CallOrInvoke = nullptr;
+ RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &CallOrInvoke,
+ E->getExprLoc());
+
+  // Generate a function declaration DISubprogram so that it can be used
+  // in debug info about call sites.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl))
+ DI->EmitFuncDeclForCallSite(CallOrInvoke, QualType(FnType, 0),
+ CalleeDecl);
+ }
+
+ return Call;
}
LValue CodeGenFunction::
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index db49b3f28a59..695facd50b67 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -1,9 +1,8 @@
//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -166,10 +165,11 @@ public:
void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
Visit(DAE->getExpr());
}
void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
- CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
+ CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
Visit(DIE->getExpr());
}
void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
@@ -711,6 +711,25 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
break;
}
+ case CK_LValueToRValueBitCast: {
+ if (Dest.isIgnored()) {
+ CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
+ /*ignoreResult=*/true);
+ break;
+ }
+
+ LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
+ Address SourceAddress =
+ Builder.CreateElementBitCast(SourceLV.getAddress(), CGF.Int8Ty);
+ Address DestAddress =
+ Builder.CreateElementBitCast(Dest.getAddress(), CGF.Int8Ty);
+ llvm::Value *SizeVal = llvm::ConstantInt::get(
+ CGF.SizeTy,
+ CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity());
+ Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
+ break;
+ }
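// A hypothetical source pattern that produces this cast kind, assuming it
// comes from __builtin_bit_cast: a bit-cast between two trivially copyable
// aggregates of equal size is lowered to the byte-wise memcpy built above.
//
//   struct A { int x; float y; };
//   struct B { unsigned u[2]; };
//   B convert(const A &a) { return __builtin_bit_cast(B, a); }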
+
case CK_DerivedToBase:
case CK_BaseToDerived:
case CK_UncheckedDerivedToBase: {
@@ -760,8 +779,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
// Build a GEP to refer to the subobject.
Address valueAddr =
- CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0,
- CharUnits());
+ CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
valueDest = AggValueSlot::forAddr(valueAddr,
valueDest.getQualifiers(),
valueDest.isExternallyDestructed(),
@@ -781,11 +799,12 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
- Address valueAddr =
- Builder.CreateStructGEP(atomicSlot.getAddress(), 0, CharUnits());
+ Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
return EmitFinalDestCopy(valueType, rvalue);
}
+ case CK_AddressSpaceConversion:
+ return Visit(E->getSubExpr());
case CK_LValueToRValue:
// If we're loading from a volatile type, force the destination
@@ -797,6 +816,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
LLVM_FALLTHROUGH;
+
case CK_NoOp:
case CK_UserDefinedConversion:
case CK_ConstructorConversion:
@@ -852,10 +872,12 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_CopyAndAutoreleaseBlockObject:
case CK_BuiltinFnToFnPtr:
case CK_ZeroToOCLOpaqueType:
- case CK_AddressSpaceConversion:
+
case CK_IntToOCLSampler:
case CK_FixedPointCast:
case CK_FixedPointToBoolean:
+ case CK_FixedPointToIntegral:
+ case CK_IntegralToFixedPoint:
llvm_unreachable("cast kind invalid for aggregate types");
}
}
@@ -1264,7 +1286,52 @@ void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
AggValueSlot Slot = EnsureSlot(E->getType());
- CGF.EmitLambdaExpr(E, Slot);
+ LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());
+
+ // We'll need to enter cleanup scopes in case any of the element
+ // initializers throws an exception.
+ SmallVector<EHScopeStack::stable_iterator, 16> Cleanups;
+ llvm::Instruction *CleanupDominator = nullptr;
+
+ CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
+ for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
+ e = E->capture_init_end();
+ i != e; ++i, ++CurField) {
+ // Emit initialization
+ LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
+ if (CurField->hasCapturedVLAType()) {
+ CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
+ continue;
+ }
+
+ EmitInitializationToLValue(*i, LV);
+
+ // Push a destructor if necessary.
+ if (QualType::DestructionKind DtorKind =
+ CurField->getType().isDestructedType()) {
+ assert(LV.isSimple());
+ if (CGF.needsEHCleanup(DtorKind)) {
+ if (!CleanupDominator)
+ CleanupDominator = CGF.Builder.CreateAlignedLoad(
+ CGF.Int8Ty,
+ llvm::Constant::getNullValue(CGF.Int8PtrTy),
+ CharUnits::One()); // placeholder
+
+ CGF.pushDestroy(EHCleanup, LV.getAddress(), CurField->getType(),
+ CGF.getDestroyer(DtorKind), false);
+ Cleanups.push_back(CGF.EHStack.stable_begin());
+ }
+ }
+ }
+
+ // Deactivate all the partial cleanups in reverse order, which
+ // generally means popping them.
+ for (unsigned i = Cleanups.size(); i != 0; --i)
+ CGF.DeactivateCleanupBlock(Cleanups[i-1], CleanupDominator);
+
+ // Destroy the placeholder if we made one.
+ if (CleanupDominator)
+ CleanupDominator->eraseFromParent();
}
void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
@@ -1304,7 +1371,8 @@ static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
// (int*)0 - Null pointer expressions.
if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
return ICE->getCastKind() == CK_NullToPointer &&
- CGF.getTypes().isPointerZeroInitializable(E->getType());
+ CGF.getTypes().isPointerZeroInitializable(E->getType()) &&
+ !E->HasSideEffects(CGF.getContext());
// '\0'
if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
return CL->getValue() == 0;
@@ -1445,7 +1513,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased,
- CGF.overlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
+ CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
CGF.EmitAggExpr(E->getInit(curInitIndex++), AggSlot);
if (QualType::DestructionKind dtorKind =
@@ -1797,15 +1865,32 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
return LV;
}
-AggValueSlot::Overlap_t CodeGenFunction::overlapForBaseInit(
+AggValueSlot::Overlap_t
+CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) {
+ if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
+ return AggValueSlot::DoesNotOverlap;
+
+ // If the field lies entirely within the enclosing class's nvsize, its tail
+ // padding cannot overlap any already-initialized object. (The only subobjects
+ // with greater addresses that might already be initialized are vbases.)
+ const RecordDecl *ClassRD = FD->getParent();
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);
+ if (Layout.getFieldOffset(FD->getFieldIndex()) +
+ getContext().getTypeSize(FD->getType()) <=
+ (uint64_t)getContext().toBits(Layout.getNonVirtualSize()))
+ return AggValueSlot::DoesNotOverlap;
+
+ // The tail padding may contain values we need to preserve.
+ return AggValueSlot::MayOverlap;
+}
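// A hypothetical illustration of the check above:
//   struct Empty {};
//   struct Holder { [[no_unique_address]] Empty e; int x; };
// 'e' ends within Holder's non-virtual size, so initializing it cannot touch
// any other subobject and DoesNotOverlap is returned. A [[no_unique_address]]
// member whose type's size extends past the enclosing nvsize reports
// MayOverlap instead, since its tail padding may already hold other members.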
+
+AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit(
const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
- // Virtual bases are initialized first, in address order, so there's never
- // any overlap during their initialization.
- //
- // FIXME: Under P0840, this is no longer true: the tail padding of a vbase
- // of a field could be reused by a vbase of a containing class.
+ // If the most-derived object is a field declared with [[no_unique_address]],
+ // the tail padding of any virtual base could be reused for other subobjects
+ // of that field's class.
if (IsVirtual)
- return AggValueSlot::DoesNotOverlap;
+ return AggValueSlot::MayOverlap;
// If the base class is laid out entirely within the nvsize of the derived
// class, its tail padding cannot yet be initialized, so we can issue
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index 884ce96859c5..5476d13b7c46 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -1,9 +1,8 @@
//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -11,15 +10,15 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenFunction.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
+#include "CodeGenFunction.h"
#include "ConstantEmitter.h"
+#include "TargetInfo.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
@@ -42,13 +41,11 @@ commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
isa<CXXOperatorCallExpr>(CE));
assert(MD->isInstance() &&
"Trying to emit a member or operator call expr on a static method!");
- ASTContext &C = CGF.getContext();
// Push the this ptr.
const CXXRecordDecl *RD =
CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(MD);
- Args.add(RValue::get(This),
- RD ? C.getPointerType(C.getTypeDeclType(RD)) : C.VoidPtrTy);
+ Args.add(RValue::get(This), CGF.getTypes().DeriveThisType(RD, MD));
// If there is an implicit parameter (e.g. VTT), emit it.
if (ImplicitParam) {
@@ -56,7 +53,7 @@ commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
}
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
- RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size(), MD);
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
unsigned PrefixSize = Args.size() - 1;
// And the rest of the call args.
@@ -94,14 +91,28 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
}
RValue CodeGenFunction::EmitCXXDestructorCall(
- const CXXDestructorDecl *DD, const CGCallee &Callee, llvm::Value *This,
- llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE,
- StructorType Type) {
+ GlobalDecl Dtor, const CGCallee &Callee, llvm::Value *This, QualType ThisTy,
+ llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE) {
+ const CXXMethodDecl *DtorDecl = cast<CXXMethodDecl>(Dtor.getDecl());
+
+ assert(!ThisTy.isNull());
+ assert(ThisTy->getAsCXXRecordDecl() == DtorDecl->getParent() &&
+ "Pointer/Object mixup");
+
+ LangAS SrcAS = ThisTy.getAddressSpace();
+ LangAS DstAS = DtorDecl->getMethodQualifiers().getAddressSpace();
+ if (SrcAS != DstAS) {
+ QualType DstTy = DtorDecl->getThisType();
+ llvm::Type *NewType = CGM.getTypes().ConvertType(DstTy);
+ This = getTargetHooks().performAddrSpaceCast(*this, This, SrcAS, DstAS,
+ NewType);
+ }
+
CallArgList Args;
- commonEmitCXXMemberOrOperatorCall(*this, DD, This, ImplicitParam,
+ commonEmitCXXMemberOrOperatorCall(*this, DtorDecl, This, ImplicitParam,
ImplicitParamTy, CE, Args, nullptr);
- return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(DD, Type),
- Callee, ReturnValueSlot(), Args);
+ return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee,
+ ReturnValueSlot(), Args);
}
RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
@@ -253,13 +264,25 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
This = EmitLValue(Base);
}
+ if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
+ // This is the MSVC p->Ctor::Ctor(...) extension. We assume that's
+ // constructing a new complete object of type Ctor.
+ assert(!RtlArgs);
+ assert(ReturnValue.isNull() && "Constructor shouldn't have return value");
+ CallArgList Args;
+ commonEmitCXXMemberOrOperatorCall(
+ *this, Ctor, This.getPointer(), /*ImplicitParam=*/nullptr,
+ /*ImplicitParamTy=*/QualType(), CE, Args, nullptr);
+
+ EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
+ /*Delegating=*/false, This.getAddress(), Args,
+ AggValueSlot::DoesNotOverlap, CE->getExprLoc(),
+ /*NewPointerIsChecked=*/false);
+ return RValue::get(nullptr);
+ }
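// A hypothetical illustration of the MSVC extension handled above (accepted
// under -fms-extensions):
//   struct S { S(); S(int); };
//   void reinit(S *p) { p->S::S(42); }
// The explicit constructor call is emitted as a complete-object constructor
// run on the existing storage, and the call expression yields no value.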
if (MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion())) {
if (isa<CXXDestructorDecl>(MD)) return RValue::get(nullptr);
- if (isa<CXXConstructorDecl>(MD) &&
- cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
- return RValue::get(nullptr);
-
if (!MD->getParent()->mayInsertExtraPadding()) {
if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
// We don't like to generate the trivial copy/move assignment operator
@@ -272,20 +295,6 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
EmitAggregateAssign(This, RHS, CE->getType());
return RValue::get(This.getPointer());
}
-
- if (isa<CXXConstructorDecl>(MD) &&
- cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
- // Trivial move and copy ctor are the same.
- assert(CE->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
- const Expr *Arg = *CE->arg_begin();
- LValue RHS = EmitLValue(Arg);
- LValue Dest = MakeAddrLValue(This.getAddress(), Arg->getType());
- // This is the MSVC p->Ctor::Ctor(...) extension. We assume that's
- // constructing a new complete object of type Ctor.
- EmitAggregateCopy(Dest, RHS, Arg->getType(),
- AggValueSlot::DoesNotOverlap);
- return RValue::get(This.getPointer());
- }
llvm_unreachable("unknown trivial member function");
}
}
@@ -296,10 +305,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
const CGFunctionInfo *FInfo = nullptr;
if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
- Dtor, StructorType::Complete);
- else if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(CalleeDecl))
- FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
- Ctor, StructorType::Complete);
+ GlobalDecl(Dtor, Dtor_Complete));
else
FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);
@@ -322,14 +328,9 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
if (IsImplicitObjectCXXThis || isa<DeclRefExpr>(IOA))
SkippedChecks.set(SanitizerKind::Null, true);
}
- EmitTypeCheck(
- isa<CXXConstructorDecl>(CalleeDecl) ? CodeGenFunction::TCK_ConstructorCall
- : CodeGenFunction::TCK_MemberCall,
- CallLoc, This.getPointer(), C.getRecordType(CalleeDecl->getParent()),
- /*Alignment=*/CharUnits::Zero(), SkippedChecks);
-
- // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
- // 'CalleeDecl' instead.
+ EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc, This.getPointer(),
+ C.getRecordType(CalleeDecl->getParent()),
+ /*Alignment=*/CharUnits::Zero(), SkippedChecks);
// C++ [class.virtual]p12:
// Explicit qualification with the scope operator (5.1) suppresses the
@@ -339,7 +340,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
// because then we know what the type is.
bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
- if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
+ if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl)) {
assert(CE->arg_begin() == CE->arg_end() &&
"Destructor shouldn't have explicit parameters");
assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
@@ -348,33 +349,31 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
*this, Dtor, Dtor_Complete, This.getAddress(),
cast<CXXMemberCallExpr>(CE));
} else {
+ GlobalDecl GD(Dtor, Dtor_Complete);
CGCallee Callee;
- if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
- Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
+ if (getLangOpts().AppleKext && Dtor->isVirtual() && HasQualifier)
+ Callee = BuildAppleKextVirtualCall(Dtor, Qualifier, Ty);
else if (!DevirtualizedMethod)
- Callee = CGCallee::forDirect(
- CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete, FInfo, Ty),
- GlobalDecl(Dtor, Dtor_Complete));
+ Callee =
+ CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD, FInfo, Ty), GD);
else {
- const CXXDestructorDecl *DDtor =
- cast<CXXDestructorDecl>(DevirtualizedMethod);
- Callee = CGCallee::forDirect(
- CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty),
- GlobalDecl(DDtor, Dtor_Complete));
+ Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(GD, Ty), GD);
}
- EmitCXXMemberOrOperatorCall(
- CalleeDecl, Callee, ReturnValue, This.getPointer(),
- /*ImplicitParam=*/nullptr, QualType(), CE, nullptr);
+
+ QualType ThisTy =
+ IsArrow ? Base->getType()->getPointeeType() : Base->getType();
+ EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy,
+ /*ImplicitParam=*/nullptr,
+ /*ImplicitParamTy=*/QualType(), nullptr);
}
return RValue::get(nullptr);
}
+ // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
+ // 'CalleeDecl' instead.
+
CGCallee Callee;
- if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
- Callee = CGCallee::forDirect(
- CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty),
- GlobalDecl(Ctor, Ctor_Complete));
- } else if (UseVirtualCall) {
+ if (UseVirtualCall) {
Callee = CGCallee::forVirtual(CE, MD, This.getAddress(), Ty);
} else {
if (SanOpts.has(SanitizerKind::CFINVCall) &&
@@ -454,8 +453,7 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
// Push the this ptr.
Args.add(RValue::get(ThisPtrForCall), ThisType);
- RequiredArgs required =
- RequiredArgs::forPrototypePlus(FPT, 1, /*FD=*/nullptr);
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);
// And the rest of the call args
EmitCallArgs(Args, FPT, E->arguments());
@@ -633,12 +631,10 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
case CXXConstructExpr::CK_NonVirtualBase:
Type = Ctor_Base;
- }
+ }
- // Call the constructor.
- EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating,
- Dest.getAddress(), E, Dest.mayOverlap(),
- Dest.isSanitizerChecked());
+ // Call the constructor.
+ EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E);
}
}
@@ -702,9 +698,9 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
// We multiply the size of all dimensions for NumElements.
// e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
numElements =
- ConstantEmitter(CGF).tryEmitAbstract(e->getArraySize(), e->getType());
+ ConstantEmitter(CGF).tryEmitAbstract(*e->getArraySize(), e->getType());
if (!numElements)
- numElements = CGF.EmitScalarExpr(e->getArraySize());
+ numElements = CGF.EmitScalarExpr(*e->getArraySize());
assert(isa<llvm::IntegerType>(numElements->getType()));
  // The number of elements can have an arbitrary integer type;
@@ -714,7 +710,7 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
// important way: if the count is negative, it's an error even if
// the cookie size would bring the total size >= 0.
bool isSigned
- = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
+ = (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType();
llvm::IntegerType *numElementsType
= cast<llvm::IntegerType>(numElements->getType());
unsigned numElementsWidth = numElementsType->getBitWidth();
@@ -866,7 +862,7 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
// can be ignored because the result shouldn't be used if
// allocation fails.
if (typeSizeMultiplier != 1) {
- llvm::Value *umul_with_overflow
+ llvm::Function *umul_with_overflow
= CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);
llvm::Value *tsmV =
@@ -906,7 +902,7 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
if (cookieSize != 0) {
sizeWithoutCookie = size;
- llvm::Value *uadd_with_overflow
+ llvm::Function *uadd_with_overflow
= CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);
llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
@@ -1293,12 +1289,12 @@ static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
const FunctionDecl *CalleeDecl,
const FunctionProtoType *CalleeType,
const CallArgList &Args) {
- llvm::Instruction *CallOrInvoke;
+ llvm::CallBase *CallOrInvoke;
llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl));
RValue RV =
CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
- Args, CalleeType, /*chainCall=*/false),
+ Args, CalleeType, /*ChainCall=*/false),
Callee, ReturnValueSlot(), Args, &CallOrInvoke);
/// C++1y [expr.new]p10:
@@ -1309,15 +1305,8 @@ static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
- // FIXME: Add addAttribute to CallSite.
- if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(CallOrInvoke))
- CI->addAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::Builtin);
- else if (llvm::InvokeInst *II = dyn_cast<llvm::InvokeInst>(CallOrInvoke))
- II->addAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::Builtin);
- else
- llvm_unreachable("unexpected kind of call instruction");
+ CallOrInvoke->addAttribute(llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::Builtin);
}
return RV;
@@ -1715,10 +1704,16 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
result.getAlignment());
// Emit sanitizer checks for pointer value now, so that in the case of an
- // array it was checked only once and not at each constructor call.
+ // array it was checked only once and not at each constructor call. We may
+ // have already checked that the pointer is non-null.
+ // FIXME: If we have an array cookie and a potentially-throwing allocator,
+ // we'll null check the wrong pointer here.
+ SanitizerSet SkippedChecks;
+ SkippedChecks.set(SanitizerKind::Null, nullCheck);
EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall,
- E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
- result.getPointer(), allocType);
+ E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
+ result.getPointer(), allocType, result.getAlignment(),
+ SkippedChecks, numElements);
EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
allocSizeWithoutCookie);
@@ -1905,7 +1900,7 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
/*ForVirtualBase=*/false,
/*Delegating=*/false,
- Ptr);
+ Ptr, ElementType);
else if (auto Lifetime = ElementType.getObjCLifetime()) {
switch (Lifetime) {
case Qualifiers::OCL_None:
@@ -2253,21 +2248,3 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
return Value;
}
-
-void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
- LValue SlotLV = MakeAddrLValue(Slot.getAddress(), E->getType());
-
- CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
- for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
- e = E->capture_init_end();
- i != e; ++i, ++CurField) {
- // Emit initialization
- LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
- if (CurField->hasCapturedVLAType()) {
- auto VAT = CurField->getCapturedVLAType();
- EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
- } else {
- EmitInitializerForField(*CurField, LV, *i);
- }
- }
-}
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 2db693b44c90..6a5fb45ba259 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -1,9 +1,8 @@
//===--- CGExprComplex.cpp - Emit LLVM Code for Complex Exprs -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -214,10 +213,11 @@ public:
return Visit(E->getSubExpr());
}
ComplexPairTy VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
return Visit(DAE->getExpr());
}
ComplexPairTy VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
- CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
+ CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
return Visit(DIE->getExpr());
}
ComplexPairTy VisitExprWithCleanups(ExprWithCleanups *E) {
@@ -328,15 +328,12 @@ public:
Address CodeGenFunction::emitAddrOfRealComponent(Address addr,
QualType complexType) {
- CharUnits offset = CharUnits::Zero();
- return Builder.CreateStructGEP(addr, 0, offset, addr.getName() + ".realp");
+ return Builder.CreateStructGEP(addr, 0, addr.getName() + ".realp");
}
Address CodeGenFunction::emitAddrOfImagComponent(Address addr,
QualType complexType) {
- QualType eltType = complexType->castAs<ComplexType>()->getElementType();
- CharUnits offset = getContext().getTypeSizeInChars(eltType);
- return Builder.CreateStructGEP(addr, 1, offset, addr.getName() + ".imagp");
+ return Builder.CreateStructGEP(addr, 1, addr.getName() + ".imagp");
}
/// EmitLoadOfLValue - Given an RValue reference for a complex, emit code to
@@ -467,6 +464,15 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy), Op->getExprLoc());
}
+ case CK_LValueToRValueBitCast: {
+ LValue SourceLVal = CGF.EmitLValue(Op);
+ Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(),
+ CGF.ConvertTypeForMem(DestTy));
+ LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
+ DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
+ return EmitLoadOfLValue(DestLV, Op->getExprLoc());
+ }
+
case CK_BitCast:
case CK_BaseToDerived:
case CK_DerivedToBase:
@@ -513,6 +519,8 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
case CK_IntToOCLSampler:
case CK_FixedPointCast:
case CK_FixedPointToBoolean:
+ case CK_FixedPointToIntegral:
+ case CK_IntegralToFixedPoint:
llvm_unreachable("invalid cast kind for complex value");
case CK_FloatingRealToComplex:
@@ -628,12 +636,13 @@ ComplexPairTy ComplexExprEmitter::EmitComplexBinOpLibCall(StringRef LibCallName,
Args, cast<FunctionType>(FQTy.getTypePtr()), false);
llvm::FunctionType *FTy = CGF.CGM.getTypes().GetFunctionType(FuncInfo);
- llvm::Constant *Func = CGF.CGM.CreateBuiltinFunction(FTy, LibCallName);
+ llvm::FunctionCallee Func = CGF.CGM.CreateRuntimeFunction(
+ FTy, LibCallName, llvm::AttributeList(), true);
CGCallee Callee = CGCallee::forDirect(Func, FQTy->getAs<FunctionProtoType>());
- llvm::Instruction *Call;
+ llvm::CallBase *Call;
RValue Res = CGF.EmitCall(FuncInfo, Callee, ReturnValueSlot(), Args, &Call);
- cast<llvm::CallInst>(Call)->setCallingConv(CGF.CGM.getRuntimeCC());
+ Call->setCallingConv(CGF.CGM.getRuntimeCC());
return Res.getComplexVal();
}
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index c9475840aeeb..31cf2aef1ba0 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -1,9 +1,8 @@
//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -23,6 +22,8 @@
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
+#include "llvm/ADT/Sequence.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
@@ -31,345 +32,637 @@ using namespace clang;
using namespace CodeGen;
//===----------------------------------------------------------------------===//
-// ConstStructBuilder
+// ConstantAggregateBuilder
//===----------------------------------------------------------------------===//
namespace {
class ConstExprEmitter;
-class ConstStructBuilder {
- CodeGenModule &CGM;
- ConstantEmitter &Emitter;
-
- bool Packed;
- CharUnits NextFieldOffsetInChars;
- CharUnits LLVMStructAlignment;
- SmallVector<llvm::Constant *, 32> Elements;
-public:
- static llvm::Constant *BuildStruct(ConstantEmitter &Emitter,
- ConstExprEmitter *ExprEmitter,
- llvm::Constant *Base,
- InitListExpr *Updater,
- QualType ValTy);
- static llvm::Constant *BuildStruct(ConstantEmitter &Emitter,
- InitListExpr *ILE, QualType StructTy);
- static llvm::Constant *BuildStruct(ConstantEmitter &Emitter,
- const APValue &Value, QualType ValTy);
-
-private:
- ConstStructBuilder(ConstantEmitter &emitter)
- : CGM(emitter.CGM), Emitter(emitter), Packed(false),
- NextFieldOffsetInChars(CharUnits::Zero()),
- LLVMStructAlignment(CharUnits::One()) { }
-
- void AppendField(const FieldDecl *Field, uint64_t FieldOffset,
- llvm::Constant *InitExpr);
-
- void AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst);
-
- void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
- llvm::ConstantInt *InitExpr);
-
- void AppendPadding(CharUnits PadSize);
- void AppendTailPadding(CharUnits RecordSize);
-
- void ConvertStructToPacked();
+struct ConstantAggregateBuilderUtils {
+ CodeGenModule &CGM;
- bool Build(InitListExpr *ILE);
- bool Build(ConstExprEmitter *Emitter, llvm::Constant *Base,
- InitListExpr *Updater);
- bool Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase,
- const CXXRecordDecl *VTableClass, CharUnits BaseOffset);
- llvm::Constant *Finalize(QualType Ty);
+ ConstantAggregateBuilderUtils(CodeGenModule &CGM) : CGM(CGM) {}
CharUnits getAlignment(const llvm::Constant *C) const {
- if (Packed) return CharUnits::One();
return CharUnits::fromQuantity(
CGM.getDataLayout().getABITypeAlignment(C->getType()));
}
- CharUnits getSizeInChars(const llvm::Constant *C) const {
- return CharUnits::fromQuantity(
- CGM.getDataLayout().getTypeAllocSize(C->getType()));
+ CharUnits getSize(llvm::Type *Ty) const {
+ return CharUnits::fromQuantity(CGM.getDataLayout().getTypeAllocSize(Ty));
}
-};
-
-void ConstStructBuilder::
-AppendField(const FieldDecl *Field, uint64_t FieldOffset,
- llvm::Constant *InitCst) {
- const ASTContext &Context = CGM.getContext();
-
- CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(FieldOffset);
-
- AppendBytes(FieldOffsetInChars, InitCst);
-}
-
-void ConstStructBuilder::
-AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst) {
-
- assert(NextFieldOffsetInChars <= FieldOffsetInChars
- && "Field offset mismatch!");
-
- CharUnits FieldAlignment = getAlignment(InitCst);
-
- // Round up the field offset to the alignment of the field type.
- CharUnits AlignedNextFieldOffsetInChars =
- NextFieldOffsetInChars.alignTo(FieldAlignment);
- if (AlignedNextFieldOffsetInChars < FieldOffsetInChars) {
- // We need to append padding.
- AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars);
-
- assert(NextFieldOffsetInChars == FieldOffsetInChars &&
- "Did not add enough padding!");
+ CharUnits getSize(const llvm::Constant *C) const {
+ return getSize(C->getType());
+ }
- AlignedNextFieldOffsetInChars =
- NextFieldOffsetInChars.alignTo(FieldAlignment);
+ llvm::Constant *getPadding(CharUnits PadSize) const {
+ llvm::Type *Ty = CGM.Int8Ty;
+ if (PadSize > CharUnits::One())
+ Ty = llvm::ArrayType::get(Ty, PadSize.getQuantity());
+ return llvm::UndefValue::get(Ty);
}
- if (AlignedNextFieldOffsetInChars > FieldOffsetInChars) {
- assert(!Packed && "Alignment is wrong even with a packed struct!");
+ llvm::Constant *getZeroes(CharUnits ZeroSize) const {
+ llvm::Type *Ty = llvm::ArrayType::get(CGM.Int8Ty, ZeroSize.getQuantity());
+ return llvm::ConstantAggregateZero::get(Ty);
+ }
+};
- // Convert the struct to a packed struct.
- ConvertStructToPacked();
+/// Incremental builder for an llvm::Constant* holding a struct or array
+/// constant.
+class ConstantAggregateBuilder : private ConstantAggregateBuilderUtils {
+ /// The elements of the constant. These two arrays must have the same size;
+ /// Offsets[i] describes the offset of Elems[i] within the constant. The
+ /// elements are kept in increasing offset order, and we ensure that there
+  /// is no overlap: Offsets[i+1] >= Offsets[i] + getSize(Elems[i]).
+ ///
+ /// This may contain explicit padding elements (in order to create a
+ /// natural layout), but need not. Gaps between elements are implicitly
+ /// considered to be filled with undef.
+ llvm::SmallVector<llvm::Constant*, 32> Elems;
+ llvm::SmallVector<CharUnits, 32> Offsets;
+
+ /// The size of the constant (the maximum end offset of any added element).
+ /// May be larger than the end of Elems.back() if we split the last element
+ /// and removed some trailing undefs.
+ CharUnits Size = CharUnits::Zero();
+
+ /// This is true only if laying out Elems in order as the elements of a
+ /// non-packed LLVM struct will give the correct layout.
+ bool NaturalLayout = true;
+
+ bool split(size_t Index, CharUnits Hint);
+ Optional<size_t> splitAt(CharUnits Pos);
+
+ static llvm::Constant *buildFrom(CodeGenModule &CGM,
+ ArrayRef<llvm::Constant *> Elems,
+ ArrayRef<CharUnits> Offsets,
+ CharUnits StartOffset, CharUnits Size,
+ bool NaturalLayout, llvm::Type *DesiredTy,
+ bool AllowOversized);
- // After we pack the struct, we may need to insert padding.
- if (NextFieldOffsetInChars < FieldOffsetInChars) {
- // We need to append padding.
- AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars);
+public:
+ ConstantAggregateBuilder(CodeGenModule &CGM)
+ : ConstantAggregateBuilderUtils(CGM) {}
+
+ /// Update or overwrite the value starting at \p Offset with \c C.
+ ///
+ /// \param AllowOverwrite If \c true, this constant might overwrite (part of)
+ /// a constant that has already been added. This flag is only used to
+ /// detect bugs.
+ bool add(llvm::Constant *C, CharUnits Offset, bool AllowOverwrite);
+
+ /// Update or overwrite the bits starting at \p OffsetInBits with \p Bits.
+ bool addBits(llvm::APInt Bits, uint64_t OffsetInBits, bool AllowOverwrite);
+
+ /// Attempt to condense the value starting at \p Offset to a constant of type
+ /// \p DesiredTy.
+ void condense(CharUnits Offset, llvm::Type *DesiredTy);
+
+ /// Produce a constant representing the entire accumulated value, ideally of
+ /// the specified type. If \p AllowOversized, the constant might be larger
+ /// than implied by \p DesiredTy (eg, if there is a flexible array member).
+ /// Otherwise, the constant will be of exactly the same size as \p DesiredTy
+ /// even if we can't represent it as that type.
+ llvm::Constant *build(llvm::Type *DesiredTy, bool AllowOversized) const {
+ return buildFrom(CGM, Elems, Offsets, CharUnits::Zero(), Size,
+ NaturalLayout, DesiredTy, AllowOversized);
+ }
+};
- assert(NextFieldOffsetInChars == FieldOffsetInChars &&
- "Did not add enough padding!");
+template<typename Container, typename Range = std::initializer_list<
+ typename Container::value_type>>
+static void replace(Container &C, size_t BeginOff, size_t EndOff, Range Vals) {
+ assert(BeginOff <= EndOff && "invalid replacement range");
+ llvm::replace(C, C.begin() + BeginOff, C.begin() + EndOff, Vals);
+}
+
+bool ConstantAggregateBuilder::add(llvm::Constant *C, CharUnits Offset,
+ bool AllowOverwrite) {
+ // Common case: appending to a layout.
+ if (Offset >= Size) {
+ CharUnits Align = getAlignment(C);
+ CharUnits AlignedSize = Size.alignTo(Align);
+ if (AlignedSize > Offset || Offset.alignTo(Align) != Offset)
+ NaturalLayout = false;
+ else if (AlignedSize < Offset) {
+ Elems.push_back(getPadding(Offset - Size));
+ Offsets.push_back(Size);
}
- AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
+ Elems.push_back(C);
+ Offsets.push_back(Offset);
+ Size = Offset + getSize(C);
+ return true;
}
- // Add the field.
- Elements.push_back(InitCst);
- NextFieldOffsetInChars = AlignedNextFieldOffsetInChars +
- getSizeInChars(InitCst);
+ // Uncommon case: constant overlaps what we've already created.
+ llvm::Optional<size_t> FirstElemToReplace = splitAt(Offset);
+ if (!FirstElemToReplace)
+ return false;
- if (Packed)
- assert(LLVMStructAlignment == CharUnits::One() &&
- "Packed struct not byte-aligned!");
- else
- LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);
+ CharUnits CSize = getSize(C);
+ llvm::Optional<size_t> LastElemToReplace = splitAt(Offset + CSize);
+ if (!LastElemToReplace)
+ return false;
+
+ assert((FirstElemToReplace == LastElemToReplace || AllowOverwrite) &&
+ "unexpectedly overwriting field");
+
+ replace(Elems, *FirstElemToReplace, *LastElemToReplace, {C});
+ replace(Offsets, *FirstElemToReplace, *LastElemToReplace, {Offset});
+ Size = std::max(Size, Offset + CSize);
+ NaturalLayout = false;
+ return true;
}
-void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
- uint64_t FieldOffset,
- llvm::ConstantInt *CI) {
+bool ConstantAggregateBuilder::addBits(llvm::APInt Bits, uint64_t OffsetInBits,
+ bool AllowOverwrite) {
const ASTContext &Context = CGM.getContext();
- const uint64_t CharWidth = Context.getCharWidth();
- uint64_t NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
- if (FieldOffset > NextFieldOffsetInBits) {
- // We need to add padding.
- CharUnits PadSize = Context.toCharUnitsFromBits(
- llvm::alignTo(FieldOffset - NextFieldOffsetInBits,
- Context.getTargetInfo().getCharAlign()));
+ const uint64_t CharWidth = CGM.getContext().getCharWidth();
+
+ // Offset of where we want the first bit to go within the bits of the
+ // current char.
+ unsigned OffsetWithinChar = OffsetInBits % CharWidth;
+
+ // We split bit-fields up into individual bytes. Walk over the bytes and
+ // update them.
+ for (CharUnits OffsetInChars =
+ Context.toCharUnitsFromBits(OffsetInBits - OffsetWithinChar);
+ /**/; ++OffsetInChars) {
+ // Number of bits we want to fill in this char.
+ unsigned WantedBits =
+ std::min((uint64_t)Bits.getBitWidth(), CharWidth - OffsetWithinChar);
+
+ // Get a char containing the bits we want in the right places. The other
+ // bits have unspecified values.
+ llvm::APInt BitsThisChar = Bits;
+ if (BitsThisChar.getBitWidth() < CharWidth)
+ BitsThisChar = BitsThisChar.zext(CharWidth);
+ if (CGM.getDataLayout().isBigEndian()) {
+ // Figure out how much to shift by. We may need to left-shift if we have
+ // less than one byte of Bits left.
+ int Shift = Bits.getBitWidth() - CharWidth + OffsetWithinChar;
+ if (Shift > 0)
+ BitsThisChar.lshrInPlace(Shift);
+ else if (Shift < 0)
+ BitsThisChar = BitsThisChar.shl(-Shift);
+ } else {
+ BitsThisChar = BitsThisChar.shl(OffsetWithinChar);
+ }
+ if (BitsThisChar.getBitWidth() > CharWidth)
+ BitsThisChar = BitsThisChar.trunc(CharWidth);
- AppendPadding(PadSize);
- }
+ if (WantedBits == CharWidth) {
+ // Got a full byte: just add it directly.
+ add(llvm::ConstantInt::get(CGM.getLLVMContext(), BitsThisChar),
+ OffsetInChars, AllowOverwrite);
+ } else {
+ // Partial byte: update the existing integer if there is one. If we
+ // can't split out a 1-CharUnit range to update, then we can't add
+ // these bits and fail the entire constant emission.
+ llvm::Optional<size_t> FirstElemToUpdate = splitAt(OffsetInChars);
+ if (!FirstElemToUpdate)
+ return false;
+ llvm::Optional<size_t> LastElemToUpdate =
+ splitAt(OffsetInChars + CharUnits::One());
+ if (!LastElemToUpdate)
+ return false;
+ assert(*LastElemToUpdate - *FirstElemToUpdate < 2 &&
+ "should have at most one element covering one byte");
+
+ // Figure out which bits we want and discard the rest.
+ llvm::APInt UpdateMask(CharWidth, 0);
+ if (CGM.getDataLayout().isBigEndian())
+ UpdateMask.setBits(CharWidth - OffsetWithinChar - WantedBits,
+ CharWidth - OffsetWithinChar);
+ else
+ UpdateMask.setBits(OffsetWithinChar, OffsetWithinChar + WantedBits);
+ BitsThisChar &= UpdateMask;
+
+ if (*FirstElemToUpdate == *LastElemToUpdate ||
+ Elems[*FirstElemToUpdate]->isNullValue() ||
+ isa<llvm::UndefValue>(Elems[*FirstElemToUpdate])) {
+ // All existing bits are either zero or undef.
+ add(llvm::ConstantInt::get(CGM.getLLVMContext(), BitsThisChar),
+ OffsetInChars, /*AllowOverwrite*/ true);
+ } else {
+ llvm::Constant *&ToUpdate = Elems[*FirstElemToUpdate];
+ // In order to perform a partial update, we need the existing bitwise
+ // value, which we can only extract for a constant int.
+ auto *CI = dyn_cast<llvm::ConstantInt>(ToUpdate);
+ if (!CI)
+ return false;
+ // Because this is a 1-CharUnit range, the constant occupying it must
+ // be exactly one CharUnit wide.
+ assert(CI->getBitWidth() == CharWidth && "splitAt failed");
+ assert((!(CI->getValue() & UpdateMask) || AllowOverwrite) &&
+ "unexpectedly overwriting bitfield");
+ BitsThisChar |= (CI->getValue() & ~UpdateMask);
+ ToUpdate = llvm::ConstantInt::get(CGM.getLLVMContext(), BitsThisChar);
+ }
+ }
- uint64_t FieldSize = Field->getBitWidthValue(Context);
+ // Stop if we've added all the bits.
+ if (WantedBits == Bits.getBitWidth())
+ break;
- llvm::APInt FieldValue = CI->getValue();
+ // Remove the consumed bits from Bits.
+ if (!CGM.getDataLayout().isBigEndian())
+ Bits.lshrInPlace(WantedBits);
+ Bits = Bits.trunc(Bits.getBitWidth() - WantedBits);
- // Promote the size of FieldValue if necessary
- // FIXME: This should never occur, but currently it can because initializer
- // constants are cast to bool, and because clang is not enforcing bitfield
- // width limits.
- if (FieldSize > FieldValue.getBitWidth())
- FieldValue = FieldValue.zext(FieldSize);
+    // The remaining bits go at the start of the following bytes.
+ OffsetWithinChar = 0;
+ }
- // Truncate the size of FieldValue to the bit field size.
- if (FieldSize < FieldValue.getBitWidth())
- FieldValue = FieldValue.trunc(FieldSize);
+ return true;
+}
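// A hypothetical worked example for the loop above (little-endian, 8-bit
// chars): adding the 10-bit value 0b1101101011 at OffsetInBits == 4 touches
// two bytes. The low four bits (0b1011) are shifted into the high half of the
// first byte and merged with any constant already stored there; the remaining
// six bits (0b110110) are then placed at the start of the following byte.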
- NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
- if (FieldOffset < NextFieldOffsetInBits) {
- // Either part of the field or the entire field can go into the previous
- // byte.
- assert(!Elements.empty() && "Elements can't be empty!");
+/// Returns a position within Elems and Offsets such that all elements
+/// before the returned index end before Pos and all elements at or after
+/// the returned index begin at or after Pos. Splits elements as necessary
+/// to ensure this. Returns None if we find something we can't split.
+Optional<size_t> ConstantAggregateBuilder::splitAt(CharUnits Pos) {
+ if (Pos >= Size)
+ return Offsets.size();
+
+ while (true) {
+ auto FirstAfterPos = llvm::upper_bound(Offsets, Pos);
+ if (FirstAfterPos == Offsets.begin())
+ return 0;
+
+ // If we already have an element starting at Pos, we're done.
+ size_t LastAtOrBeforePosIndex = FirstAfterPos - Offsets.begin() - 1;
+ if (Offsets[LastAtOrBeforePosIndex] == Pos)
+ return LastAtOrBeforePosIndex;
+
+ // We found an element starting before Pos. Check for overlap.
+ if (Offsets[LastAtOrBeforePosIndex] +
+ getSize(Elems[LastAtOrBeforePosIndex]) <= Pos)
+ return LastAtOrBeforePosIndex + 1;
+
+ // Try to decompose it into smaller constants.
+ if (!split(LastAtOrBeforePosIndex, Pos))
+ return None;
+ }
+}
+
+/// Split the constant at index Index, if possible. Return true if we did.
+/// Hint indicates the location at which we'd like to split, but may be
+/// ignored.
+bool ConstantAggregateBuilder::split(size_t Index, CharUnits Hint) {
+ NaturalLayout = false;
+ llvm::Constant *C = Elems[Index];
+ CharUnits Offset = Offsets[Index];
+
+ if (auto *CA = dyn_cast<llvm::ConstantAggregate>(C)) {
+ replace(Elems, Index, Index + 1,
+ llvm::map_range(llvm::seq(0u, CA->getNumOperands()),
+ [&](unsigned Op) { return CA->getOperand(Op); }));
+ if (auto *Seq = dyn_cast<llvm::SequentialType>(CA->getType())) {
+ // Array or vector.
+ CharUnits ElemSize = getSize(Seq->getElementType());
+ replace(
+ Offsets, Index, Index + 1,
+ llvm::map_range(llvm::seq(0u, CA->getNumOperands()),
+ [&](unsigned Op) { return Offset + Op * ElemSize; }));
+ } else {
+ // Must be a struct.
+ auto *ST = cast<llvm::StructType>(CA->getType());
+ const llvm::StructLayout *Layout =
+ CGM.getDataLayout().getStructLayout(ST);
+ replace(Offsets, Index, Index + 1,
+ llvm::map_range(
+ llvm::seq(0u, CA->getNumOperands()), [&](unsigned Op) {
+ return Offset + CharUnits::fromQuantity(
+ Layout->getElementOffset(Op));
+ }));
+ }
+ return true;
+ }
- unsigned BitsInPreviousByte = NextFieldOffsetInBits - FieldOffset;
+ if (auto *CDS = dyn_cast<llvm::ConstantDataSequential>(C)) {
+ // FIXME: If possible, split into two ConstantDataSequentials at Hint.
+ CharUnits ElemSize = getSize(CDS->getElementType());
+ replace(Elems, Index, Index + 1,
+ llvm::map_range(llvm::seq(0u, CDS->getNumElements()),
+ [&](unsigned Elem) {
+ return CDS->getElementAsConstant(Elem);
+ }));
+ replace(Offsets, Index, Index + 1,
+ llvm::map_range(
+ llvm::seq(0u, CDS->getNumElements()),
+ [&](unsigned Elem) { return Offset + Elem * ElemSize; }));
+ return true;
+ }
- bool FitsCompletelyInPreviousByte =
- BitsInPreviousByte >= FieldValue.getBitWidth();
+ if (isa<llvm::ConstantAggregateZero>(C)) {
+ CharUnits ElemSize = getSize(C);
+ assert(Hint > Offset && Hint < Offset + ElemSize && "nothing to split");
+ replace(Elems, Index, Index + 1,
+ {getZeroes(Hint - Offset), getZeroes(Offset + ElemSize - Hint)});
+ replace(Offsets, Index, Index + 1, {Offset, Hint});
+ return true;
+ }
- llvm::APInt Tmp = FieldValue;
+ if (isa<llvm::UndefValue>(C)) {
+ replace(Elems, Index, Index + 1, {});
+ replace(Offsets, Index, Index + 1, {});
+ return true;
+ }
- if (!FitsCompletelyInPreviousByte) {
- unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;
+ // FIXME: We could split a ConstantInt if the need ever arose.
+ // We don't need to do this to handle bit-fields because we always eagerly
+ // split them into 1-byte chunks.
- if (CGM.getDataLayout().isBigEndian()) {
- Tmp.lshrInPlace(NewFieldWidth);
- Tmp = Tmp.trunc(BitsInPreviousByte);
+ return false;
+}
- // We want the remaining high bits.
- FieldValue = FieldValue.trunc(NewFieldWidth);
- } else {
- Tmp = Tmp.trunc(BitsInPreviousByte);
+static llvm::Constant *
+EmitArrayConstant(CodeGenModule &CGM, llvm::ArrayType *DesiredType,
+ llvm::Type *CommonElementType, unsigned ArrayBound,
+ SmallVectorImpl<llvm::Constant *> &Elements,
+ llvm::Constant *Filler);
+
+llvm::Constant *ConstantAggregateBuilder::buildFrom(
+ CodeGenModule &CGM, ArrayRef<llvm::Constant *> Elems,
+ ArrayRef<CharUnits> Offsets, CharUnits StartOffset, CharUnits Size,
+ bool NaturalLayout, llvm::Type *DesiredTy, bool AllowOversized) {
+ ConstantAggregateBuilderUtils Utils(CGM);
+
+ if (Elems.empty())
+ return llvm::UndefValue::get(DesiredTy);
+
+ auto Offset = [&](size_t I) { return Offsets[I] - StartOffset; };
+
+ // If we want an array type, see if all the elements are the same type and
+ // appropriately spaced.
+ if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(DesiredTy)) {
+ assert(!AllowOversized && "oversized array emission not supported");
+
+ bool CanEmitArray = true;
+ llvm::Type *CommonType = Elems[0]->getType();
+ llvm::Constant *Filler = llvm::Constant::getNullValue(CommonType);
+ CharUnits ElemSize = Utils.getSize(ATy->getElementType());
+ SmallVector<llvm::Constant*, 32> ArrayElements;
+ for (size_t I = 0; I != Elems.size(); ++I) {
+ // Skip zeroes; we'll use a zero value as our array filler.
+ if (Elems[I]->isNullValue())
+ continue;
- // We want the remaining low bits.
- FieldValue.lshrInPlace(BitsInPreviousByte);
- FieldValue = FieldValue.trunc(NewFieldWidth);
+ // All remaining elements must be the same type.
+ if (Elems[I]->getType() != CommonType ||
+ Offset(I) % ElemSize != 0) {
+ CanEmitArray = false;
+ break;
}
+ ArrayElements.resize(Offset(I) / ElemSize + 1, Filler);
+ ArrayElements.back() = Elems[I];
}
- Tmp = Tmp.zext(CharWidth);
- if (CGM.getDataLayout().isBigEndian()) {
- if (FitsCompletelyInPreviousByte)
- Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
- } else {
- Tmp = Tmp.shl(CharWidth - BitsInPreviousByte);
+ if (CanEmitArray) {
+ return EmitArrayConstant(CGM, ATy, CommonType, ATy->getNumElements(),
+ ArrayElements, Filler);
}
- // 'or' in the bits that go into the previous byte.
- llvm::Value *LastElt = Elements.back();
- if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(LastElt))
- Tmp |= Val->getValue();
- else {
- assert(isa<llvm::UndefValue>(LastElt));
- // If there is an undef field that we're adding to, it can either be a
- // scalar undef (in which case, we just replace it with our field) or it
- // is an array. If it is an array, we have to pull one byte off the
- // array so that the other undef bytes stay around.
- if (!isa<llvm::IntegerType>(LastElt->getType())) {
- // The undef padding will be a multibyte array, create a new smaller
- // padding and then an hole for our i8 to get plopped into.
- assert(isa<llvm::ArrayType>(LastElt->getType()) &&
- "Expected array padding of undefs");
- llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
- assert(AT->getElementType()->isIntegerTy(CharWidth) &&
- AT->getNumElements() != 0 &&
- "Expected non-empty array padding of undefs");
-
- // Remove the padding array.
- NextFieldOffsetInChars -= CharUnits::fromQuantity(AT->getNumElements());
- Elements.pop_back();
-
- // Add the padding back in two chunks.
- AppendPadding(CharUnits::fromQuantity(AT->getNumElements()-1));
- AppendPadding(CharUnits::One());
- assert(isa<llvm::UndefValue>(Elements.back()) &&
- Elements.back()->getType()->isIntegerTy(CharWidth) &&
- "Padding addition didn't work right");
- }
+ // Can't emit as an array, carry on to emit as a struct.
+ }
+
+ CharUnits DesiredSize = Utils.getSize(DesiredTy);
+ CharUnits Align = CharUnits::One();
+ for (llvm::Constant *C : Elems)
+ Align = std::max(Align, Utils.getAlignment(C));
+ CharUnits AlignedSize = Size.alignTo(Align);
+
+ bool Packed = false;
+ ArrayRef<llvm::Constant*> UnpackedElems = Elems;
+ llvm::SmallVector<llvm::Constant*, 32> UnpackedElemStorage;
+ if ((DesiredSize < AlignedSize && !AllowOversized) ||
+ DesiredSize.alignTo(Align) != DesiredSize) {
+ // The natural layout would be the wrong size; force use of a packed layout.
+ NaturalLayout = false;
+ Packed = true;
+ } else if (DesiredSize > AlignedSize) {
+ // The constant would be too small. Add padding to fix it.
+ UnpackedElemStorage.assign(Elems.begin(), Elems.end());
+ UnpackedElemStorage.push_back(Utils.getPadding(DesiredSize - Size));
+ UnpackedElems = UnpackedElemStorage;
+ }
+
+ // If we don't have a natural layout, insert padding as necessary.
+ // As we go, double-check to see if we can actually just emit Elems
+ // as a non-packed struct and do so opportunistically if possible.
+ llvm::SmallVector<llvm::Constant*, 32> PackedElems;
+ if (!NaturalLayout) {
+ CharUnits SizeSoFar = CharUnits::Zero();
+ for (size_t I = 0; I != Elems.size(); ++I) {
+ CharUnits Align = Utils.getAlignment(Elems[I]);
+ CharUnits NaturalOffset = SizeSoFar.alignTo(Align);
+ CharUnits DesiredOffset = Offset(I);
+ assert(DesiredOffset >= SizeSoFar && "elements out of order");
+
+ if (DesiredOffset != NaturalOffset)
+ Packed = true;
+ if (DesiredOffset != SizeSoFar)
+ PackedElems.push_back(Utils.getPadding(DesiredOffset - SizeSoFar));
+ PackedElems.push_back(Elems[I]);
+ SizeSoFar = DesiredOffset + Utils.getSize(Elems[I]);
+ }
+ // If we're using the packed layout, pad it out to the desired size if
+ // necessary.
+ if (Packed) {
+ assert((SizeSoFar <= DesiredSize || AllowOversized) &&
+ "requested size is too small for contents");
+ if (SizeSoFar < DesiredSize)
+ PackedElems.push_back(Utils.getPadding(DesiredSize - SizeSoFar));
}
+ }
- Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);
+ llvm::StructType *STy = llvm::ConstantStruct::getTypeForElements(
+ CGM.getLLVMContext(), Packed ? PackedElems : UnpackedElems, Packed);
- if (FitsCompletelyInPreviousByte)
- return;
+ // Pick the type to use. If the type is layout identical to the desired
+ // type then use it, otherwise use whatever the builder produced for us.
+ if (llvm::StructType *DesiredSTy = dyn_cast<llvm::StructType>(DesiredTy)) {
+ if (DesiredSTy->isLayoutIdentical(STy))
+ STy = DesiredSTy;
}
- while (FieldValue.getBitWidth() > CharWidth) {
- llvm::APInt Tmp;
+ return llvm::ConstantStruct::get(STy, Packed ? PackedElems : UnpackedElems);
+}
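As a reference sketch (plain C++ with made-up sizes, alignments and offsets, not the clang API), this is the layout decision buildFrom makes above: trust the natural ABI-aligned layout when it reproduces every element's required offset, otherwise fall back to a packed layout with explicit i8 padding.

#include <cstdio>
#include <vector>

struct Elem { unsigned Size, Align, DesiredOffset; };   // hypothetical inputs

int main() {
  std::vector<Elem> Elems = {{4, 4, 0}, {1, 1, 4}, {4, 4, 6}};
  unsigned SizeSoFar = 0;
  bool Packed = false;
  for (const Elem &E : Elems) {
    unsigned NaturalOffset = (SizeSoFar + E.Align - 1) / E.Align * E.Align;
    if (NaturalOffset != E.DesiredOffset)
      Packed = true;                          // natural layout would misplace this element
    if (E.DesiredOffset != SizeSoFar)
      std::printf("  [%u x i8] padding\n", E.DesiredOffset - SizeSoFar);
    std::printf("  element at offset %u\n", E.DesiredOffset);
    SizeSoFar = E.DesiredOffset + E.Size;
  }
  std::printf("%s layout, %u bytes\n", Packed ? "packed" : "natural", SizeSoFar);
}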
- if (CGM.getDataLayout().isBigEndian()) {
- // We want the high bits.
- Tmp =
- FieldValue.lshr(FieldValue.getBitWidth() - CharWidth).trunc(CharWidth);
- } else {
- // We want the low bits.
- Tmp = FieldValue.trunc(CharWidth);
+void ConstantAggregateBuilder::condense(CharUnits Offset,
+ llvm::Type *DesiredTy) {
+ CharUnits Size = getSize(DesiredTy);
- FieldValue.lshrInPlace(CharWidth);
- }
+ llvm::Optional<size_t> FirstElemToReplace = splitAt(Offset);
+ if (!FirstElemToReplace)
+ return;
+ size_t First = *FirstElemToReplace;
- Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
- ++NextFieldOffsetInChars;
+ llvm::Optional<size_t> LastElemToReplace = splitAt(Offset + Size);
+ if (!LastElemToReplace)
+ return;
+ size_t Last = *LastElemToReplace;
- FieldValue = FieldValue.trunc(FieldValue.getBitWidth() - CharWidth);
+ size_t Length = Last - First;
+ if (Length == 0)
+ return;
+
+ if (Length == 1 && Offsets[First] == Offset &&
+ getSize(Elems[First]) == Size) {
+ // Re-wrap single element structs if necessary. Otherwise, leave any single
+ // element constant of the right size alone even if it has the wrong type.
+ auto *STy = dyn_cast<llvm::StructType>(DesiredTy);
+ if (STy && STy->getNumElements() == 1 &&
+ STy->getElementType(0) == Elems[First]->getType())
+ Elems[First] = llvm::ConstantStruct::get(STy, Elems[First]);
+ return;
}
- assert(FieldValue.getBitWidth() > 0 &&
- "Should have at least one bit left!");
- assert(FieldValue.getBitWidth() <= CharWidth &&
- "Should not have more than a byte left!");
+ llvm::Constant *Replacement = buildFrom(
+ CGM, makeArrayRef(Elems).slice(First, Length),
+ makeArrayRef(Offsets).slice(First, Length), Offset, getSize(DesiredTy),
+ /*known to have natural layout=*/false, DesiredTy, false);
+ replace(Elems, First, Last, {Replacement});
+ replace(Offsets, First, Last, {Offset});
+}
- if (FieldValue.getBitWidth() < CharWidth) {
- if (CGM.getDataLayout().isBigEndian()) {
- unsigned BitWidth = FieldValue.getBitWidth();
+//===----------------------------------------------------------------------===//
+// ConstStructBuilder
+//===----------------------------------------------------------------------===//
- FieldValue = FieldValue.zext(CharWidth) << (CharWidth - BitWidth);
- } else
- FieldValue = FieldValue.zext(CharWidth);
- }
+class ConstStructBuilder {
+ CodeGenModule &CGM;
+ ConstantEmitter &Emitter;
+ ConstantAggregateBuilder &Builder;
+ CharUnits StartOffset;
- // Append the last element.
- Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
- FieldValue));
- ++NextFieldOffsetInChars;
-}
+public:
+ static llvm::Constant *BuildStruct(ConstantEmitter &Emitter,
+ InitListExpr *ILE, QualType StructTy);
+ static llvm::Constant *BuildStruct(ConstantEmitter &Emitter,
+ const APValue &Value, QualType ValTy);
+ static bool UpdateStruct(ConstantEmitter &Emitter,
+ ConstantAggregateBuilder &Const, CharUnits Offset,
+ InitListExpr *Updater);
-void ConstStructBuilder::AppendPadding(CharUnits PadSize) {
- if (PadSize.isZero())
- return;
+private:
+ ConstStructBuilder(ConstantEmitter &Emitter,
+ ConstantAggregateBuilder &Builder, CharUnits StartOffset)
+ : CGM(Emitter.CGM), Emitter(Emitter), Builder(Builder),
+ StartOffset(StartOffset) {}
- llvm::Type *Ty = CGM.Int8Ty;
- if (PadSize > CharUnits::One())
- Ty = llvm::ArrayType::get(Ty, PadSize.getQuantity());
+ bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::Constant *InitExpr, bool AllowOverwrite = false);
- llvm::Constant *C = llvm::UndefValue::get(Ty);
- Elements.push_back(C);
- assert(getAlignment(C) == CharUnits::One() &&
- "Padding must have 1 byte alignment!");
+ bool AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst,
+ bool AllowOverwrite = false);
- NextFieldOffsetInChars += getSizeInChars(C);
-}
+ bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::ConstantInt *InitExpr, bool AllowOverwrite = false);
-void ConstStructBuilder::AppendTailPadding(CharUnits RecordSize) {
- assert(NextFieldOffsetInChars <= RecordSize &&
- "Size mismatch!");
+ bool Build(InitListExpr *ILE, bool AllowOverwrite);
+ bool Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase,
+ const CXXRecordDecl *VTableClass, CharUnits BaseOffset);
+ llvm::Constant *Finalize(QualType Ty);
+};
- AppendPadding(RecordSize - NextFieldOffsetInChars);
-}
+bool ConstStructBuilder::AppendField(
+ const FieldDecl *Field, uint64_t FieldOffset, llvm::Constant *InitCst,
+ bool AllowOverwrite) {
+ const ASTContext &Context = CGM.getContext();
-void ConstStructBuilder::ConvertStructToPacked() {
- SmallVector<llvm::Constant *, 16> PackedElements;
- CharUnits ElementOffsetInChars = CharUnits::Zero();
+ CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(FieldOffset);
- for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
- llvm::Constant *C = Elements[i];
+ return AppendBytes(FieldOffsetInChars, InitCst, AllowOverwrite);
+}
- CharUnits ElementAlign = CharUnits::fromQuantity(
- CGM.getDataLayout().getABITypeAlignment(C->getType()));
- CharUnits AlignedElementOffsetInChars =
- ElementOffsetInChars.alignTo(ElementAlign);
+bool ConstStructBuilder::AppendBytes(CharUnits FieldOffsetInChars,
+ llvm::Constant *InitCst,
+ bool AllowOverwrite) {
+ return Builder.add(InitCst, StartOffset + FieldOffsetInChars, AllowOverwrite);
+}
- if (AlignedElementOffsetInChars > ElementOffsetInChars) {
- // We need some padding.
- CharUnits NumChars =
- AlignedElementOffsetInChars - ElementOffsetInChars;
+bool ConstStructBuilder::AppendBitField(
+ const FieldDecl *Field, uint64_t FieldOffset, llvm::ConstantInt *CI,
+ bool AllowOverwrite) {
+ uint64_t FieldSize = Field->getBitWidthValue(CGM.getContext());
+ llvm::APInt FieldValue = CI->getValue();
- llvm::Type *Ty = CGM.Int8Ty;
- if (NumChars > CharUnits::One())
- Ty = llvm::ArrayType::get(Ty, NumChars.getQuantity());
+ // Promote the size of FieldValue if necessary
+ // FIXME: This should never occur, but currently it can because initializer
+ // constants are cast to bool, and because clang is not enforcing bitfield
+ // width limits.
+ if (FieldSize > FieldValue.getBitWidth())
+ FieldValue = FieldValue.zext(FieldSize);
- llvm::Constant *Padding = llvm::UndefValue::get(Ty);
- PackedElements.push_back(Padding);
- ElementOffsetInChars += getSizeInChars(Padding);
- }
+ // Truncate the size of FieldValue to the bit field size.
+ if (FieldSize < FieldValue.getBitWidth())
+ FieldValue = FieldValue.trunc(FieldSize);
- PackedElements.push_back(C);
- ElementOffsetInChars += getSizeInChars(C);
+ return Builder.addBits(FieldValue,
+ CGM.getContext().toBits(StartOffset) + FieldOffset,
+ AllowOverwrite);
+}
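A quick sketch, with plain integers rather than llvm::APInt, of the width adjustment AppendBitField performs before handing the bits to addBits: the initializer constant is zero-extended or truncated to the declared bit-field width, so excess high bits are dropped.

#include <cstdint>
#include <cstdio>

// Keep only the low FieldSize bits of the initializer constant.
static uint64_t adjustToFieldWidth(uint64_t Value, unsigned FieldSize) {
  if (FieldSize < 64)
    Value &= (uint64_t(1) << FieldSize) - 1;
  return Value;
}

int main() {
  // e.g. `struct { unsigned x : 3; } s = { 10 };` ends up storing 10 % 8 == 2.
  std::printf("%llu\n", (unsigned long long)adjustToFieldWidth(10, 3));
}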
+
+static bool EmitDesignatedInitUpdater(ConstantEmitter &Emitter,
+ ConstantAggregateBuilder &Const,
+ CharUnits Offset, QualType Type,
+ InitListExpr *Updater) {
+ if (Type->isRecordType())
+ return ConstStructBuilder::UpdateStruct(Emitter, Const, Offset, Updater);
+
+ auto CAT = Emitter.CGM.getContext().getAsConstantArrayType(Type);
+ if (!CAT)
+ return false;
+ QualType ElemType = CAT->getElementType();
+ CharUnits ElemSize = Emitter.CGM.getContext().getTypeSizeInChars(ElemType);
+ llvm::Type *ElemTy = Emitter.CGM.getTypes().ConvertTypeForMem(ElemType);
+
+ llvm::Constant *FillC = nullptr;
+ if (Expr *Filler = Updater->getArrayFiller()) {
+ if (!isa<NoInitExpr>(Filler)) {
+ FillC = Emitter.tryEmitAbstractForMemory(Filler, ElemType);
+ if (!FillC)
+ return false;
+ }
}
- assert(ElementOffsetInChars == NextFieldOffsetInChars &&
- "Packing the struct changed its size!");
+ unsigned NumElementsToUpdate =
+ FillC ? CAT->getSize().getZExtValue() : Updater->getNumInits();
+ for (unsigned I = 0; I != NumElementsToUpdate; ++I, Offset += ElemSize) {
+ Expr *Init = nullptr;
+ if (I < Updater->getNumInits())
+ Init = Updater->getInit(I);
+
+ if (!Init && FillC) {
+ if (!Const.add(FillC, Offset, true))
+ return false;
+ } else if (!Init || isa<NoInitExpr>(Init)) {
+ continue;
+ } else if (InitListExpr *ChildILE = dyn_cast<InitListExpr>(Init)) {
+ if (!EmitDesignatedInitUpdater(Emitter, Const, Offset, ElemType,
+ ChildILE))
+ return false;
+ // Attempt to reduce the array element to a single constant if necessary.
+ Const.condense(Offset, ElemTy);
+ } else {
+ llvm::Constant *Val = Emitter.tryEmitPrivateForMemory(Init, ElemType);
+ if (!Const.add(Val, Offset, true))
+ return false;
+ }
+ }
- Elements.swap(PackedElements);
- LLVMStructAlignment = CharUnits::One();
- Packed = true;
+ return true;
}
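A rough standalone sketch of the array walk above, using a plain vector in place of ConstantAggregateBuilder and made-up values: the updater overwrites only the slots it explicitly initializes, slots past its last initializer take the array filler when one exists, and NoInitExpr slots keep the value from the base constant.

#include <cstdio>
#include <optional>
#include <vector>

int main() {
  std::vector<int> Base = {1, 2, 3, 4, 5};                  // previously emitted constant
  std::vector<std::optional<int>> Inits = {{}, 9, {}, 7};   // std::nullopt stands in for NoInitExpr
  std::optional<int> Filler = 0;                            // array filler, if the updater has one

  size_t NumToVisit = Filler ? Base.size() : Inits.size();
  for (size_t I = 0; I != NumToVisit; ++I) {
    if (I < Inits.size() && Inits[I])
      Base[I] = *Inits[I];                  // explicit overwrite
    else if (I >= Inits.size() && Filler)
      Base[I] = *Filler;                    // filler covers the tail of the array
    // otherwise keep the base constant's value (NoInitExpr)
  }
  for (int V : Base) std::printf("%d ", V);                  // 1 9 3 7 0
  std::printf("\n");
}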
-bool ConstStructBuilder::Build(InitListExpr *ILE) {
+bool ConstStructBuilder::Build(InitListExpr *ILE, bool AllowOverwrite) {
RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- unsigned FieldNo = 0;
+ unsigned FieldNo = -1;
unsigned ElementNo = 0;
// Bail out if we have base classes. We could support these, but they only
@@ -379,35 +672,66 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
if (CXXRD->getNumBases())
return false;
- for (RecordDecl::field_iterator Field = RD->field_begin(),
- FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+ for (FieldDecl *Field : RD->fields()) {
+ ++FieldNo;
+
// If this is a union, skip all the fields that aren't being initialized.
- if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
+ if (RD->isUnion() &&
+ !declaresSameEntity(ILE->getInitializedFieldInUnion(), Field))
continue;
- // Don't emit anonymous bitfields, they just affect layout.
- if (Field->isUnnamedBitfield())
+ // Don't emit anonymous bitfields or zero-sized fields.
+ if (Field->isUnnamedBitfield() || Field->isZeroSize(CGM.getContext()))
continue;
// Get the initializer. A struct can include fields without initializers,
// we just use explicit null values for them.
- llvm::Constant *EltInit;
+ Expr *Init = nullptr;
if (ElementNo < ILE->getNumInits())
- EltInit = Emitter.tryEmitPrivateForMemory(ILE->getInit(ElementNo++),
- Field->getType());
- else
- EltInit = Emitter.emitNullForMemory(Field->getType());
+ Init = ILE->getInit(ElementNo++);
+ if (Init && isa<NoInitExpr>(Init))
+ continue;
+ // When emitting a DesignatedInitUpdateExpr, a nested InitListExpr
+ // represents additional overwriting of our current constant value, and not
+ // a new constant to emit independently.
+ if (AllowOverwrite &&
+ (Field->getType()->isArrayType() || Field->getType()->isRecordType())) {
+ if (auto *SubILE = dyn_cast<InitListExpr>(Init)) {
+ CharUnits Offset = CGM.getContext().toCharUnitsFromBits(
+ Layout.getFieldOffset(FieldNo));
+ if (!EmitDesignatedInitUpdater(Emitter, Builder, StartOffset + Offset,
+ Field->getType(), SubILE))
+ return false;
+ // If we split apart the field's value, try to collapse it down to a
+ // single value now.
+ Builder.condense(StartOffset + Offset,
+ CGM.getTypes().ConvertTypeForMem(Field->getType()));
+ continue;
+ }
+ }
+
+ llvm::Constant *EltInit =
+ Init ? Emitter.tryEmitPrivateForMemory(Init, Field->getType())
+ : Emitter.emitNullForMemory(Field->getType());
if (!EltInit)
return false;
if (!Field->isBitField()) {
// Handle non-bitfield members.
- AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit);
+ if (!AppendField(Field, Layout.getFieldOffset(FieldNo), EltInit,
+ AllowOverwrite))
+ return false;
+ // After emitting a non-empty field with [[no_unique_address]], we may
+ // need to overwrite its tail padding.
+ if (Field->hasAttr<NoUniqueAddressAttr>())
+ AllowOverwrite = true;
} else {
// Otherwise we have a bitfield.
if (auto *CI = dyn_cast<llvm::ConstantInt>(EltInit)) {
- AppendBitField(*Field, Layout.getFieldOffset(FieldNo), CI);
+ if (!AppendBitField(Field, Layout.getFieldOffset(FieldNo), CI,
+ AllowOverwrite))
+ return false;
} else {
// We are trying to initialize a bitfield with a non-trivial constant;
// this must require run-time code.
@@ -445,7 +769,8 @@ bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
llvm::Constant *VTableAddressPoint =
CGM.getCXXABI().getVTableAddressPointForConstExpr(
BaseSubobject(CD, Offset), VTableClass);
- AppendBytes(Offset, VTableAddressPoint);
+ if (!AppendBytes(Offset, VTableAddressPoint))
+ return false;
}
// Accumulate and sort bases, in order to visit them in address order, which
@@ -460,7 +785,7 @@ bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
CharUnits BaseOffset = Layout.getBaseClassOffset(BD);
Bases.push_back(BaseInfo(BD, BaseOffset, BaseNo));
}
- std::stable_sort(Bases.begin(), Bases.end());
+ llvm::stable_sort(Bases);
for (unsigned I = 0, N = Bases.size(); I != N; ++I) {
BaseInfo &Base = Bases[I];
@@ -474,14 +799,15 @@ bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
unsigned FieldNo = 0;
uint64_t OffsetBits = CGM.getContext().toBits(Offset);
+ bool AllowOverwrite = false;
for (RecordDecl::field_iterator Field = RD->field_begin(),
FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
// If this is a union, skip all the fields that aren't being initialized.
- if (RD->isUnion() && Val.getUnionField() != *Field)
+ if (RD->isUnion() && !declaresSameEntity(Val.getUnionField(), *Field))
continue;
- // Don't emit anonymous bitfields, they just affect layout.
- if (Field->isUnnamedBitfield())
+ // Don't emit anonymous bitfields or zero-sized fields.
+ if (Field->isUnnamedBitfield() || Field->isZeroSize(CGM.getContext()))
continue;
// Emit the value of the initializer.
@@ -494,93 +820,37 @@ bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
if (!Field->isBitField()) {
// Handle non-bitfield members.
- AppendField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits, EltInit);
+ if (!AppendField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits,
+ EltInit, AllowOverwrite))
+ return false;
+ // After emitting a non-empty field with [[no_unique_address]], we may
+ // need to overwrite its tail padding.
+ if (Field->hasAttr<NoUniqueAddressAttr>())
+ AllowOverwrite = true;
} else {
// Otherwise we have a bitfield.
- AppendBitField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits,
- cast<llvm::ConstantInt>(EltInit));
+ if (!AppendBitField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits,
+ cast<llvm::ConstantInt>(EltInit), AllowOverwrite))
+ return false;
}
}
return true;
}
-llvm::Constant *ConstStructBuilder::Finalize(QualType Ty) {
- RecordDecl *RD = Ty->getAs<RecordType>()->getDecl();
- const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
-
- CharUnits LayoutSizeInChars = Layout.getSize();
-
- if (NextFieldOffsetInChars > LayoutSizeInChars) {
- // If the struct is bigger than the size of the record type,
- // we must have a flexible array member at the end.
- assert(RD->hasFlexibleArrayMember() &&
- "Must have flexible array member if struct is bigger than type!");
-
- // No tail padding is necessary.
- } else {
- // Append tail padding if necessary.
- CharUnits LLVMSizeInChars =
- NextFieldOffsetInChars.alignTo(LLVMStructAlignment);
-
- if (LLVMSizeInChars != LayoutSizeInChars)
- AppendTailPadding(LayoutSizeInChars);
-
- LLVMSizeInChars = NextFieldOffsetInChars.alignTo(LLVMStructAlignment);
-
- // Check if we need to convert the struct to a packed struct.
- if (NextFieldOffsetInChars <= LayoutSizeInChars &&
- LLVMSizeInChars > LayoutSizeInChars) {
- assert(!Packed && "Size mismatch!");
-
- ConvertStructToPacked();
- assert(NextFieldOffsetInChars <= LayoutSizeInChars &&
- "Converting to packed did not help!");
- }
-
- LLVMSizeInChars = NextFieldOffsetInChars.alignTo(LLVMStructAlignment);
-
- assert(LayoutSizeInChars == LLVMSizeInChars &&
- "Tail padding mismatch!");
- }
-
- // Pick the type to use. If the type is layout identical to the ConvertType
- // type then use it, otherwise use whatever the builder produced for us.
- llvm::StructType *STy =
- llvm::ConstantStruct::getTypeForElements(CGM.getLLVMContext(),
- Elements, Packed);
- llvm::Type *ValTy = CGM.getTypes().ConvertType(Ty);
- if (llvm::StructType *ValSTy = dyn_cast<llvm::StructType>(ValTy)) {
- if (ValSTy->isLayoutIdentical(STy))
- STy = ValSTy;
- }
-
- llvm::Constant *Result = llvm::ConstantStruct::get(STy, Elements);
-
- assert(NextFieldOffsetInChars.alignTo(getAlignment(Result)) ==
- getSizeInChars(Result) &&
- "Size mismatch!");
-
- return Result;
-}
-
-llvm::Constant *ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter,
- ConstExprEmitter *ExprEmitter,
- llvm::Constant *Base,
- InitListExpr *Updater,
- QualType ValTy) {
- ConstStructBuilder Builder(Emitter);
- if (!Builder.Build(ExprEmitter, Base, Updater))
- return nullptr;
- return Builder.Finalize(ValTy);
+llvm::Constant *ConstStructBuilder::Finalize(QualType Type) {
+ RecordDecl *RD = Type->getAs<RecordType>()->getDecl();
+ llvm::Type *ValTy = CGM.getTypes().ConvertType(Type);
+ return Builder.build(ValTy, RD->hasFlexibleArrayMember());
}
llvm::Constant *ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter,
InitListExpr *ILE,
QualType ValTy) {
- ConstStructBuilder Builder(Emitter);
+ ConstantAggregateBuilder Const(Emitter.CGM);
+ ConstStructBuilder Builder(Emitter, Const, CharUnits::Zero());
- if (!Builder.Build(ILE))
+ if (!Builder.Build(ILE, /*AllowOverwrite*/false))
return nullptr;
return Builder.Finalize(ValTy);
@@ -589,7 +859,8 @@ llvm::Constant *ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter,
llvm::Constant *ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter,
const APValue &Val,
QualType ValTy) {
- ConstStructBuilder Builder(Emitter);
+ ConstantAggregateBuilder Const(Emitter.CGM);
+ ConstStructBuilder Builder(Emitter, Const, CharUnits::Zero());
const RecordDecl *RD = ValTy->castAs<RecordType>()->getDecl();
const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
@@ -599,6 +870,12 @@ llvm::Constant *ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter,
return Builder.Finalize(ValTy);
}
+bool ConstStructBuilder::UpdateStruct(ConstantEmitter &Emitter,
+ ConstantAggregateBuilder &Const,
+ CharUnits Offset, InitListExpr *Updater) {
+ return ConstStructBuilder(Emitter, Const, Offset)
+ .Build(Updater, /*AllowOverwrite*/ true);
+}
//===----------------------------------------------------------------------===//
// ConstExprEmitter
@@ -636,7 +913,7 @@ static ConstantAddress tryEmitGlobalCompoundLiteral(CodeGenModule &CGM,
}
static llvm::Constant *
-EmitArrayConstant(CodeGenModule &CGM, const ConstantArrayType *DestType,
+EmitArrayConstant(CodeGenModule &CGM, llvm::ArrayType *DesiredType,
llvm::Type *CommonElementType, unsigned ArrayBound,
SmallVectorImpl<llvm::Constant *> &Elements,
llvm::Constant *Filler) {
@@ -649,10 +926,8 @@ EmitArrayConstant(CodeGenModule &CGM, const ConstantArrayType *DestType,
--NonzeroLength;
}
- if (NonzeroLength == 0) {
- return llvm::ConstantAggregateZero::get(
- CGM.getTypes().ConvertType(QualType(DestType, 0)));
- }
+ if (NonzeroLength == 0)
+ return llvm::ConstantAggregateZero::get(DesiredType);
// Add a zeroinitializer array filler if we have lots of trailing zeroes.
unsigned TrailingZeroes = ArrayBound - NonzeroLength;
@@ -673,9 +948,7 @@ EmitArrayConstant(CodeGenModule &CGM, const ConstantArrayType *DestType,
}
auto *FillerType =
- CommonElementType
- ? CommonElementType
- : CGM.getTypes().ConvertType(DestType->getElementType());
+ CommonElementType ? CommonElementType : DesiredType->getElementType();
FillerType = llvm::ArrayType::get(FillerType, TrailingZeroes);
Elements.back() = llvm::ConstantAggregateZero::get(FillerType);
CommonElementType = nullptr;
@@ -701,10 +974,12 @@ EmitArrayConstant(CodeGenModule &CGM, const ConstantArrayType *DestType,
return llvm::ConstantStruct::get(SType, Elements);
}
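For orientation, a standalone sketch of the trailing-zero handling in EmitArrayConstant: trailing zero elements are trimmed and, when there are enough of them, represented by a single zero-initialized filler array rather than one constant per element (the threshold below is illustrative, not necessarily clang's).

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> Elements = {5, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0};
  size_t ArrayBound = Elements.size();

  size_t NonzeroLength = ArrayBound;
  while (NonzeroLength && Elements[NonzeroLength - 1] == 0)
    --NonzeroLength;                         // trim trailing zeroes

  size_t TrailingZeroes = ArrayBound - NonzeroLength;
  if (NonzeroLength == 0)
    std::puts("whole array -> one zeroinitializer");
  else if (TrailingZeroes >= 8)              // "lots of trailing zeroes"
    std::printf("%zu explicit elements + one [%zu x T] zeroinitializer filler\n",
                NonzeroLength, TrailingZeroes);
  else
    std::printf("emit all %zu elements individually\n", ArrayBound);
}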
-/// This class only needs to handle two cases:
-/// 1) Literals (this is used by APValue emission to emit literals).
-/// 2) Arrays, structs and unions (outside C++11 mode, we don't currently
-/// constant fold these types).
+// This class only needs to handle arrays, structs and unions. Outside C++11
+// mode, we don't currently constant fold those types. All other types are
+// handled by constant folding.
+//
+// Constant folding is currently missing support for a few features supported
+// here: CK_ToUnion, CK_ReinterpretMemberPointer, and DesignatedInitUpdateExpr.
class ConstExprEmitter :
public StmtVisitor<ConstExprEmitter, llvm::Constant*, QualType> {
CodeGenModule &CGM;
@@ -840,6 +1115,7 @@ public:
case CK_ToVoid:
case CK_Dynamic:
case CK_LValueBitCast:
+ case CK_LValueToRValueBitCast:
case CK_NullToMemberPointer:
case CK_UserDefinedConversion:
case CK_CPointerToObjCPointerCast:
@@ -875,16 +1151,14 @@ public:
case CK_FloatingCast:
case CK_FixedPointCast:
case CK_FixedPointToBoolean:
+ case CK_FixedPointToIntegral:
+ case CK_IntegralToFixedPoint:
case CK_ZeroToOCLOpaqueType:
return nullptr;
}
llvm_unreachable("Invalid CastKind");
}
- llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE, QualType T) {
- return Visit(DAE->getExpr(), T);
- }
-
llvm::Constant *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE, QualType T) {
// No need for a DefaultInitExprScope: we don't handle 'this' in a
// constant expression.
@@ -942,7 +1216,9 @@ public:
Elts.push_back(C);
}
- return EmitArrayConstant(CGM, CAT, CommonElementType, NumElements, Elts,
+ llvm::ArrayType *Desired =
+ cast<llvm::ArrayType>(CGM.getTypes().ConvertType(ILE->getType()));
+ return EmitArrayConstant(CGM, Desired, CommonElementType, NumElements, Elts,
fillC);
}
@@ -968,80 +1244,24 @@ public:
return nullptr;
}
- llvm::Constant *EmitDesignatedInitUpdater(llvm::Constant *Base,
- InitListExpr *Updater,
- QualType destType) {
- if (auto destAT = CGM.getContext().getAsArrayType(destType)) {
- llvm::ArrayType *AType = cast<llvm::ArrayType>(ConvertType(destType));
- llvm::Type *ElemType = AType->getElementType();
-
- unsigned NumInitElements = Updater->getNumInits();
- unsigned NumElements = AType->getNumElements();
-
- std::vector<llvm::Constant *> Elts;
- Elts.reserve(NumElements);
-
- QualType destElemType = destAT->getElementType();
-
- if (auto DataArray = dyn_cast<llvm::ConstantDataArray>(Base))
- for (unsigned i = 0; i != NumElements; ++i)
- Elts.push_back(DataArray->getElementAsConstant(i));
- else if (auto Array = dyn_cast<llvm::ConstantArray>(Base))
- for (unsigned i = 0; i != NumElements; ++i)
- Elts.push_back(Array->getOperand(i));
- else
- return nullptr; // FIXME: other array types not implemented
-
- llvm::Constant *fillC = nullptr;
- if (Expr *filler = Updater->getArrayFiller())
- if (!isa<NoInitExpr>(filler))
- fillC = Emitter.tryEmitAbstractForMemory(filler, destElemType);
- bool RewriteType = (fillC && fillC->getType() != ElemType);
-
- for (unsigned i = 0; i != NumElements; ++i) {
- Expr *Init = nullptr;
- if (i < NumInitElements)
- Init = Updater->getInit(i);
-
- if (!Init && fillC)
- Elts[i] = fillC;
- else if (!Init || isa<NoInitExpr>(Init))
- ; // Do nothing.
- else if (InitListExpr *ChildILE = dyn_cast<InitListExpr>(Init))
- Elts[i] = EmitDesignatedInitUpdater(Elts[i], ChildILE, destElemType);
- else
- Elts[i] = Emitter.tryEmitPrivateForMemory(Init, destElemType);
-
- if (!Elts[i])
- return nullptr;
- RewriteType |= (Elts[i]->getType() != ElemType);
- }
-
- if (RewriteType) {
- std::vector<llvm::Type *> Types;
- Types.reserve(NumElements);
- for (unsigned i = 0; i != NumElements; ++i)
- Types.push_back(Elts[i]->getType());
- llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
- Types, true);
- return llvm::ConstantStruct::get(SType, Elts);
- }
-
- return llvm::ConstantArray::get(AType, Elts);
- }
-
- if (destType->isRecordType())
- return ConstStructBuilder::BuildStruct(Emitter, this, Base, Updater,
- destType);
-
- return nullptr;
- }
-
llvm::Constant *VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E,
QualType destType) {
auto C = Visit(E->getBase(), destType);
- if (!C) return nullptr;
- return EmitDesignatedInitUpdater(C, E->getUpdater(), destType);
+ if (!C)
+ return nullptr;
+
+ ConstantAggregateBuilder Const(CGM);
+ Const.add(C, CharUnits::Zero(), false);
+
+ if (!EmitDesignatedInitUpdater(Emitter, Const, CharUnits::Zero(), destType,
+ E->getUpdater()))
+ return nullptr;
+
+ llvm::Type *ValTy = CGM.getTypes().ConvertType(destType);
+ bool HasFlexibleArray = false;
+ if (auto *RT = destType->getAs<RecordType>())
+ HasFlexibleArray = RT->getDecl()->hasFlexibleArrayMember();
+ return Const.build(ValTy, HasFlexibleArray);
}
llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E, QualType Ty) {
@@ -1077,6 +1297,7 @@ public:
}
llvm::Constant *VisitStringLiteral(StringLiteral *E, QualType T) {
+ // This is a string literal initializing an array in an initializer.
return CGM.GetConstantArrayFromStringLiteral(E);
}
@@ -1106,76 +1327,6 @@ public:
} // end anonymous namespace.
-bool ConstStructBuilder::Build(ConstExprEmitter *ExprEmitter,
- llvm::Constant *Base,
- InitListExpr *Updater) {
- assert(Base && "base expression should not be empty");
-
- QualType ExprType = Updater->getType();
- RecordDecl *RD = ExprType->getAs<RecordType>()->getDecl();
- const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- const llvm::StructLayout *BaseLayout = CGM.getDataLayout().getStructLayout(
- cast<llvm::StructType>(Base->getType()));
- unsigned FieldNo = -1;
- unsigned ElementNo = 0;
-
- // Bail out if we have base classes. We could support these, but they only
- // arise in C++1z where we will have already constant folded most interesting
- // cases. FIXME: There are still a few more cases we can handle this way.
- if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
- if (CXXRD->getNumBases())
- return false;
-
- for (FieldDecl *Field : RD->fields()) {
- ++FieldNo;
-
- if (RD->isUnion() && Updater->getInitializedFieldInUnion() != Field)
- continue;
-
- // Skip anonymous bitfields.
- if (Field->isUnnamedBitfield())
- continue;
-
- llvm::Constant *EltInit = Base->getAggregateElement(ElementNo);
-
- // Bail out if the type of the ConstantStruct does not have the same layout
- // as the type of the InitListExpr.
- if (CGM.getTypes().ConvertType(Field->getType()) != EltInit->getType() ||
- Layout.getFieldOffset(ElementNo) !=
- BaseLayout->getElementOffsetInBits(ElementNo))
- return false;
-
- // Get the initializer. If we encounter an empty field or a NoInitExpr,
- // we use values from the base expression.
- Expr *Init = nullptr;
- if (ElementNo < Updater->getNumInits())
- Init = Updater->getInit(ElementNo);
-
- if (!Init || isa<NoInitExpr>(Init))
- ; // Do nothing.
- else if (InitListExpr *ChildILE = dyn_cast<InitListExpr>(Init))
- EltInit = ExprEmitter->EmitDesignatedInitUpdater(EltInit, ChildILE,
- Field->getType());
- else
- EltInit = Emitter.tryEmitPrivateForMemory(Init, Field->getType());
-
- ++ElementNo;
-
- if (!EltInit)
- return false;
-
- if (!Field->isBitField())
- AppendField(Field, Layout.getFieldOffset(FieldNo), EltInit);
- else if (llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(EltInit))
- AppendBitField(Field, Layout.getFieldOffset(FieldNo), CI);
- else
- // Initializing a bitfield with a non-trivial constant?
- return false;
- }
-
- return true;
-}
-
llvm::Constant *ConstantEmitter::validateAndPopAbstract(llvm::Constant *C,
AbstractState saved) {
Abstract = saved.OldValue;
@@ -1609,6 +1760,7 @@ private:
ConstantLValue VisitConstantExpr(const ConstantExpr *E);
ConstantLValue VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
ConstantLValue VisitStringLiteral(const StringLiteral *E);
+ ConstantLValue VisitObjCBoxedExpr(const ObjCBoxedExpr *E);
ConstantLValue VisitObjCEncodeExpr(const ObjCEncodeExpr *E);
ConstantLValue VisitObjCStringLiteral(const ObjCStringLiteral *E);
ConstantLValue VisitPredefinedExpr(const PredefinedExpr *E);
@@ -1650,17 +1802,7 @@ private:
llvm::Constant *ConstantLValueEmitter::tryEmit() {
const APValue::LValueBase &base = Value.getLValueBase();
- // Certain special array initializers are represented in APValue
- // as l-values referring to the base expression which generates the
- // array. This happens with e.g. string literals. These should
- // probably just get their own representation kind in APValue.
- if (DestType->isArrayType()) {
- assert(!hasNonZeroOffset() && "offset on array initializer");
- auto expr = const_cast<Expr*>(base.get<const Expr*>());
- return ConstExprEmitter(Emitter).Visit(expr, DestType);
- }
-
- // Otherwise, the destination type should be a pointer or reference
+ // The destination type should be a pointer or reference
// type, but it might also be a cast thereof.
//
// FIXME: the chain of casts required should be reflected in the APValue.
@@ -1700,34 +1842,21 @@ llvm::Constant *ConstantLValueEmitter::tryEmit() {
/// bitcast to pointer type.
llvm::Constant *
ConstantLValueEmitter::tryEmitAbsolute(llvm::Type *destTy) {
- auto offset = getOffset();
-
// If we're producing a pointer, this is easy.
- if (auto destPtrTy = cast<llvm::PointerType>(destTy)) {
- if (Value.isNullPointer()) {
- // FIXME: integer offsets from non-zero null pointers.
- return CGM.getNullPointer(destPtrTy, DestType);
- }
-
- // Convert the integer to a pointer-sized integer before converting it
- // to a pointer.
- // FIXME: signedness depends on the original integer type.
- auto intptrTy = CGM.getDataLayout().getIntPtrType(destPtrTy);
- llvm::Constant *C = offset;
- C = llvm::ConstantExpr::getIntegerCast(getOffset(), intptrTy,
- /*isSigned*/ false);
- C = llvm::ConstantExpr::getIntToPtr(C, destPtrTy);
- return C;
+ auto destPtrTy = cast<llvm::PointerType>(destTy);
+ if (Value.isNullPointer()) {
+ // FIXME: integer offsets from non-zero null pointers.
+ return CGM.getNullPointer(destPtrTy, DestType);
}
- // Otherwise, we're basically returning an integer constant.
-
- // FIXME: this does the wrong thing with ptrtoint of a null pointer,
- // but since we don't know the original pointer type, there's not much
- // we can do about it.
-
- auto C = getOffset();
- C = llvm::ConstantExpr::getIntegerCast(C, destTy, /*isSigned*/ false);
+ // Convert the integer to a pointer-sized integer before converting it
+ // to a pointer.
+ // FIXME: signedness depends on the original integer type.
+ auto intptrTy = CGM.getDataLayout().getIntPtrType(destPtrTy);
+ llvm::Constant *C;
+ C = llvm::ConstantExpr::getIntegerCast(getOffset(), intptrTy,
+ /*isSigned*/ false);
+ C = llvm::ConstantExpr::getIntToPtr(C, destPtrTy);
return C;
}
@@ -1749,7 +1878,7 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
if (VD->isLocalVarDecl()) {
return CGM.getOrCreateStaticVarDecl(
- *VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false));
+ *VD, CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false));
}
}
}
@@ -1757,6 +1886,17 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
return nullptr;
}
+ // Handle typeid(T).
+ if (TypeInfoLValue TI = base.dyn_cast<TypeInfoLValue>()) {
+ llvm::Type *StdTypeInfoPtrTy =
+ CGM.getTypes().ConvertType(base.getTypeInfoType())->getPointerTo();
+ llvm::Constant *TypeInfo =
+ CGM.GetAddrOfRTTIDescriptor(QualType(TI.getType(), 0));
+ if (TypeInfo->getType() != StdTypeInfoPtrTy)
+ TypeInfo = llvm::ConstantExpr::getBitCast(TypeInfo, StdTypeInfoPtrTy);
+ return TypeInfo;
+ }
+
// Otherwise, it must be an expression.
return Visit(base.get<const Expr*>());
}
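The new TypeInfoLValue case is exercised by constants whose value is the address of a typeid result. A minimal sketch of the kind of initializer this allows to be emitted as a compile-time constant (rather than dynamic initialization):

#include <cstdio>
#include <typeinfo>

// A constant-initialized pointer to a std::type_info object; the TypeInfoLValue
// path emits the RTTI descriptor for `int` directly.
const std::type_info *IntInfo = &typeid(int);

int main() {
  std::printf("%s\n", IntInfo->name());
}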
@@ -1781,25 +1921,29 @@ ConstantLValueEmitter::VisitObjCEncodeExpr(const ObjCEncodeExpr *E) {
return CGM.GetAddrOfConstantStringFromObjCEncode(E);
}
+static ConstantLValue emitConstantObjCStringLiteral(const StringLiteral *S,
+ QualType T,
+ CodeGenModule &CGM) {
+ auto C = CGM.getObjCRuntime().GenerateConstantString(S);
+ return C.getElementBitCast(CGM.getTypes().ConvertTypeForMem(T));
+}
+
ConstantLValue
ConstantLValueEmitter::VisitObjCStringLiteral(const ObjCStringLiteral *E) {
- auto C = CGM.getObjCRuntime().GenerateConstantString(E->getString());
- return C.getElementBitCast(CGM.getTypes().ConvertTypeForMem(E->getType()));
+ return emitConstantObjCStringLiteral(E->getString(), E->getType(), CGM);
}
ConstantLValue
-ConstantLValueEmitter::VisitPredefinedExpr(const PredefinedExpr *E) {
- if (auto CGF = Emitter.CGF) {
- LValue Res = CGF->EmitPredefinedLValue(E);
- return cast<ConstantAddress>(Res.getAddress());
- }
-
- auto kind = E->getIdentKind();
- if (kind == PredefinedExpr::PrettyFunction) {
- return CGM.GetAddrOfConstantCString("top level", ".tmp");
- }
+ConstantLValueEmitter::VisitObjCBoxedExpr(const ObjCBoxedExpr *E) {
+ assert(E->isExpressibleAsConstantInitializer() &&
+ "this boxed expression can't be emitted as a compile-time constant");
+ auto *SL = cast<StringLiteral>(E->getSubExpr()->IgnoreParenCasts());
+ return emitConstantObjCStringLiteral(SL, E->getType(), CGM);
+}
- return CGM.GetAddrOfConstantCString("", ".tmp");
+ConstantLValue
+ConstantLValueEmitter::VisitPredefinedExpr(const PredefinedExpr *E) {
+ return CGM.GetAddrOfConstantStringFromLiteral(E->getFunctionName());
}
ConstantLValue
@@ -1867,12 +2011,17 @@ ConstantLValueEmitter::VisitMaterializeTemporaryExpr(
llvm::Constant *ConstantEmitter::tryEmitPrivate(const APValue &Value,
QualType DestType) {
switch (Value.getKind()) {
- case APValue::Uninitialized:
- llvm_unreachable("Constant expressions should be initialized.");
+ case APValue::None:
+ case APValue::Indeterminate:
+ // Out-of-lifetime and indeterminate values can be modeled as 'undef'.
+ return llvm::UndefValue::get(CGM.getTypes().ConvertType(DestType));
case APValue::LValue:
return ConstantLValueEmitter(*this, Value, DestType).tryEmit();
case APValue::Int:
return llvm::ConstantInt::get(CGM.getLLVMContext(), Value.getInt());
+ case APValue::FixedPoint:
+ return llvm::ConstantInt::get(CGM.getLLVMContext(),
+ Value.getFixedPoint().getValue());
case APValue::ComplexInt: {
llvm::Constant *Complex[2];
@@ -1990,7 +2139,9 @@ llvm::Constant *ConstantEmitter::tryEmitPrivate(const APValue &Value,
return llvm::ConstantAggregateZero::get(AType);
}
- return EmitArrayConstant(CGM, CAT, CommonElementType, NumElements, Elts,
+ llvm::ArrayType *Desired =
+ cast<llvm::ArrayType>(CGM.getTypes().ConvertType(DestType));
+ return EmitArrayConstant(CGM, Desired, CommonElementType, NumElements, Elts,
Filler);
}
case APValue::MemberPointer:
@@ -2077,7 +2228,7 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
for (const auto *Field : record->fields()) {
// Fill in non-bitfields. (Bitfields always use a zero pattern, which we
// will fill in later.)
- if (!Field->isBitField()) {
+ if (!Field->isBitField() && !Field->isZeroSize(CGM.getContext())) {
unsigned fieldIndex = layout.getLLVMFieldNo(Field);
elements[fieldIndex] = CGM.EmitNullConstant(Field->getType());
}
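The zero-size check above matters for empty members marked with the C++20 [[no_unique_address]] attribute: they occupy no storage, so null-constant emission has no LLVM field slot to fill for them. A small usage sketch of the case this handles:

#include <cstdio>

struct Empty {};
struct S {
  int I;
  [[no_unique_address]] Empty E;   // zero-size field: no storage of its own
};

int main() {
  static const S Zero{};           // the emitted null constant has no slot for E
  std::printf("sizeof(S) == %zu\n", sizeof(S));   // typically 4
}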
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 1c14d4c99a23..3d082de2a14f 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -1,9 +1,8 @@
//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -17,6 +16,7 @@
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
@@ -125,6 +125,21 @@ struct BinOpInfo {
return CFP->isZero();
return true;
}
+
+ /// Check if either operand is a fixed point type or integer type, with at
+ /// least one being a fixed point type. In that case the operation did not
+ /// follow the usual arithmetic conversions, so the two operands may not have
+ /// the same type.
+ bool isFixedPointBinOp() const {
+ // We cannot simply check the result type since comparison operations return
+ // an int.
+ if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
+ QualType LHSType = BinOp->getLHS()->getType();
+ QualType RHSType = BinOp->getRHS()->getType();
+ return LHSType->isFixedPointType() || RHSType->isFixedPointType();
+ }
+ return false;
+ }
};
static bool MustVisitNullValue(const Expr *E) {
@@ -298,7 +313,7 @@ public:
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *EmitConversionToBool(Value *Src, QualType DstTy);
- /// Emit a check that a conversion to or from a floating-point type does not
+ /// Emit a check that a conversion from a floating-point type does not
/// overflow.
void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
Value *Src, QualType SrcType, QualType DstType,
@@ -349,8 +364,14 @@ public:
SourceLocation Loc,
ScalarConversionOpts Opts = ScalarConversionOpts());
+ /// Convert between two fixed point types, or between a fixed point type and
+ /// an integer type.
Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
SourceLocation Loc);
+ Value *EmitFixedPointConversion(Value *Src, FixedPointSemantics &SrcFixedSema,
+ FixedPointSemantics &DstFixedSema,
+ SourceLocation Loc,
+ bool DstIsInteger = false);
/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
@@ -620,12 +641,20 @@ public:
Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
return EmitLoadOfLValue(E);
}
+ Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
+ auto &Ctx = CGF.getContext();
+ APValue Evaluated =
+ SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
+ return ConstantEmitter(CGF.CGM, &CGF)
+ .emitAbstract(SLE->getLocation(), Evaluated, SLE->getType());
+ }
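The scope handling matters because a SourceLocExpr such as __builtin_LINE() appearing in a default argument must be evaluated at the call site, not at the point of declaration. A small usage example of the behavior this lowers:

#include <cstdio>

// The default argument is a SourceLocExpr; it is evaluated where each call
// appears, so the two calls below print different line numbers.
static int whereAmI(int Line = __builtin_LINE()) { return Line; }

int main() {
  std::printf("%d\n", whereAmI());
  std::printf("%d\n", whereAmI());
}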
Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
return Visit(DAE->getExpr());
}
Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
- CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
+ CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
return Visit(DIE->getExpr());
}
Value *VisitCXXThisExpr(CXXThisExpr *TE) {
@@ -729,6 +758,9 @@ public:
return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
}
+ // Helper functions for fixed point binary operations.
+ Value *EmitFixedPointBinOp(const BinOpInfo &Ops);
+
BinOpInfo EmitBinOps(const BinaryOperator *E);
LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
@@ -832,128 +864,63 @@ Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
void ScalarExprEmitter::EmitFloatConversionCheck(
Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
+ assert(SrcType->isFloatingType() && "not a conversion from floating point");
+ if (!isa<llvm::IntegerType>(DstTy))
+ return;
+
CodeGenFunction::SanitizerScope SanScope(&CGF);
using llvm::APFloat;
using llvm::APSInt;
- llvm::Type *SrcTy = Src->getType();
-
llvm::Value *Check = nullptr;
- if (llvm::IntegerType *IntTy = dyn_cast<llvm::IntegerType>(SrcTy)) {
- // Integer to floating-point. This can fail for unsigned short -> __half
- // or unsigned __int128 -> float.
- assert(DstType->isFloatingType());
- bool SrcIsUnsigned = OrigSrcType->isUnsignedIntegerOrEnumerationType();
-
- APFloat LargestFloat =
- APFloat::getLargest(CGF.getContext().getFloatTypeSemantics(DstType));
- APSInt LargestInt(IntTy->getBitWidth(), SrcIsUnsigned);
-
- bool IsExact;
- if (LargestFloat.convertToInteger(LargestInt, APFloat::rmTowardZero,
- &IsExact) != APFloat::opOK)
- // The range of representable values of this floating point type includes
- // all values of this integer type. Don't need an overflow check.
- return;
-
- llvm::Value *Max = llvm::ConstantInt::get(VMContext, LargestInt);
- if (SrcIsUnsigned)
- Check = Builder.CreateICmpULE(Src, Max);
- else {
- llvm::Value *Min = llvm::ConstantInt::get(VMContext, -LargestInt);
- llvm::Value *GE = Builder.CreateICmpSGE(Src, Min);
- llvm::Value *LE = Builder.CreateICmpSLE(Src, Max);
- Check = Builder.CreateAnd(GE, LE);
- }
- } else {
- const llvm::fltSemantics &SrcSema =
- CGF.getContext().getFloatTypeSemantics(OrigSrcType);
- if (isa<llvm::IntegerType>(DstTy)) {
- // Floating-point to integer. This has undefined behavior if the source is
- // +-Inf, NaN, or doesn't fit into the destination type (after truncation
- // to an integer).
- unsigned Width = CGF.getContext().getIntWidth(DstType);
- bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();
-
- APSInt Min = APSInt::getMinValue(Width, Unsigned);
- APFloat MinSrc(SrcSema, APFloat::uninitialized);
- if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
- APFloat::opOverflow)
- // Don't need an overflow check for lower bound. Just check for
- // -Inf/NaN.
- MinSrc = APFloat::getInf(SrcSema, true);
- else
- // Find the largest value which is too small to represent (before
- // truncation toward zero).
- MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
-
- APSInt Max = APSInt::getMaxValue(Width, Unsigned);
- APFloat MaxSrc(SrcSema, APFloat::uninitialized);
- if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
- APFloat::opOverflow)
- // Don't need an overflow check for upper bound. Just check for
- // +Inf/NaN.
- MaxSrc = APFloat::getInf(SrcSema, false);
- else
- // Find the smallest value which is too large to represent (before
- // truncation toward zero).
- MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
-
- // If we're converting from __half, convert the range to float to match
- // the type of src.
- if (OrigSrcType->isHalfType()) {
- const llvm::fltSemantics &Sema =
- CGF.getContext().getFloatTypeSemantics(SrcType);
- bool IsInexact;
- MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
- MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
- }
-
- llvm::Value *GE =
- Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
- llvm::Value *LE =
- Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
- Check = Builder.CreateAnd(GE, LE);
- } else {
- // FIXME: Maybe split this sanitizer out from float-cast-overflow.
- //
- // Floating-point to floating-point. This has undefined behavior if the
- // source is not in the range of representable values of the destination
- // type. The C and C++ standards are spectacularly unclear here. We
- // diagnose finite out-of-range conversions, but allow infinities and NaNs
- // to convert to the corresponding value in the smaller type.
- //
- // C11 Annex F gives all such conversions defined behavior for IEC 60559
- // conforming implementations. Unfortunately, LLVM's fptrunc instruction
- // does not.
-
- // Converting from a lower rank to a higher rank can never have
- // undefined behavior, since higher-rank types must have a superset
- // of values of lower-rank types.
- if (CGF.getContext().getFloatingTypeOrder(OrigSrcType, DstType) != 1)
- return;
-
- assert(!OrigSrcType->isHalfType() &&
- "should not check conversion from __half, it has the lowest rank");
-
- const llvm::fltSemantics &DstSema =
- CGF.getContext().getFloatTypeSemantics(DstType);
- APFloat MinBad = APFloat::getLargest(DstSema, false);
- APFloat MaxBad = APFloat::getInf(DstSema, false);
-
- bool IsInexact;
- MinBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);
- MaxBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);
-
- Value *AbsSrc = CGF.EmitNounwindRuntimeCall(
- CGF.CGM.getIntrinsic(llvm::Intrinsic::fabs, Src->getType()), Src);
- llvm::Value *GE =
- Builder.CreateFCmpOGT(AbsSrc, llvm::ConstantFP::get(VMContext, MinBad));
- llvm::Value *LE =
- Builder.CreateFCmpOLT(AbsSrc, llvm::ConstantFP::get(VMContext, MaxBad));
- Check = Builder.CreateNot(Builder.CreateAnd(GE, LE));
- }
- }
+ const llvm::fltSemantics &SrcSema =
+ CGF.getContext().getFloatTypeSemantics(OrigSrcType);
+
+ // Floating-point to integer. This has undefined behavior if the source is
+ // +-Inf, NaN, or doesn't fit into the destination type (after truncation
+ // to an integer).
+ unsigned Width = CGF.getContext().getIntWidth(DstType);
+ bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();
+
+ APSInt Min = APSInt::getMinValue(Width, Unsigned);
+ APFloat MinSrc(SrcSema, APFloat::uninitialized);
+ if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
+ APFloat::opOverflow)
+ // Don't need an overflow check for lower bound. Just check for
+ // -Inf/NaN.
+ MinSrc = APFloat::getInf(SrcSema, true);
+ else
+ // Find the largest value which is too small to represent (before
+ // truncation toward zero).
+ MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
+
+ APSInt Max = APSInt::getMaxValue(Width, Unsigned);
+ APFloat MaxSrc(SrcSema, APFloat::uninitialized);
+ if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
+ APFloat::opOverflow)
+ // Don't need an overflow check for upper bound. Just check for
+ // +Inf/NaN.
+ MaxSrc = APFloat::getInf(SrcSema, false);
+ else
+ // Find the smallest value which is too large to represent (before
+ // truncation toward zero).
+ MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
+
+ // If we're converting from __half, convert the range to float to match
+ // the type of src.
+ if (OrigSrcType->isHalfType()) {
+ const llvm::fltSemantics &Sema =
+ CGF.getContext().getFloatTypeSemantics(SrcType);
+ bool IsInexact;
+ MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
+ MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
+ }
+
+ llvm::Value *GE =
+ Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
+ llvm::Value *LE =
+ Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
+ Check = Builder.CreateAnd(GE, LE);
llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
CGF.EmitCheckTypeDescriptor(OrigSrcType),
@@ -1205,17 +1172,25 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
// TODO(leonardchan): When necessary, add another if statement checking for
// conversions to fixed point types from other types.
if (SrcType->isFixedPointType()) {
- if (DstType->isFixedPointType()) {
- return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
- } else if (DstType->isBooleanType()) {
+ if (DstType->isBooleanType())
+ // It is important that we check this before checking if the dest type is
+ // an integer because booleans are technically integer types.
// We do not need to check the padding bit on unsigned types if unsigned
// padding is enabled because overflow into this bit is undefined
// behavior.
return Builder.CreateIsNotNull(Src, "tobool");
- }
+ if (DstType->isFixedPointType() || DstType->isIntegerType())
+ return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
+
+ llvm_unreachable(
+ "Unhandled scalar conversion from a fixed point type to another type.");
+ } else if (DstType->isFixedPointType()) {
+ if (SrcType->isIntegerType())
+ // This also includes converting booleans and enums to fixed point types.
+ return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
llvm_unreachable(
- "Unhandled scalar conversion involving a fixed point type.");
+ "Unhandled scalar conversion to a fixed point type from another type.");
}
QualType NoncanonicalSrcType = SrcType;
@@ -1351,9 +1326,12 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
llvm::Type *ResTy = DstTy;
// An overflowing conversion has undefined behavior if either the source type
- // or the destination type is a floating-point type.
+ // or the destination type is a floating-point type. However, we consider the
+ // range of representable values for all floating-point types to be
+ // [-inf,+inf], so no overflow can ever happen when the destination type is a
+ // floating-point type.
if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
- (OrigSrcType->isFloatingType() || DstType->isFloatingType()))
+ OrigSrcType->isFloatingType())
EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
Loc);
@@ -1423,17 +1401,21 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
QualType DstTy,
SourceLocation Loc) {
- using llvm::APInt;
- using llvm::ConstantInt;
- using llvm::Value;
-
- assert(SrcTy->isFixedPointType());
- assert(DstTy->isFixedPointType());
-
FixedPointSemantics SrcFPSema =
CGF.getContext().getFixedPointSemantics(SrcTy);
FixedPointSemantics DstFPSema =
CGF.getContext().getFixedPointSemantics(DstTy);
+ return EmitFixedPointConversion(Src, SrcFPSema, DstFPSema, Loc,
+ DstTy->isIntegerType());
+}
+
+Value *ScalarExprEmitter::EmitFixedPointConversion(
+ Value *Src, FixedPointSemantics &SrcFPSema, FixedPointSemantics &DstFPSema,
+ SourceLocation Loc, bool DstIsInteger) {
+ using llvm::APInt;
+ using llvm::ConstantInt;
+ using llvm::Value;
+
unsigned SrcWidth = SrcFPSema.getWidth();
unsigned DstWidth = DstFPSema.getWidth();
unsigned SrcScale = SrcFPSema.getScale();
@@ -1446,13 +1428,26 @@ Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
Value *Result = Src;
unsigned ResultWidth = SrcWidth;
- if (!DstFPSema.isSaturated()) {
- // Downscale.
- if (DstScale < SrcScale)
- Result = SrcIsSigned ?
- Builder.CreateAShr(Result, SrcScale - DstScale, "downscale") :
- Builder.CreateLShr(Result, SrcScale - DstScale, "downscale");
+ // Downscale.
+ if (DstScale < SrcScale) {
+ // When converting to integers, we round towards zero. For negative numbers,
+ // right shifting rounds towards negative infinity. In this case, we can
+ // just round up before shifting.
+ if (DstIsInteger && SrcIsSigned) {
+ Value *Zero = llvm::Constant::getNullValue(Result->getType());
+ Value *IsNegative = Builder.CreateICmpSLT(Result, Zero);
+ Value *LowBits = ConstantInt::get(
+ CGF.getLLVMContext(), APInt::getLowBitsSet(ResultWidth, SrcScale));
+ Value *Rounded = Builder.CreateAdd(Result, LowBits);
+ Result = Builder.CreateSelect(IsNegative, Rounded, Result);
+ }
+ Result = SrcIsSigned
+ ? Builder.CreateAShr(Result, SrcScale - DstScale, "downscale")
+ : Builder.CreateLShr(Result, SrcScale - DstScale, "downscale");
+ }
+
+ if (!DstFPSema.isSaturated()) {
// Resize.
Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
@@ -1462,14 +1457,11 @@ Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
} else {
// Adjust the number of fractional bits.
if (DstScale > SrcScale) {
- ResultWidth = SrcWidth + DstScale - SrcScale;
+ // Compare to DstWidth to prevent resizing twice.
+ ResultWidth = std::max(SrcWidth + DstScale - SrcScale, DstWidth);
llvm::Type *UpscaledTy = Builder.getIntNTy(ResultWidth);
Result = Builder.CreateIntCast(Result, UpscaledTy, SrcIsSigned, "resize");
Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
- } else if (DstScale < SrcScale) {
- Result = SrcIsSigned ?
- Builder.CreateAShr(Result, SrcScale - DstScale, "downscale") :
- Builder.CreateLShr(Result, SrcScale - DstScale, "downscale");
}
// Handle saturation.
@@ -1493,7 +1485,8 @@ Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
}
// Resize the integer part to get the final destination size.
- Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
+ if (ResultWidth != DstWidth)
+ Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
}
return Result;
}
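A standalone sketch, with plain 32-bit integers, of the signed downscale this function performs when the destination is an integer: an arithmetic right shift rounds toward negative infinity, so negative values are first biased by the fractional low bits to obtain the round-toward-zero result C expects (assumes arithmetic right shift of negative values, as on mainstream targets).

#include <cstdint>
#include <cstdio>

static int32_t fixedToInt(int32_t Value, unsigned Scale) {
  int32_t LowBits = (int32_t(1) << Scale) - 1;
  if (Value < 0)
    Value += LowBits;            // bias so the shift truncates toward zero
  return Value >> Scale;         // arithmetic shift drops the fractional bits
}

int main() {
  // -2.5 with 7 fractional bits is -320; C-style truncation should give -2.
  std::printf("%d %d\n", fixedToInt(-320, 7), fixedToInt(320, 7));   // -2 2
}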
@@ -1978,6 +1971,15 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
return EmitLoadOfLValue(LV, CE->getExprLoc());
}
+ case CK_LValueToRValueBitCast: {
+ LValue SourceLVal = CGF.EmitLValue(E);
+ Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(),
+ CGF.ConvertTypeForMem(DestTy));
+ LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
+ DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
+ return EmitLoadOfLValue(DestLV, CE->getExprLoc());
+ }
+
case CK_CPointerToObjCPointerCast:
case CK_BlockPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
@@ -2017,6 +2019,12 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
}
}
+ // Update heapallocsite metadata when there is an explicit cast.
+ if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(Src))
+ if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE))
+ CGF.getDebugInfo()->
+ addHeapAllocSiteMetadata(CI, CE->getType(), CE->getExprLoc());
+
return Builder.CreateBitCast(Src, DstTy);
}
case CK_AddressSpaceConversion: {
@@ -2087,14 +2095,14 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_NullToPointer:
if (MustVisitNullValue(E))
- (void) Visit(E);
+ CGF.EmitIgnoredExpr(E);
return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
DestTy);
case CK_NullToMemberPointer: {
if (MustVisitNullValue(E))
- (void) Visit(E);
+ CGF.EmitIgnoredExpr(E);
const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
@@ -2200,6 +2208,21 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
return EmitScalarConversion(Visit(E), E->getType(), DestTy,
CE->getExprLoc());
+ case CK_FixedPointToIntegral:
+ assert(E->getType()->isFixedPointType() &&
+ "Expected src type to be fixed point type");
+ assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
+ return EmitScalarConversion(Visit(E), E->getType(), DestTy,
+ CE->getExprLoc());
+
+ case CK_IntegralToFixedPoint:
+ assert(E->getType()->isIntegerType() &&
+ "Expected src type to be an integer");
+ assert(DestTy->isFixedPointType() &&
+ "Expected dest type to be fixed point type");
+ return EmitScalarConversion(Visit(E), E->getType(), DestTy,
+ CE->getExprLoc());
+
case CK_IntegralCast: {
ScalarConversionOpts Opts;
if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
@@ -2527,14 +2550,14 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
}
if (atomicPHI) {
- llvm::BasicBlock *opBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
auto Pair = CGF.EmitAtomicCompareExchange(
LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
llvm::Value *success = Pair.second;
- atomicPHI->addIncoming(old, opBB);
- Builder.CreateCondBr(success, contBB, opBB);
+ atomicPHI->addIncoming(old, curBlock);
+ Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
Builder.SetInsertPoint(contBB);
return isPre ? value : input;
}
@@ -2881,14 +2904,14 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
Loc, ScalarConversionOpts(CGF.SanOpts));
if (atomicPHI) {
- llvm::BasicBlock *opBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
auto Pair = CGF.EmitAtomicCompareExchange(
LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
llvm::Value *success = Pair.second;
- atomicPHI->addIncoming(old, opBB);
- Builder.CreateCondBr(success, contBB, opBB);
+ atomicPHI->addIncoming(old, curBlock);
+ Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
Builder.SetInsertPoint(contBB);
return LHSLV;
}
@@ -2908,7 +2931,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
bool Ignore = TestAndClearIgnoreResultAssign();
- Value *RHS;
+ Value *RHS = nullptr;
LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
// If the result is clearly ignored, return now.
@@ -3090,7 +3113,8 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
llvm::FunctionType *handlerTy =
llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
- llvm::Value *handler = CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
+ llvm::FunctionCallee handler =
+ CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
// Sign extend the args to 64-bit, so that we can use the same handler for
// all types of overflow.
@@ -3338,9 +3362,119 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
return propagateFMFlags(V, op);
}
+ if (op.isFixedPointBinOp())
+ return EmitFixedPointBinOp(op);
+
return Builder.CreateAdd(op.LHS, op.RHS, "add");
}
+/// The resulting value must be calculated with exact precision, so the operands
+/// might not have the same type.
+Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
+ using llvm::APSInt;
+ using llvm::ConstantInt;
+
+ const auto *BinOp = cast<BinaryOperator>(op.E);
+
+ // The result is a fixed point type and at least one of the operands is fixed
+ // point while the other is either fixed point or an int. This resulting type
+ // should be determined by Sema::handleFixedPointConversions().
+ QualType ResultTy = op.Ty;
+ QualType LHSTy = BinOp->getLHS()->getType();
+ QualType RHSTy = BinOp->getRHS()->getType();
+ ASTContext &Ctx = CGF.getContext();
+ Value *LHS = op.LHS;
+ Value *RHS = op.RHS;
+
+ auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
+ auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
+ auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
+ auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
+
+ // Convert the operands to the full precision type.
+ Value *FullLHS = EmitFixedPointConversion(LHS, LHSFixedSema, CommonFixedSema,
+ BinOp->getExprLoc());
+ Value *FullRHS = EmitFixedPointConversion(RHS, RHSFixedSema, CommonFixedSema,
+ BinOp->getExprLoc());
+
+ // Perform the actual addition.
+ Value *Result;
+ switch (BinOp->getOpcode()) {
+ case BO_Add: {
+ if (ResultFixedSema.isSaturated()) {
+ llvm::Intrinsic::ID IID = ResultFixedSema.isSigned()
+ ? llvm::Intrinsic::sadd_sat
+ : llvm::Intrinsic::uadd_sat;
+ Result = Builder.CreateBinaryIntrinsic(IID, FullLHS, FullRHS);
+ } else {
+ Result = Builder.CreateAdd(FullLHS, FullRHS);
+ }
+ break;
+ }
+ case BO_Sub: {
+ if (ResultFixedSema.isSaturated()) {
+ llvm::Intrinsic::ID IID = ResultFixedSema.isSigned()
+ ? llvm::Intrinsic::ssub_sat
+ : llvm::Intrinsic::usub_sat;
+ Result = Builder.CreateBinaryIntrinsic(IID, FullLHS, FullRHS);
+ } else {
+ Result = Builder.CreateSub(FullLHS, FullRHS);
+ }
+ break;
+ }
+ case BO_LT:
+ return CommonFixedSema.isSigned() ? Builder.CreateICmpSLT(FullLHS, FullRHS)
+ : Builder.CreateICmpULT(FullLHS, FullRHS);
+ case BO_GT:
+ return CommonFixedSema.isSigned() ? Builder.CreateICmpSGT(FullLHS, FullRHS)
+ : Builder.CreateICmpUGT(FullLHS, FullRHS);
+ case BO_LE:
+ return CommonFixedSema.isSigned() ? Builder.CreateICmpSLE(FullLHS, FullRHS)
+ : Builder.CreateICmpULE(FullLHS, FullRHS);
+ case BO_GE:
+ return CommonFixedSema.isSigned() ? Builder.CreateICmpSGE(FullLHS, FullRHS)
+ : Builder.CreateICmpUGE(FullLHS, FullRHS);
+ case BO_EQ:
+ // For equality operations, we assume any padding bits on unsigned types are
+ // zero'd out. They could be overwritten through non-saturating operations
+ // that cause overflow, but this leads to undefined behavior.
+ return Builder.CreateICmpEQ(FullLHS, FullRHS);
+ case BO_NE:
+ return Builder.CreateICmpNE(FullLHS, FullRHS);
+ case BO_Mul:
+ case BO_Div:
+ case BO_Shl:
+ case BO_Shr:
+ case BO_Cmp:
+ case BO_LAnd:
+ case BO_LOr:
+ case BO_MulAssign:
+ case BO_DivAssign:
+ case BO_AddAssign:
+ case BO_SubAssign:
+ case BO_ShlAssign:
+ case BO_ShrAssign:
+ llvm_unreachable("Found unimplemented fixed point binary operation");
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ case BO_Rem:
+ case BO_Xor:
+ case BO_And:
+ case BO_Or:
+ case BO_Assign:
+ case BO_RemAssign:
+ case BO_AndAssign:
+ case BO_XorAssign:
+ case BO_OrAssign:
+ case BO_Comma:
+ llvm_unreachable("Found unsupported binary operation for fixed point types.");
+ }
+
+ // Convert to the result type.
+ return EmitFixedPointConversion(Result, CommonFixedSema, ResultFixedSema,
+ BinOp->getExprLoc());
+}
+
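
For the saturating BO_Add/BO_Sub paths above, llvm.sadd.sat and llvm.ssub.sat (and their unsigned counterparts) clamp the result to the extremes of the common type instead of wrapping. A scalar emulation of the signed-add case, shown only to pin down the semantics (32-bit width assumed for illustration):

    #include <cstdint>
    #include <limits>

    int32_t saturating_add_i32(int32_t A, int32_t B) {
      int64_t Wide = static_cast<int64_t>(A) + B;      // widen so the sum cannot wrap
      if (Wide > std::numeric_limits<int32_t>::max())
        return std::numeric_limits<int32_t>::max();    // clamp on positive overflow
      if (Wide < std::numeric_limits<int32_t>::min())
        return std::numeric_limits<int32_t>::min();    // clamp on negative overflow
      return static_cast<int32_t>(Wide);
    }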
Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
// The LHS is always a pointer if either side is.
if (!op.LHS->getType()->isPointerTy()) {
@@ -3372,6 +3506,9 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
return propagateFMFlags(V, op);
}
+ if (op.isFixedPointBinOp())
+ return EmitFixedPointBinOp(op);
+
return Builder.CreateSub(op.LHS, op.RHS, "sub");
}
@@ -3450,7 +3587,8 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
bool SanitizeBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
Ops.Ty->hasSignedIntegerRepresentation() &&
- !CGF.getLangOpts().isSignedOverflowDefined();
+ !CGF.getLangOpts().isSignedOverflowDefined() &&
+ !CGF.getLangOpts().CPlusPlus2a;
bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
// OpenCL 6.3j: shift values are effectively % word size of LHS.
if (CGF.getLangOpts().OpenCL)
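
The added !CGF.getLangOpts().CPlusPlus2a condition a few lines up reflects that C++2a defines signed left shifts that earlier standards left undefined, so emitting the ShiftBase check there would flag well-formed code. A small example of the difference:

    // Undefined behaviour in C++17 (negative left operand); well-defined in
    // C++2a, where the result is the value modulo 2^32, i.e. -2. With
    // -std=c++2a the signed ShiftBase check is therefore no longer emitted.
    int shift_negative() {
      int x = -1;
      return x << 1;
    }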
@@ -3591,8 +3729,9 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
} else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
- Value *LHS = Visit(E->getLHS());
- Value *RHS = Visit(E->getRHS());
+ BinOpInfo BOInfo = EmitBinOps(E);
+ Value *LHS = BOInfo.LHS;
+ Value *RHS = BOInfo.RHS;
// If AltiVec, the comparison results in a numeric type, so we use
// intrinsics comparing vectors and giving 0 or 1 as a result
@@ -3670,7 +3809,9 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
E->getExprLoc());
}
- if (LHS->getType()->isFPOrFPVectorTy()) {
+ if (BOInfo.isFixedPointBinOp()) {
+ Result = EmitFixedPointBinOp(BOInfo);
+ } else if (LHS->getType()->isFPOrFPVectorTy()) {
Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
} else if (LHSTy->hasSignedIntegerRepresentation()) {
Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
diff --git a/lib/CodeGen/CGGPUBuiltin.cpp b/lib/CodeGen/CGGPUBuiltin.cpp
index b5375ffb8db7..d7e267630762 100644
--- a/lib/CodeGen/CGGPUBuiltin.cpp
+++ b/lib/CodeGen/CGGPUBuiltin.cpp
@@ -1,9 +1,8 @@
//===------ CGGPUBuiltin.cpp - Codegen for GPU builtins -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CodeGen/CGLoopInfo.cpp b/lib/CodeGen/CGLoopInfo.cpp
index fd0a9c773a2e..b2bc42bfa013 100644
--- a/lib/CodeGen/CGLoopInfo.cpp
+++ b/lib/CodeGen/CGLoopInfo.cpp
@@ -1,9 +1,8 @@
//===---- CGLoopInfo.cpp - LLVM CodeGen for loop metadata -*- C++ -*-------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -19,138 +18,396 @@
using namespace clang::CodeGen;
using namespace llvm;
-static MDNode *createMetadata(LLVMContext &Ctx, const LoopAttributes &Attrs,
- const llvm::DebugLoc &StartLoc,
- const llvm::DebugLoc &EndLoc, MDNode *&AccGroup) {
+MDNode *
+LoopInfo::createLoopPropertiesMetadata(ArrayRef<Metadata *> LoopProperties) {
+ LLVMContext &Ctx = Header->getContext();
+ SmallVector<Metadata *, 4> NewLoopProperties;
+ TempMDTuple TempNode = MDNode::getTemporary(Ctx, None);
+ NewLoopProperties.push_back(TempNode.get());
+ NewLoopProperties.append(LoopProperties.begin(), LoopProperties.end());
- if (!Attrs.IsParallel && Attrs.VectorizeWidth == 0 &&
- Attrs.InterleaveCount == 0 && Attrs.UnrollCount == 0 &&
- Attrs.UnrollAndJamCount == 0 && !Attrs.PipelineDisabled &&
- Attrs.PipelineInitiationInterval == 0 &&
- Attrs.VectorizeEnable == LoopAttributes::Unspecified &&
- Attrs.UnrollEnable == LoopAttributes::Unspecified &&
- Attrs.UnrollAndJamEnable == LoopAttributes::Unspecified &&
- Attrs.DistributeEnable == LoopAttributes::Unspecified && !StartLoc &&
- !EndLoc)
- return nullptr;
+ MDNode *LoopID = MDNode::getDistinct(Ctx, NewLoopProperties);
+ LoopID->replaceOperandWith(0, LoopID);
+ return LoopID;
+}
+
+MDNode *LoopInfo::createPipeliningMetadata(const LoopAttributes &Attrs,
+ ArrayRef<Metadata *> LoopProperties,
+ bool &HasUserTransforms) {
+ LLVMContext &Ctx = Header->getContext();
+
+ Optional<bool> Enabled;
+ if (Attrs.PipelineDisabled)
+ Enabled = false;
+ else if (Attrs.PipelineInitiationInterval != 0)
+ Enabled = true;
+
+ if (Enabled != true) {
+ SmallVector<Metadata *, 4> NewLoopProperties;
+ if (Enabled == false) {
+ NewLoopProperties.append(LoopProperties.begin(), LoopProperties.end());
+ NewLoopProperties.push_back(
+ MDNode::get(Ctx, {MDString::get(Ctx, "llvm.loop.pipeline.disable"),
+ ConstantAsMetadata::get(ConstantInt::get(
+ llvm::Type::getInt1Ty(Ctx), 1))}));
+ LoopProperties = NewLoopProperties;
+ }
+ return createLoopPropertiesMetadata(LoopProperties);
+ }
SmallVector<Metadata *, 4> Args;
- // Reserve operand 0 for loop id self reference.
- auto TempNode = MDNode::getTemporary(Ctx, None);
+ TempMDTuple TempNode = MDNode::getTemporary(Ctx, None);
Args.push_back(TempNode.get());
+ Args.append(LoopProperties.begin(), LoopProperties.end());
- // If we have a valid start debug location for the loop, add it.
- if (StartLoc) {
- Args.push_back(StartLoc.getAsMDNode());
-
- // If we also have a valid end debug location for the loop, add it.
- if (EndLoc)
- Args.push_back(EndLoc.getAsMDNode());
- }
-
- // Setting vectorize.width
- if (Attrs.VectorizeWidth > 0) {
- Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.vectorize.width"),
- ConstantAsMetadata::get(ConstantInt::get(
- Type::getInt32Ty(Ctx), Attrs.VectorizeWidth))};
+ if (Attrs.PipelineInitiationInterval > 0) {
+ Metadata *Vals[] = {
+ MDString::get(Ctx, "llvm.loop.pipeline.initiationinterval"),
+ ConstantAsMetadata::get(ConstantInt::get(
+ llvm::Type::getInt32Ty(Ctx), Attrs.PipelineInitiationInterval))};
Args.push_back(MDNode::get(Ctx, Vals));
}
- // Setting interleave.count
- if (Attrs.InterleaveCount > 0) {
- Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.interleave.count"),
- ConstantAsMetadata::get(ConstantInt::get(
- Type::getInt32Ty(Ctx), Attrs.InterleaveCount))};
- Args.push_back(MDNode::get(Ctx, Vals));
+ // No follow-up: This is the last transformation.
+
+ MDNode *LoopID = MDNode::getDistinct(Ctx, Args);
+ LoopID->replaceOperandWith(0, LoopID);
+ HasUserTransforms = true;
+ return LoopID;
+}
+
+MDNode *
+LoopInfo::createPartialUnrollMetadata(const LoopAttributes &Attrs,
+ ArrayRef<Metadata *> LoopProperties,
+ bool &HasUserTransforms) {
+ LLVMContext &Ctx = Header->getContext();
+
+ Optional<bool> Enabled;
+ if (Attrs.UnrollEnable == LoopAttributes::Disable)
+ Enabled = false;
+ else if (Attrs.UnrollEnable == LoopAttributes::Full)
+ Enabled = None;
+ else if (Attrs.UnrollEnable != LoopAttributes::Unspecified ||
+ Attrs.UnrollCount != 0)
+ Enabled = true;
+
+ if (Enabled != true) {
+ // createFullUnrollMetadata will already have added llvm.loop.unroll.disable
+ // if unrolling is disabled.
+ return createPipeliningMetadata(Attrs, LoopProperties, HasUserTransforms);
}
+ SmallVector<Metadata *, 4> FollowupLoopProperties;
+
+ // Apply all loop properties to the unrolled loop.
+ FollowupLoopProperties.append(LoopProperties.begin(), LoopProperties.end());
+
+ // Don't unroll an already unrolled loop.
+ FollowupLoopProperties.push_back(
+ MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.disable")));
+
+ bool FollowupHasTransforms = false;
+ MDNode *Followup = createPipeliningMetadata(Attrs, FollowupLoopProperties,
+ FollowupHasTransforms);
+
+ SmallVector<Metadata *, 4> Args;
+ TempMDTuple TempNode = MDNode::getTemporary(Ctx, None);
+ Args.push_back(TempNode.get());
+ Args.append(LoopProperties.begin(), LoopProperties.end());
+
// Setting unroll.count
if (Attrs.UnrollCount > 0) {
Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.unroll.count"),
ConstantAsMetadata::get(ConstantInt::get(
- Type::getInt32Ty(Ctx), Attrs.UnrollCount))};
+ llvm::Type::getInt32Ty(Ctx), Attrs.UnrollCount))};
Args.push_back(MDNode::get(Ctx, Vals));
}
- // Setting unroll_and_jam.count
- if (Attrs.UnrollAndJamCount > 0) {
- Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.unroll_and_jam.count"),
- ConstantAsMetadata::get(ConstantInt::get(
- Type::getInt32Ty(Ctx), Attrs.UnrollAndJamCount))};
+  // Setting unroll.enable
+ if (Attrs.UnrollEnable == LoopAttributes::Enable) {
+ Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.unroll.enable")};
Args.push_back(MDNode::get(Ctx, Vals));
}
- // Setting vectorize.enable
- if (Attrs.VectorizeEnable != LoopAttributes::Unspecified) {
- Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.vectorize.enable"),
- ConstantAsMetadata::get(ConstantInt::get(
- Type::getInt1Ty(Ctx), (Attrs.VectorizeEnable ==
- LoopAttributes::Enable)))};
- Args.push_back(MDNode::get(Ctx, Vals));
- }
+ if (FollowupHasTransforms)
+ Args.push_back(MDNode::get(
+ Ctx, {MDString::get(Ctx, "llvm.loop.unroll.followup_all"), Followup}));
- // Setting unroll.full or unroll.disable
- if (Attrs.UnrollEnable != LoopAttributes::Unspecified) {
- std::string Name;
- if (Attrs.UnrollEnable == LoopAttributes::Enable)
- Name = "llvm.loop.unroll.enable";
- else if (Attrs.UnrollEnable == LoopAttributes::Full)
- Name = "llvm.loop.unroll.full";
- else
- Name = "llvm.loop.unroll.disable";
- Metadata *Vals[] = {MDString::get(Ctx, Name)};
- Args.push_back(MDNode::get(Ctx, Vals));
+ MDNode *LoopID = MDNode::getDistinct(Ctx, Args);
+ LoopID->replaceOperandWith(0, LoopID);
+ HasUserTransforms = true;
+ return LoopID;
+}
+
+MDNode *
+LoopInfo::createUnrollAndJamMetadata(const LoopAttributes &Attrs,
+ ArrayRef<Metadata *> LoopProperties,
+ bool &HasUserTransforms) {
+ LLVMContext &Ctx = Header->getContext();
+
+ Optional<bool> Enabled;
+ if (Attrs.UnrollAndJamEnable == LoopAttributes::Disable)
+ Enabled = false;
+ else if (Attrs.UnrollAndJamEnable == LoopAttributes::Enable ||
+ Attrs.UnrollAndJamCount != 0)
+ Enabled = true;
+
+ if (Enabled != true) {
+ SmallVector<Metadata *, 4> NewLoopProperties;
+ if (Enabled == false) {
+ NewLoopProperties.append(LoopProperties.begin(), LoopProperties.end());
+ NewLoopProperties.push_back(MDNode::get(
+ Ctx, MDString::get(Ctx, "llvm.loop.unroll_and_jam.disable")));
+ LoopProperties = NewLoopProperties;
+ }
+ return createPartialUnrollMetadata(Attrs, LoopProperties,
+ HasUserTransforms);
}
- // Setting unroll_and_jam.full or unroll_and_jam.disable
- if (Attrs.UnrollAndJamEnable != LoopAttributes::Unspecified) {
- std::string Name;
- if (Attrs.UnrollAndJamEnable == LoopAttributes::Enable)
- Name = "llvm.loop.unroll_and_jam.enable";
- else if (Attrs.UnrollAndJamEnable == LoopAttributes::Full)
- Name = "llvm.loop.unroll_and_jam.full";
- else
- Name = "llvm.loop.unroll_and_jam.disable";
- Metadata *Vals[] = {MDString::get(Ctx, Name)};
+ SmallVector<Metadata *, 4> FollowupLoopProperties;
+ FollowupLoopProperties.append(LoopProperties.begin(), LoopProperties.end());
+ FollowupLoopProperties.push_back(
+ MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll_and_jam.disable")));
+
+ bool FollowupHasTransforms = false;
+ MDNode *Followup = createPartialUnrollMetadata(Attrs, FollowupLoopProperties,
+ FollowupHasTransforms);
+
+ SmallVector<Metadata *, 4> Args;
+ TempMDTuple TempNode = MDNode::getTemporary(Ctx, None);
+ Args.push_back(TempNode.get());
+ Args.append(LoopProperties.begin(), LoopProperties.end());
+
+ // Setting unroll_and_jam.count
+ if (Attrs.UnrollAndJamCount > 0) {
+ Metadata *Vals[] = {
+ MDString::get(Ctx, "llvm.loop.unroll_and_jam.count"),
+ ConstantAsMetadata::get(ConstantInt::get(llvm::Type::getInt32Ty(Ctx),
+ Attrs.UnrollAndJamCount))};
Args.push_back(MDNode::get(Ctx, Vals));
}
- if (Attrs.DistributeEnable != LoopAttributes::Unspecified) {
- Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.distribute.enable"),
- ConstantAsMetadata::get(ConstantInt::get(
- Type::getInt1Ty(Ctx), (Attrs.DistributeEnable ==
- LoopAttributes::Enable)))};
+ if (Attrs.UnrollAndJamEnable == LoopAttributes::Enable) {
+ Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.unroll_and_jam.enable")};
Args.push_back(MDNode::get(Ctx, Vals));
}
- if (Attrs.IsParallel) {
- AccGroup = MDNode::getDistinct(Ctx, {});
+ if (FollowupHasTransforms)
Args.push_back(MDNode::get(
- Ctx, {MDString::get(Ctx, "llvm.loop.parallel_accesses"), AccGroup}));
+ Ctx, {MDString::get(Ctx, "llvm.loop.unroll_and_jam.followup_outer"),
+ Followup}));
+
+ if (UnrollAndJamInnerFollowup)
+ Args.push_back(MDNode::get(
+ Ctx, {MDString::get(Ctx, "llvm.loop.unroll_and_jam.followup_inner"),
+ UnrollAndJamInnerFollowup}));
+
+ MDNode *LoopID = MDNode::getDistinct(Ctx, Args);
+ LoopID->replaceOperandWith(0, LoopID);
+ HasUserTransforms = true;
+ return LoopID;
+}
+
+MDNode *
+LoopInfo::createLoopVectorizeMetadata(const LoopAttributes &Attrs,
+ ArrayRef<Metadata *> LoopProperties,
+ bool &HasUserTransforms) {
+ LLVMContext &Ctx = Header->getContext();
+
+ Optional<bool> Enabled;
+ if (Attrs.VectorizeEnable == LoopAttributes::Disable)
+ Enabled = false;
+ else if (Attrs.VectorizeEnable != LoopAttributes::Unspecified ||
+ Attrs.InterleaveCount != 0 || Attrs.VectorizeWidth != 0)
+ Enabled = true;
+
+ if (Enabled != true) {
+ SmallVector<Metadata *, 4> NewLoopProperties;
+ if (Enabled == false) {
+ NewLoopProperties.append(LoopProperties.begin(), LoopProperties.end());
+ NewLoopProperties.push_back(
+ MDNode::get(Ctx, {MDString::get(Ctx, "llvm.loop.vectorize.enable"),
+ ConstantAsMetadata::get(ConstantInt::get(
+ llvm::Type::getInt1Ty(Ctx), 0))}));
+ LoopProperties = NewLoopProperties;
+ }
+ return createUnrollAndJamMetadata(Attrs, LoopProperties, HasUserTransforms);
+ }
+
+ // Apply all loop properties to the vectorized loop.
+ SmallVector<Metadata *, 4> FollowupLoopProperties;
+ FollowupLoopProperties.append(LoopProperties.begin(), LoopProperties.end());
+
+ // Don't vectorize an already vectorized loop.
+ FollowupLoopProperties.push_back(
+ MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.isvectorized")));
+
+ bool FollowupHasTransforms = false;
+ MDNode *Followup = createUnrollAndJamMetadata(Attrs, FollowupLoopProperties,
+ FollowupHasTransforms);
+
+ SmallVector<Metadata *, 4> Args;
+ TempMDTuple TempNode = MDNode::getTemporary(Ctx, None);
+ Args.push_back(TempNode.get());
+ Args.append(LoopProperties.begin(), LoopProperties.end());
+
+ // Setting vectorize.width
+ if (Attrs.VectorizeWidth > 0) {
+ Metadata *Vals[] = {
+ MDString::get(Ctx, "llvm.loop.vectorize.width"),
+ ConstantAsMetadata::get(ConstantInt::get(llvm::Type::getInt32Ty(Ctx),
+ Attrs.VectorizeWidth))};
+ Args.push_back(MDNode::get(Ctx, Vals));
}
- if (Attrs.PipelineDisabled) {
+ // Setting interleave.count
+ if (Attrs.InterleaveCount > 0) {
Metadata *Vals[] = {
- MDString::get(Ctx, "llvm.loop.pipeline.disable"),
- ConstantAsMetadata::get(ConstantInt::get(
- Type::getInt1Ty(Ctx), (Attrs.PipelineDisabled == true)))};
+ MDString::get(Ctx, "llvm.loop.interleave.count"),
+ ConstantAsMetadata::get(ConstantInt::get(llvm::Type::getInt32Ty(Ctx),
+ Attrs.InterleaveCount))};
Args.push_back(MDNode::get(Ctx, Vals));
}
- if (Attrs.PipelineInitiationInterval > 0) {
+ // Setting vectorize.enable
+ if (Attrs.VectorizeEnable != LoopAttributes::Unspecified) {
Metadata *Vals[] = {
- MDString::get(Ctx, "llvm.loop.pipeline.initiationinterval"),
+ MDString::get(Ctx, "llvm.loop.vectorize.enable"),
ConstantAsMetadata::get(ConstantInt::get(
- Type::getInt32Ty(Ctx), Attrs.PipelineInitiationInterval))};
+ llvm::Type::getInt1Ty(Ctx),
+ (Attrs.VectorizeEnable == LoopAttributes::Enable)))};
Args.push_back(MDNode::get(Ctx, Vals));
}
- // Set the first operand to itself.
+ if (FollowupHasTransforms)
+ Args.push_back(MDNode::get(
+ Ctx,
+ {MDString::get(Ctx, "llvm.loop.vectorize.followup_all"), Followup}));
+
MDNode *LoopID = MDNode::get(Ctx, Args);
LoopID->replaceOperandWith(0, LoopID);
+ HasUserTransforms = true;
return LoopID;
}
+MDNode *
+LoopInfo::createLoopDistributeMetadata(const LoopAttributes &Attrs,
+ ArrayRef<Metadata *> LoopProperties,
+ bool &HasUserTransforms) {
+ LLVMContext &Ctx = Header->getContext();
+
+ Optional<bool> Enabled;
+ if (Attrs.DistributeEnable == LoopAttributes::Disable)
+ Enabled = false;
+ if (Attrs.DistributeEnable == LoopAttributes::Enable)
+ Enabled = true;
+
+ if (Enabled != true) {
+ SmallVector<Metadata *, 4> NewLoopProperties;
+ if (Enabled == false) {
+ NewLoopProperties.append(LoopProperties.begin(), LoopProperties.end());
+ NewLoopProperties.push_back(
+ MDNode::get(Ctx, {MDString::get(Ctx, "llvm.loop.distribute.enable"),
+ ConstantAsMetadata::get(ConstantInt::get(
+ llvm::Type::getInt1Ty(Ctx), 0))}));
+ LoopProperties = NewLoopProperties;
+ }
+ return createLoopVectorizeMetadata(Attrs, LoopProperties,
+ HasUserTransforms);
+ }
+
+ bool FollowupHasTransforms = false;
+ MDNode *Followup =
+ createLoopVectorizeMetadata(Attrs, LoopProperties, FollowupHasTransforms);
+
+ SmallVector<Metadata *, 4> Args;
+ TempMDTuple TempNode = MDNode::getTemporary(Ctx, None);
+ Args.push_back(TempNode.get());
+ Args.append(LoopProperties.begin(), LoopProperties.end());
+
+ Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.distribute.enable"),
+ ConstantAsMetadata::get(ConstantInt::get(
+ llvm::Type::getInt1Ty(Ctx),
+ (Attrs.DistributeEnable == LoopAttributes::Enable)))};
+ Args.push_back(MDNode::get(Ctx, Vals));
+
+ if (FollowupHasTransforms)
+ Args.push_back(MDNode::get(
+ Ctx,
+ {MDString::get(Ctx, "llvm.loop.distribute.followup_all"), Followup}));
+
+ MDNode *LoopID = MDNode::get(Ctx, Args);
+ LoopID->replaceOperandWith(0, LoopID);
+ HasUserTransforms = true;
+ return LoopID;
+}
+
+MDNode *LoopInfo::createFullUnrollMetadata(const LoopAttributes &Attrs,
+ ArrayRef<Metadata *> LoopProperties,
+ bool &HasUserTransforms) {
+ LLVMContext &Ctx = Header->getContext();
+
+ Optional<bool> Enabled;
+ if (Attrs.UnrollEnable == LoopAttributes::Disable)
+ Enabled = false;
+ else if (Attrs.UnrollEnable == LoopAttributes::Full)
+ Enabled = true;
+
+ if (Enabled != true) {
+ SmallVector<Metadata *, 4> NewLoopProperties;
+ if (Enabled == false) {
+ NewLoopProperties.append(LoopProperties.begin(), LoopProperties.end());
+ NewLoopProperties.push_back(
+ MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.disable")));
+ LoopProperties = NewLoopProperties;
+ }
+ return createLoopDistributeMetadata(Attrs, LoopProperties,
+ HasUserTransforms);
+ }
+
+ SmallVector<Metadata *, 4> Args;
+ TempMDTuple TempNode = MDNode::getTemporary(Ctx, None);
+ Args.push_back(TempNode.get());
+ Args.append(LoopProperties.begin(), LoopProperties.end());
+ Args.push_back(MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.full")));
+
+ // No follow-up: there is no loop after full unrolling.
+ // TODO: Warn if there are transformations after full unrolling.
+
+ MDNode *LoopID = MDNode::getDistinct(Ctx, Args);
+ LoopID->replaceOperandWith(0, LoopID);
+ HasUserTransforms = true;
+ return LoopID;
+}
+
+MDNode *LoopInfo::createMetadata(
+ const LoopAttributes &Attrs,
+ llvm::ArrayRef<llvm::Metadata *> AdditionalLoopProperties,
+ bool &HasUserTransforms) {
+ SmallVector<Metadata *, 3> LoopProperties;
+
+ // If we have a valid start debug location for the loop, add it.
+ if (StartLoc) {
+ LoopProperties.push_back(StartLoc.getAsMDNode());
+
+ // If we also have a valid end debug location for the loop, add it.
+ if (EndLoc)
+ LoopProperties.push_back(EndLoc.getAsMDNode());
+ }
+
+ assert(!!AccGroup == Attrs.IsParallel &&
+ "There must be an access group iff the loop is parallel");
+ if (Attrs.IsParallel) {
+ LLVMContext &Ctx = Header->getContext();
+ LoopProperties.push_back(MDNode::get(
+ Ctx, {MDString::get(Ctx, "llvm.loop.parallel_accesses"), AccGroup}));
+ }
+
+ LoopProperties.insert(LoopProperties.end(), AdditionalLoopProperties.begin(),
+ AdditionalLoopProperties.end());
+ return createFullUnrollMetadata(Attrs, LoopProperties, HasUserTransforms);
+}
+
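
Every create*Metadata helper above ends with the same self-referential LoopID pattern; a condensed sketch of that pattern, assuming an llvm::LLVMContext &Ctx comes from the caller (illustration only):

    #include "llvm/ADT/None.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/Metadata.h"

    // Reserve operand 0 with a temporary node, build the distinct LoopID, then
    // point operand 0 back at the node itself (the canonical llvm.loop form).
    llvm::MDNode *makeSimpleLoopID(llvm::LLVMContext &Ctx) {
      llvm::SmallVector<llvm::Metadata *, 4> Args;
      llvm::TempMDTuple TempNode = llvm::MDNode::getTemporary(Ctx, llvm::None);
      Args.push_back(TempNode.get());
      Args.push_back(llvm::MDNode::get(
          Ctx, llvm::MDString::get(Ctx, "llvm.loop.unroll.disable")));
      llvm::MDNode *LoopID = llvm::MDNode::getDistinct(Ctx, Args);
      LoopID->replaceOperandWith(0, LoopID);
      return LoopID;
    }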
LoopAttributes::LoopAttributes(bool IsParallel)
: IsParallel(IsParallel), VectorizeEnable(LoopAttributes::Unspecified),
UnrollEnable(LoopAttributes::Unspecified),
@@ -174,15 +431,114 @@ void LoopAttributes::clear() {
}
LoopInfo::LoopInfo(BasicBlock *Header, const LoopAttributes &Attrs,
- const llvm::DebugLoc &StartLoc, const llvm::DebugLoc &EndLoc)
- : LoopID(nullptr), Header(Header), Attrs(Attrs) {
- LoopID =
- createMetadata(Header->getContext(), Attrs, StartLoc, EndLoc, AccGroup);
+ const llvm::DebugLoc &StartLoc, const llvm::DebugLoc &EndLoc,
+ LoopInfo *Parent)
+ : Header(Header), Attrs(Attrs), StartLoc(StartLoc), EndLoc(EndLoc),
+ Parent(Parent) {
+
+ if (Attrs.IsParallel) {
+ // Create an access group for this loop.
+ LLVMContext &Ctx = Header->getContext();
+ AccGroup = MDNode::getDistinct(Ctx, {});
+ }
+
+ if (!Attrs.IsParallel && Attrs.VectorizeWidth == 0 &&
+ Attrs.InterleaveCount == 0 && Attrs.UnrollCount == 0 &&
+ Attrs.UnrollAndJamCount == 0 && !Attrs.PipelineDisabled &&
+ Attrs.PipelineInitiationInterval == 0 &&
+ Attrs.VectorizeEnable == LoopAttributes::Unspecified &&
+ Attrs.UnrollEnable == LoopAttributes::Unspecified &&
+ Attrs.UnrollAndJamEnable == LoopAttributes::Unspecified &&
+ Attrs.DistributeEnable == LoopAttributes::Unspecified && !StartLoc &&
+ !EndLoc)
+ return;
+
+ TempLoopID = MDNode::getTemporary(Header->getContext(), None);
+}
+
+void LoopInfo::finish() {
+ // We did not annotate the loop body instructions because there are no
+ // attributes for this loop.
+ if (!TempLoopID)
+ return;
+
+ MDNode *LoopID;
+ LoopAttributes CurLoopAttr = Attrs;
+ LLVMContext &Ctx = Header->getContext();
+
+ if (Parent && (Parent->Attrs.UnrollAndJamEnable ||
+ Parent->Attrs.UnrollAndJamCount != 0)) {
+ // Parent unroll-and-jams this loop.
+    // Split the transformations into those that happen before the unroll-and-jam
+ // and those after.
+
+ LoopAttributes BeforeJam, AfterJam;
+
+ BeforeJam.IsParallel = AfterJam.IsParallel = Attrs.IsParallel;
+
+ BeforeJam.VectorizeWidth = Attrs.VectorizeWidth;
+ BeforeJam.InterleaveCount = Attrs.InterleaveCount;
+ BeforeJam.VectorizeEnable = Attrs.VectorizeEnable;
+ BeforeJam.DistributeEnable = Attrs.DistributeEnable;
+
+ switch (Attrs.UnrollEnable) {
+ case LoopAttributes::Unspecified:
+ case LoopAttributes::Disable:
+ BeforeJam.UnrollEnable = Attrs.UnrollEnable;
+ AfterJam.UnrollEnable = Attrs.UnrollEnable;
+ break;
+ case LoopAttributes::Full:
+ BeforeJam.UnrollEnable = LoopAttributes::Full;
+ break;
+ case LoopAttributes::Enable:
+ AfterJam.UnrollEnable = LoopAttributes::Enable;
+ break;
+ }
+
+ AfterJam.UnrollCount = Attrs.UnrollCount;
+ AfterJam.PipelineDisabled = Attrs.PipelineDisabled;
+ AfterJam.PipelineInitiationInterval = Attrs.PipelineInitiationInterval;
+
+ // If this loop is subject of an unroll-and-jam by the parent loop, and has
+ // an unroll-and-jam annotation itself, we have to decide whether to first
+ // apply the parent's unroll-and-jam or this loop's unroll-and-jam. The
+ // UnrollAndJam pass processes loops from inner to outer, so we apply the
+ // inner first.
+ BeforeJam.UnrollAndJamCount = Attrs.UnrollAndJamCount;
+ BeforeJam.UnrollAndJamEnable = Attrs.UnrollAndJamEnable;
+
+    // Set the inner followup metadata to be processed by the outer loop. Only
+ // consider the first inner loop.
+ if (!Parent->UnrollAndJamInnerFollowup) {
+ // Splitting the attributes into a BeforeJam and an AfterJam part will
+ // stop 'llvm.loop.isvectorized' (generated by vectorization in BeforeJam)
+      // from being forwarded to the AfterJam part. We detect the situation here and
+ // add it manually.
+ SmallVector<Metadata *, 1> BeforeLoopProperties;
+ if (BeforeJam.VectorizeEnable != LoopAttributes::Unspecified ||
+ BeforeJam.InterleaveCount != 0 || BeforeJam.VectorizeWidth != 0)
+ BeforeLoopProperties.push_back(
+ MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.isvectorized")));
+
+ bool InnerFollowupHasTransform = false;
+ MDNode *InnerFollowup = createMetadata(AfterJam, BeforeLoopProperties,
+ InnerFollowupHasTransform);
+ if (InnerFollowupHasTransform)
+ Parent->UnrollAndJamInnerFollowup = InnerFollowup;
+ }
+
+ CurLoopAttr = BeforeJam;
+ }
+
+ bool HasUserTransforms = false;
+ LoopID = createMetadata(CurLoopAttr, {}, HasUserTransforms);
+ TempLoopID->replaceAllUsesWith(LoopID);
}
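
The Parent link and the BeforeJam/AfterJam split above exist for nests like the following, where the outer loop is unroll-and-jammed and the inner loop carries its own hints. The example is an assumed illustration (the unroll_and_jam pragma spelling is assumed here), not code from the patch:

    void saxpy2d(float *A, const float *B, int N, int M) {
    #pragma unroll_and_jam(2)              // outer loop: unroll-and-jam by 2
      for (int i = 0; i < N; ++i) {
    #pragma clang loop vectorize(enable)   // inner hint: split into the parts
        for (int j = 0; j < M; ++j)        // applied before and after the jam
          A[i * M + j] += B[i * M + j];
      }
    }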
void LoopInfoStack::push(BasicBlock *Header, const llvm::DebugLoc &StartLoc,
const llvm::DebugLoc &EndLoc) {
- Active.push_back(LoopInfo(Header, StagedAttrs, StartLoc, EndLoc));
+ Active.push_back(LoopInfo(Header, StagedAttrs, StartLoc, EndLoc,
+ Active.empty() ? nullptr : &Active.back()));
// Clear the attributes so nested loops do not inherit them.
StagedAttrs.clear();
}
@@ -209,13 +565,13 @@ void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
// Translate opencl_unroll_hint attribute argument to
// equivalent LoopHintAttr enums.
// OpenCL v2.0 s6.11.5:
- // 0 - full unroll (no argument).
+ // 0 - enable unroll (no argument).
// 1 - disable unroll.
// other positive integer n - unroll by n.
if (OpenCLHint) {
ValueInt = OpenCLHint->getUnrollHint();
if (ValueInt == 0) {
- State = LoopHintAttr::Full;
+ State = LoopHintAttr::Enable;
} else if (ValueInt != 1) {
Option = LoopHintAttr::UnrollCount;
State = LoopHintAttr::Numeric;
@@ -365,6 +721,7 @@ void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
void LoopInfoStack::pop() {
assert(!Active.empty() && "No active loops to pop");
+ Active.back().finish();
Active.pop_back();
}
diff --git a/lib/CodeGen/CGLoopInfo.h b/lib/CodeGen/CGLoopInfo.h
index 84ba03bfb00b..35d0e00527b9 100644
--- a/lib/CodeGen/CGLoopInfo.h
+++ b/lib/CodeGen/CGLoopInfo.h
@@ -1,9 +1,8 @@
//===---- CGLoopInfo.h - LLVM CodeGen for loop metadata -*- C++ -*---------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -79,10 +78,11 @@ class LoopInfo {
public:
/// Construct a new LoopInfo for the loop with entry Header.
LoopInfo(llvm::BasicBlock *Header, const LoopAttributes &Attrs,
- const llvm::DebugLoc &StartLoc, const llvm::DebugLoc &EndLoc);
+ const llvm::DebugLoc &StartLoc, const llvm::DebugLoc &EndLoc,
+ LoopInfo *Parent);
/// Get the loop id metadata for this loop.
- llvm::MDNode *getLoopID() const { return LoopID; }
+ llvm::MDNode *getLoopID() const { return TempLoopID.get(); }
/// Get the header block of this loop.
llvm::BasicBlock *getHeader() const { return Header; }
@@ -93,15 +93,92 @@ public:
/// Return this loop's access group or nullptr if it does not have one.
llvm::MDNode *getAccessGroup() const { return AccGroup; }
+ /// Create the loop's metadata. Must be called after its nested loops have
+ /// been processed.
+ void finish();
+
private:
/// Loop ID metadata.
- llvm::MDNode *LoopID;
+ llvm::TempMDTuple TempLoopID;
/// Header block of this loop.
llvm::BasicBlock *Header;
/// The attributes for this loop.
LoopAttributes Attrs;
/// The access group for memory accesses parallel to this loop.
llvm::MDNode *AccGroup = nullptr;
+ /// Start location of this loop.
+ llvm::DebugLoc StartLoc;
+ /// End location of this loop.
+ llvm::DebugLoc EndLoc;
+ /// The next outer loop, or nullptr if this is the outermost loop.
+ LoopInfo *Parent;
+ /// If this loop has unroll-and-jam metadata, this can be set by the inner
+ /// loop's LoopInfo to set the llvm.loop.unroll_and_jam.followup_inner
+ /// metadata.
+ llvm::MDNode *UnrollAndJamInnerFollowup = nullptr;
+
+ /// Create a LoopID without any transformations.
+ llvm::MDNode *
+ createLoopPropertiesMetadata(llvm::ArrayRef<llvm::Metadata *> LoopProperties);
+
+ /// Create a LoopID for transformations.
+ ///
+ /// The methods call each other in case multiple transformations are applied
+ /// to a loop. The transformation first to be applied will use LoopID of the
+ /// next transformation in its followup attribute.
+ ///
+ /// @param Attrs The loop's transformations.
+ /// @param LoopProperties Non-transformation properties such as debug
+ /// location, parallel accesses and disabled
+ /// transformations. These are added to the returned
+ /// LoopID.
+ /// @param HasUserTransforms [out] Set to true if the returned MDNode encodes
+ /// at least one transformation.
+ ///
+ /// @return A LoopID (metadata node) that can be used for the llvm.loop
+ /// annotation or followup-attribute.
+ /// @{
+ llvm::MDNode *
+ createPipeliningMetadata(const LoopAttributes &Attrs,
+ llvm::ArrayRef<llvm::Metadata *> LoopProperties,
+ bool &HasUserTransforms);
+ llvm::MDNode *
+ createPartialUnrollMetadata(const LoopAttributes &Attrs,
+ llvm::ArrayRef<llvm::Metadata *> LoopProperties,
+ bool &HasUserTransforms);
+ llvm::MDNode *
+ createUnrollAndJamMetadata(const LoopAttributes &Attrs,
+ llvm::ArrayRef<llvm::Metadata *> LoopProperties,
+ bool &HasUserTransforms);
+ llvm::MDNode *
+ createLoopVectorizeMetadata(const LoopAttributes &Attrs,
+ llvm::ArrayRef<llvm::Metadata *> LoopProperties,
+ bool &HasUserTransforms);
+ llvm::MDNode *
+ createLoopDistributeMetadata(const LoopAttributes &Attrs,
+ llvm::ArrayRef<llvm::Metadata *> LoopProperties,
+ bool &HasUserTransforms);
+ llvm::MDNode *
+ createFullUnrollMetadata(const LoopAttributes &Attrs,
+ llvm::ArrayRef<llvm::Metadata *> LoopProperties,
+ bool &HasUserTransforms);
+ /// @}
+
+ /// Create a LoopID for this loop, including transformation-unspecific
+ /// metadata such as debug location.
+ ///
+ /// @param Attrs This loop's attributes and transformations.
+ /// @param LoopProperties Additional non-transformation properties to add
+ /// to the LoopID, such as transformation-specific
+ /// metadata that are not covered by @p Attrs.
+ /// @param HasUserTransforms [out] Set to true if the returned MDNode encodes
+ /// at least one transformation.
+ ///
+ /// @return A LoopID (metadata node) that can be used for the llvm.loop
+ /// annotation.
+ llvm::MDNode *createMetadata(const LoopAttributes &Attrs,
+ llvm::ArrayRef<llvm::Metadata *> LoopProperties,
+ bool &HasUserTransforms);
};
/// A stack of loop information corresponding to loop nesting levels.
diff --git a/lib/CodeGen/CGNonTrivialStruct.cpp b/lib/CodeGen/CGNonTrivialStruct.cpp
index c6a96a912622..caf62d2ac93a 100644
--- a/lib/CodeGen/CGNonTrivialStruct.cpp
+++ b/lib/CodeGen/CGNonTrivialStruct.cpp
@@ -1,9 +1,8 @@
//===--- CGNonTrivialStruct.cpp - Emit Special Functions for C Structs ----===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -15,6 +14,7 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
+#include "clang/CodeGen/CodeGenABITypes.h"
#include "llvm/Support/ScopedPrinter.h"
#include <array>
@@ -84,23 +84,22 @@ struct CopyStructVisitor : StructVisitor<Derived>,
template <class... Ts>
void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT,
- const FieldDecl *FD, CharUnits CurStructOffsset,
- Ts &&... Args) {
+ const FieldDecl *FD, CharUnits CurStructOffset, Ts &&... Args) {
if (PCK)
asDerived().flushTrivialFields(std::forward<Ts>(Args)...);
}
template <class... Ts>
void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT,
- const FieldDecl *FD, CharUnits CurStructOffsset,
+ const FieldDecl *FD, CharUnits CurStructOffset,
Ts &&... Args) {
if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
asDerived().visitArray(PCK, AT, FT.isVolatileQualified(), FD,
- CurStructOffsset, std::forward<Ts>(Args)...);
+ CurStructOffset, std::forward<Ts>(Args)...);
return;
}
- Super::visitWithKind(PCK, FT, FD, CurStructOffsset,
+ Super::visitWithKind(PCK, FT, FD, CurStructOffset,
std::forward<Ts>(Args)...);
}
@@ -140,8 +139,8 @@ struct CopyStructVisitor : StructVisitor<Derived>,
// <alignment-info> ::= <dst-alignment> ["_" <src-alignment>]
// <struct-field-info> ::= <field-info>+
// <field-info> ::= <struct-or-scalar-field-info> | <array-field-info>
-// <struct-or-scalar-field-info> ::= <struct-field-info> | <strong-field-info> |
-// <trivial-field-info>
+// <struct-or-scalar-field-info> ::= "_S" <struct-field-info> |
+// <strong-field-info> | <trivial-field-info>
// <array-field-info> ::= "_AB" <array-offset> "s" <element-size> "n"
// <num-elements> <innermost-element-info> "_AE"
// <innermost-element-info> ::= <struct-or-scalar-field-info>
@@ -176,6 +175,7 @@ template <class Derived> struct GenFuncNameBase {
void visitStruct(QualType QT, const FieldDecl *FD,
CharUnits CurStructOffset) {
CharUnits FieldOffset = CurStructOffset + asDerived().getFieldOffset(FD);
+ appendStr("_S");
asDerived().visitStructFields(QT, FieldOffset);
}
@@ -253,11 +253,11 @@ struct GenBinaryFuncName : CopyStructVisitor<GenBinaryFuncName<IsMove>, IsMove>,
}
void visitVolatileTrivial(QualType FT, const FieldDecl *FD,
- CharUnits CurStackOffset) {
+ CharUnits CurStructOffset) {
// Because volatile fields can be bit-fields and are individually copied,
// their offset and width are in bits.
uint64_t OffsetInBits =
- this->Ctx.toBits(CurStackOffset) + this->getFieldOffsetInBits(FD);
+ this->Ctx.toBits(CurStructOffset) + this->getFieldOffsetInBits(FD);
this->appendStr("_tv" + llvm::to_string(OffsetInBits) + "w" +
llvm::to_string(getFieldSize(FD, FT, this->Ctx)));
}
@@ -286,8 +286,7 @@ struct GenDestructorFuncName : GenUnaryFuncName<GenDestructorFuncName>,
using Super = DestructedTypeVisitor<GenDestructorFuncName>;
GenDestructorFuncName(const char *Prefix, CharUnits DstAlignment,
ASTContext &Ctx)
- : GenUnaryFuncName<GenDestructorFuncName>(Prefix, DstAlignment,
- Ctx) {}
+ : GenUnaryFuncName<GenDestructorFuncName>(Prefix, DstAlignment, Ctx) {}
void visitWithKind(QualType::DestructionKind DK, QualType FT,
const FieldDecl *FD, CharUnits CurStructOffset) {
if (const auto *AT = getContext().getAsArrayType(FT)) {
@@ -322,19 +321,19 @@ static const CGFunctionInfo &getFunctionInfo(CodeGenModule &CGM,
// functions.
template <class Derived> struct GenFuncBase {
template <size_t N>
- void visitStruct(QualType FT, const FieldDecl *FD, CharUnits CurStackOffset,
+ void visitStruct(QualType FT, const FieldDecl *FD, CharUnits CurStructOffset,
std::array<Address, N> Addrs) {
this->asDerived().callSpecialFunction(
- FT, CurStackOffset + asDerived().getFieldOffset(FD), Addrs);
+ FT, CurStructOffset + asDerived().getFieldOffset(FD), Addrs);
}
template <class FieldKind, size_t N>
void visitArray(FieldKind FK, const ArrayType *AT, bool IsVolatile,
- const FieldDecl *FD, CharUnits CurStackOffset,
+ const FieldDecl *FD, CharUnits CurStructOffset,
std::array<Address, N> Addrs) {
// Non-volatile trivial fields are copied when flushTrivialFields is called.
if (!FK)
- return asDerived().visitTrivial(QualType(AT, 0), FD, CurStackOffset,
+ return asDerived().visitTrivial(QualType(AT, 0), FD, CurStructOffset,
Addrs);
asDerived().flushTrivialFields(Addrs);
@@ -345,7 +344,7 @@ template <class Derived> struct GenFuncBase {
QualType BaseEltQT;
std::array<Address, N> StartAddrs = Addrs;
for (unsigned I = 0; I < N; ++I)
- StartAddrs[I] = getAddrWithOffset(Addrs[I], CurStackOffset, FD);
+ StartAddrs[I] = getAddrWithOffset(Addrs[I], CurStructOffset, FD);
Address DstAddr = StartAddrs[DstIdx];
llvm::Value *NumElts = CGF.emitArrayLength(AT, BaseEltQT, DstAddr);
unsigned BaseEltSize = Ctx.getTypeSizeInChars(BaseEltQT).getQuantity();
@@ -414,8 +413,7 @@ template <class Derived> struct GenFuncBase {
if (Offset.getQuantity() == 0)
return Addr;
Addr = CGF->Builder.CreateBitCast(Addr, CGF->CGM.Int8PtrTy);
- Addr = CGF->Builder.CreateConstInBoundsGEP(Addr, Offset.getQuantity(),
- CharUnits::One());
+ Addr = CGF->Builder.CreateConstInBoundsGEP(Addr, Offset.getQuantity());
return CGF->Builder.CreateBitCast(Addr, CGF->CGM.Int8PtrPtrTy);
}
@@ -586,15 +584,15 @@ struct GenDestructor : StructVisitor<GenDestructor>,
}
void visitARCStrong(QualType QT, const FieldDecl *FD,
- CharUnits CurStackOffset, std::array<Address, 1> Addrs) {
+ CharUnits CurStructOffset, std::array<Address, 1> Addrs) {
CGF->destroyARCStrongImprecise(
- *CGF, getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD), QT);
+ *CGF, getAddrWithOffset(Addrs[DstIdx], CurStructOffset, FD), QT);
}
- void visitARCWeak(QualType QT, const FieldDecl *FD, CharUnits CurStackOffset,
+ void visitARCWeak(QualType QT, const FieldDecl *FD, CharUnits CurStructOffset,
std::array<Address, 1> Addrs) {
CGF->destroyARCWeak(
- *CGF, getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD), QT);
+ *CGF, getAddrWithOffset(Addrs[DstIdx], CurStructOffset, FD), QT);
}
void callSpecialFunction(QualType FT, CharUnits Offset,
@@ -627,35 +625,35 @@ struct GenDefaultInitialize
}
void visitARCStrong(QualType QT, const FieldDecl *FD,
- CharUnits CurStackOffset, std::array<Address, 1> Addrs) {
+ CharUnits CurStructOffset, std::array<Address, 1> Addrs) {
CGF->EmitNullInitialization(
- getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD), QT);
+ getAddrWithOffset(Addrs[DstIdx], CurStructOffset, FD), QT);
}
- void visitARCWeak(QualType QT, const FieldDecl *FD, CharUnits CurStackOffset,
+ void visitARCWeak(QualType QT, const FieldDecl *FD, CharUnits CurStructOffset,
std::array<Address, 1> Addrs) {
CGF->EmitNullInitialization(
- getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD), QT);
+ getAddrWithOffset(Addrs[DstIdx], CurStructOffset, FD), QT);
}
template <class FieldKind, size_t... Is>
void visitArray(FieldKind FK, const ArrayType *AT, bool IsVolatile,
- const FieldDecl *FD, CharUnits CurStackOffset,
+ const FieldDecl *FD, CharUnits CurStructOffset,
std::array<Address, 1> Addrs) {
if (!FK)
- return visitTrivial(QualType(AT, 0), FD, CurStackOffset, Addrs);
+ return visitTrivial(QualType(AT, 0), FD, CurStructOffset, Addrs);
ASTContext &Ctx = getContext();
CharUnits Size = Ctx.getTypeSizeInChars(QualType(AT, 0));
QualType EltTy = Ctx.getBaseElementType(QualType(AT, 0));
if (Size < CharUnits::fromQuantity(16) || EltTy->getAs<RecordType>()) {
- GenFuncBaseTy::visitArray(FK, AT, IsVolatile, FD, CurStackOffset, Addrs);
+ GenFuncBaseTy::visitArray(FK, AT, IsVolatile, FD, CurStructOffset, Addrs);
return;
}
llvm::Constant *SizeVal = CGF->Builder.getInt64(Size.getQuantity());
- Address DstAddr = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD);
+ Address DstAddr = getAddrWithOffset(Addrs[DstIdx], CurStructOffset, FD);
Address Loc = CGF->Builder.CreateElementBitCast(DstAddr, CGF->Int8Ty);
CGF->Builder.CreateMemSet(Loc, CGF->Builder.getInt8(0), SizeVal,
IsVolatile);
@@ -673,24 +671,26 @@ struct GenCopyConstructor : GenBinaryFunc<GenCopyConstructor, false> {
: GenBinaryFunc<GenCopyConstructor, false>(Ctx) {}
void visitARCStrong(QualType QT, const FieldDecl *FD,
- CharUnits CurStackOffset, std::array<Address, 2> Addrs) {
- Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD);
- Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStackOffset, FD);
+ CharUnits CurStructOffset, std::array<Address, 2> Addrs) {
+ Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStructOffset, FD);
+ Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStructOffset, FD);
llvm::Value *SrcVal = CGF->EmitLoadOfScalar(
Addrs[SrcIdx], QT.isVolatileQualified(), QT, SourceLocation());
llvm::Value *Val = CGF->EmitARCRetain(QT, SrcVal);
CGF->EmitStoreOfScalar(Val, CGF->MakeAddrLValue(Addrs[DstIdx], QT), true);
}
- void visitARCWeak(QualType QT, const FieldDecl *FD, CharUnits CurStackOffset,
+ void visitARCWeak(QualType QT, const FieldDecl *FD, CharUnits CurStructOffset,
std::array<Address, 2> Addrs) {
- Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD);
- Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStackOffset, FD);
+ Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStructOffset, FD);
+ Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStructOffset, FD);
CGF->EmitARCCopyWeak(Addrs[DstIdx], Addrs[SrcIdx]);
}
void callSpecialFunction(QualType FT, CharUnits Offset,
std::array<Address, 2> Addrs) {
+ Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], Offset);
+ Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], Offset);
CGF->callCStructCopyConstructor(CGF->MakeAddrLValue(Addrs[DstIdx], FT),
CGF->MakeAddrLValue(Addrs[SrcIdx], FT));
}
@@ -701,9 +701,9 @@ struct GenMoveConstructor : GenBinaryFunc<GenMoveConstructor, true> {
: GenBinaryFunc<GenMoveConstructor, true>(Ctx) {}
void visitARCStrong(QualType QT, const FieldDecl *FD,
- CharUnits CurStackOffset, std::array<Address, 2> Addrs) {
- Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD);
- Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStackOffset, FD);
+ CharUnits CurStructOffset, std::array<Address, 2> Addrs) {
+ Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStructOffset, FD);
+ Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStructOffset, FD);
LValue SrcLV = CGF->MakeAddrLValue(Addrs[SrcIdx], QT);
llvm::Value *SrcVal =
CGF->EmitLoadOfLValue(SrcLV, SourceLocation()).getScalarVal();
@@ -712,15 +712,17 @@ struct GenMoveConstructor : GenBinaryFunc<GenMoveConstructor, true> {
/* isInitialization */ true);
}
- void visitARCWeak(QualType QT, const FieldDecl *FD, CharUnits CurStackOffset,
+ void visitARCWeak(QualType QT, const FieldDecl *FD, CharUnits CurStructOffset,
std::array<Address, 2> Addrs) {
- Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD);
- Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStackOffset, FD);
+ Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStructOffset, FD);
+ Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStructOffset, FD);
CGF->EmitARCMoveWeak(Addrs[DstIdx], Addrs[SrcIdx]);
}
void callSpecialFunction(QualType FT, CharUnits Offset,
std::array<Address, 2> Addrs) {
+ Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], Offset);
+ Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], Offset);
CGF->callCStructMoveConstructor(CGF->MakeAddrLValue(Addrs[DstIdx], FT),
CGF->MakeAddrLValue(Addrs[SrcIdx], FT));
}
@@ -731,24 +733,26 @@ struct GenCopyAssignment : GenBinaryFunc<GenCopyAssignment, false> {
: GenBinaryFunc<GenCopyAssignment, false>(Ctx) {}
void visitARCStrong(QualType QT, const FieldDecl *FD,
- CharUnits CurStackOffset, std::array<Address, 2> Addrs) {
- Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD);
- Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStackOffset, FD);
+ CharUnits CurStructOffset, std::array<Address, 2> Addrs) {
+ Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStructOffset, FD);
+ Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStructOffset, FD);
llvm::Value *SrcVal = CGF->EmitLoadOfScalar(
Addrs[SrcIdx], QT.isVolatileQualified(), QT, SourceLocation());
CGF->EmitARCStoreStrong(CGF->MakeAddrLValue(Addrs[DstIdx], QT), SrcVal,
false);
}
- void visitARCWeak(QualType QT, const FieldDecl *FD, CharUnits CurStackOffset,
+ void visitARCWeak(QualType QT, const FieldDecl *FD, CharUnits CurStructOffset,
std::array<Address, 2> Addrs) {
- Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD);
- Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStackOffset, FD);
+ Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStructOffset, FD);
+ Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStructOffset, FD);
CGF->emitARCCopyAssignWeak(QT, Addrs[DstIdx], Addrs[SrcIdx]);
}
void callSpecialFunction(QualType FT, CharUnits Offset,
std::array<Address, 2> Addrs) {
+ Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], Offset);
+ Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], Offset);
CGF->callCStructCopyAssignmentOperator(
CGF->MakeAddrLValue(Addrs[DstIdx], FT),
CGF->MakeAddrLValue(Addrs[SrcIdx], FT));
@@ -760,9 +764,9 @@ struct GenMoveAssignment : GenBinaryFunc<GenMoveAssignment, true> {
: GenBinaryFunc<GenMoveAssignment, true>(Ctx) {}
void visitARCStrong(QualType QT, const FieldDecl *FD,
- CharUnits CurStackOffset, std::array<Address, 2> Addrs) {
- Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD);
- Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStackOffset, FD);
+ CharUnits CurStructOffset, std::array<Address, 2> Addrs) {
+ Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStructOffset, FD);
+ Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStructOffset, FD);
LValue SrcLV = CGF->MakeAddrLValue(Addrs[SrcIdx], QT);
llvm::Value *SrcVal =
CGF->EmitLoadOfLValue(SrcLV, SourceLocation()).getScalarVal();
@@ -774,15 +778,17 @@ struct GenMoveAssignment : GenBinaryFunc<GenMoveAssignment, true> {
CGF->EmitARCRelease(DstVal, ARCImpreciseLifetime);
}
- void visitARCWeak(QualType QT, const FieldDecl *FD, CharUnits CurStackOffset,
+ void visitARCWeak(QualType QT, const FieldDecl *FD, CharUnits CurStructOffset,
std::array<Address, 2> Addrs) {
- Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD);
- Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStackOffset, FD);
+ Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStructOffset, FD);
+ Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStructOffset, FD);
CGF->emitARCMoveAssignWeak(QT, Addrs[DstIdx], Addrs[SrcIdx]);
}
void callSpecialFunction(QualType FT, CharUnits Offset,
std::array<Address, 2> Addrs) {
+ Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], Offset);
+ Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], Offset);
CGF->callCStructMoveAssignmentOperator(
CGF->MakeAddrLValue(Addrs[DstIdx], FT),
CGF->MakeAddrLValue(Addrs[SrcIdx], FT));
@@ -817,6 +823,29 @@ static void callSpecialFunction(G &&Gen, StringRef FuncName, QualType QT,
Gen.callFunc(FuncName, QT, Addrs, CGF);
}
+template <size_t N> std::array<Address, N> createNullAddressArray();
+
+template <> std::array<Address, 1> createNullAddressArray() {
+ return std::array<Address, 1>({{Address(nullptr, CharUnits::Zero())}});
+}
+
+template <> std::array<Address, 2> createNullAddressArray() {
+ return std::array<Address, 2>({{Address(nullptr, CharUnits::Zero()),
+ Address(nullptr, CharUnits::Zero())}});
+}
+
+template <class G, size_t N>
+static llvm::Function *
+getSpecialFunction(G &&Gen, StringRef FuncName, QualType QT, bool IsVolatile,
+ std::array<CharUnits, N> Alignments, CodeGenModule &CGM) {
+ QT = IsVolatile ? QT.withVolatile() : QT;
+ // The following call requires an array of addresses as arguments, but doesn't
+ // actually use them (it overwrites them with the addresses of the arguments
+ // of the created function).
+ return Gen.getFunction(FuncName, QT, createNullAddressArray<N>(), Alignments,
+ CGM);
+}
+
// Functions to emit calls to the special functions of a non-trivial C struct.
void CodeGenFunction::callCStructDefaultConstructor(LValue Dst) {
bool IsVolatile = Dst.isVolatile();
@@ -828,18 +857,16 @@ void CodeGenFunction::callCStructDefaultConstructor(LValue Dst) {
IsVolatile, *this, std::array<Address, 1>({{DstPtr}}));
}
-std::string
-CodeGenFunction::getNonTrivialCopyConstructorStr(QualType QT,
- CharUnits Alignment,
- bool IsVolatile,
- ASTContext &Ctx) {
+std::string CodeGenFunction::getNonTrivialCopyConstructorStr(
+ QualType QT, CharUnits Alignment, bool IsVolatile, ASTContext &Ctx) {
GenBinaryFuncName<false> GenName("", Alignment, Alignment, Ctx);
return GenName.getName(QT, IsVolatile);
}
-std::string
-CodeGenFunction::getNonTrivialDestructorStr(QualType QT, CharUnits Alignment,
- bool IsVolatile, ASTContext &Ctx) {
+std::string CodeGenFunction::getNonTrivialDestructorStr(QualType QT,
+ CharUnits Alignment,
+ bool IsVolatile,
+ ASTContext &Ctx) {
GenDestructorFuncName GenName("", Alignment, Ctx);
return GenName.getName(QT, IsVolatile);
}
@@ -904,3 +931,69 @@ void CodeGenFunction::callCStructMoveAssignmentOperator(LValue Dst, LValue Src
callSpecialFunction(GenMoveAssignment(getContext()), FuncName, QT, IsVolatile,
*this, std::array<Address, 2>({{DstPtr, SrcPtr}}));
}
+
+llvm::Function *clang::CodeGen::getNonTrivialCStructDefaultConstructor(
+ CodeGenModule &CGM, CharUnits DstAlignment, bool IsVolatile, QualType QT) {
+ ASTContext &Ctx = CGM.getContext();
+ GenDefaultInitializeFuncName GenName(DstAlignment, Ctx);
+ std::string FuncName = GenName.getName(QT, IsVolatile);
+ return getSpecialFunction(GenDefaultInitialize(Ctx), FuncName, QT, IsVolatile,
+ std::array<CharUnits, 1>({{DstAlignment}}), CGM);
+}
+
+llvm::Function *clang::CodeGen::getNonTrivialCStructCopyConstructor(
+ CodeGenModule &CGM, CharUnits DstAlignment, CharUnits SrcAlignment,
+ bool IsVolatile, QualType QT) {
+ ASTContext &Ctx = CGM.getContext();
+ GenBinaryFuncName<false> GenName("__copy_constructor_", DstAlignment,
+ SrcAlignment, Ctx);
+ std::string FuncName = GenName.getName(QT, IsVolatile);
+ return getSpecialFunction(
+ GenCopyConstructor(Ctx), FuncName, QT, IsVolatile,
+ std::array<CharUnits, 2>({{DstAlignment, SrcAlignment}}), CGM);
+}
+
+llvm::Function *clang::CodeGen::getNonTrivialCStructMoveConstructor(
+ CodeGenModule &CGM, CharUnits DstAlignment, CharUnits SrcAlignment,
+ bool IsVolatile, QualType QT) {
+ ASTContext &Ctx = CGM.getContext();
+ GenBinaryFuncName<true> GenName("__move_constructor_", DstAlignment,
+ SrcAlignment, Ctx);
+ std::string FuncName = GenName.getName(QT, IsVolatile);
+ return getSpecialFunction(
+ GenMoveConstructor(Ctx), FuncName, QT, IsVolatile,
+ std::array<CharUnits, 2>({{DstAlignment, SrcAlignment}}), CGM);
+}
+
+llvm::Function *clang::CodeGen::getNonTrivialCStructCopyAssignmentOperator(
+ CodeGenModule &CGM, CharUnits DstAlignment, CharUnits SrcAlignment,
+ bool IsVolatile, QualType QT) {
+ ASTContext &Ctx = CGM.getContext();
+ GenBinaryFuncName<false> GenName("__copy_assignment_", DstAlignment,
+ SrcAlignment, Ctx);
+ std::string FuncName = GenName.getName(QT, IsVolatile);
+ return getSpecialFunction(
+ GenCopyAssignment(Ctx), FuncName, QT, IsVolatile,
+ std::array<CharUnits, 2>({{DstAlignment, SrcAlignment}}), CGM);
+}
+
+llvm::Function *clang::CodeGen::getNonTrivialCStructMoveAssignmentOperator(
+ CodeGenModule &CGM, CharUnits DstAlignment, CharUnits SrcAlignment,
+ bool IsVolatile, QualType QT) {
+ ASTContext &Ctx = CGM.getContext();
+ GenBinaryFuncName<true> GenName("__move_assignment_", DstAlignment,
+ SrcAlignment, Ctx);
+ std::string FuncName = GenName.getName(QT, IsVolatile);
+ return getSpecialFunction(
+ GenMoveAssignment(Ctx), FuncName, QT, IsVolatile,
+ std::array<CharUnits, 2>({{DstAlignment, SrcAlignment}}), CGM);
+}
+
+llvm::Function *clang::CodeGen::getNonTrivialCStructDestructor(
+ CodeGenModule &CGM, CharUnits DstAlignment, bool IsVolatile, QualType QT) {
+ ASTContext &Ctx = CGM.getContext();
+ GenDestructorFuncName GenName("__destructor_", DstAlignment, Ctx);
+ std::string FuncName = GenName.getName(QT, IsVolatile);
+ return getSpecialFunction(GenDestructor(Ctx), FuncName, QT, IsVolatile,
+ std::array<CharUnits, 1>({{DstAlignment}}), CGM);
+}
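
The hunk above also adds free-standing entry points that hand back the lazily synthesized C-struct special functions. A minimal caller sketch using only the signatures introduced here; CGM, DstAlign, SrcAlign and RecordTy are assumed to exist in the caller and are purely illustrative:

  // Hypothetical caller of the new clang::CodeGen entry points.
  llvm::Function *Copy = clang::CodeGen::getNonTrivialCStructCopyConstructor(
      CGM, DstAlign, SrcAlign, /*IsVolatile=*/false, RecordTy);
  llvm::Function *Dtor = clang::CodeGen::getNonTrivialCStructDestructor(
      CGM, DstAlign, /*IsVolatile=*/false, RecordTy);
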
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index 9c66ff0e8fb2..1dd7ec52230e 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -1,9 +1,8 @@
//===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -15,6 +14,7 @@
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
@@ -22,7 +22,6 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
using namespace clang;
@@ -62,7 +61,12 @@ CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
// Get the method.
const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
const Expr *SubExpr = E->getSubExpr();
- assert(BoxingMethod && "BoxingMethod is null");
+
+ if (E->isExpressibleAsConstantInitializer()) {
+ ConstantEmitter ConstEmitter(CGM);
+ return ConstEmitter.tryEmitAbstract(E, E->getType());
+ }
+
assert(BoxingMethod->isClassMethod() && "BoxingMethod must be a class method");
Selector Sel = BoxingMethod->getSelector();
@@ -160,9 +164,8 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
if (ALE) {
// Emit the element and store it to the appropriate array slot.
const Expr *Rhs = ALE->getElement(i);
- LValue LV = MakeAddrLValue(
- Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
- ElementType, AlignmentSource::Decl);
+ LValue LV = MakeAddrLValue(Builder.CreateConstArrayGEP(Objects, i),
+ ElementType, AlignmentSource::Decl);
llvm::Value *value = EmitScalarExpr(Rhs);
EmitStoreThroughLValue(RValue::get(value), LV, true);
@@ -172,17 +175,15 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
} else {
// Emit the key and store it to the appropriate array slot.
const Expr *Key = DLE->getKeyValueElement(i).Key;
- LValue KeyLV = MakeAddrLValue(
- Builder.CreateConstArrayGEP(Keys, i, getPointerSize()),
- ElementType, AlignmentSource::Decl);
+ LValue KeyLV = MakeAddrLValue(Builder.CreateConstArrayGEP(Keys, i),
+ ElementType, AlignmentSource::Decl);
llvm::Value *keyValue = EmitScalarExpr(Key);
EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true);
// Emit the value and store it to the appropriate array slot.
const Expr *Value = DLE->getKeyValueElement(i).Value;
- LValue ValueLV = MakeAddrLValue(
- Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
- ElementType, AlignmentSource::Decl);
+ LValue ValueLV = MakeAddrLValue(Builder.CreateConstArrayGEP(Objects, i),
+ ElementType, AlignmentSource::Decl);
llvm::Value *valueValue = EmitScalarExpr(Value);
EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true);
if (TrackNeededObjects) {
@@ -382,10 +383,12 @@ tryGenerateSpecializedMessageSend(CodeGenFunction &CGF, QualType ResultType,
if (isClassMessage &&
Runtime.shouldUseRuntimeFunctionsForAlloc() &&
ResultType->isObjCObjectPointerType()) {
- // [Foo alloc] -> objc_alloc(Foo)
+ // [Foo alloc] -> objc_alloc(Foo) or
+ // [self alloc] -> objc_alloc(self)
if (Sel.isUnarySelector() && Sel.getNameForSlot(0) == "alloc")
return CGF.EmitObjCAlloc(Receiver, CGF.ConvertType(ResultType));
- // [Foo allocWithZone:nil] -> objc_allocWithZone(Foo)
+ // [Foo allocWithZone:nil] -> objc_allocWithZone(Foo) or
+ // [self allocWithZone:nil] -> objc_allocWithZone(self)
if (Sel.isKeywordSelector() && Sel.getNumArgs() == 1 &&
Args.size() == 1 && Args.front().getType()->isPointerType() &&
Sel.getNameForSlot(0) == "allocWithZone") {
@@ -427,6 +430,57 @@ tryGenerateSpecializedMessageSend(CodeGenFunction &CGF, QualType ResultType,
return None;
}
+/// Instead of '[[MyClass alloc] init]', try to generate
+/// 'objc_alloc_init(MyClass)'. This provides a code size improvement on the
+/// caller side, as well as the optimized objc_alloc.
+static Optional<llvm::Value *>
+tryEmitSpecializedAllocInit(CodeGenFunction &CGF, const ObjCMessageExpr *OME) {
+ auto &Runtime = CGF.getLangOpts().ObjCRuntime;
+ if (!Runtime.shouldUseRuntimeFunctionForCombinedAllocInit())
+ return None;
+
+ // Match the exact pattern '[[MyClass alloc] init]'.
+ Selector Sel = OME->getSelector();
+ if (OME->getReceiverKind() != ObjCMessageExpr::Instance ||
+ !OME->getType()->isObjCObjectPointerType() || !Sel.isUnarySelector() ||
+ Sel.getNameForSlot(0) != "init")
+ return None;
+
+ // Okay, this is '[receiver init]', check if 'receiver' is '[cls alloc]' or
+ // we are in an ObjC class method and 'receiver' is '[self alloc]'.
+ auto *SubOME =
+ dyn_cast<ObjCMessageExpr>(OME->getInstanceReceiver()->IgnoreParenCasts());
+ if (!SubOME)
+ return None;
+ Selector SubSel = SubOME->getSelector();
+
+ // Check if we are in an ObjC class method and the receiver expression is
+ // 'self'.
+ const Expr *SelfInClassMethod = nullptr;
+ if (const auto *CurMD = dyn_cast_or_null<ObjCMethodDecl>(CGF.CurFuncDecl))
+ if (CurMD->isClassMethod())
+ if ((SelfInClassMethod = SubOME->getInstanceReceiver()))
+ if (!SelfInClassMethod->isObjCSelfExpr())
+ SelfInClassMethod = nullptr;
+
+ if ((SubOME->getReceiverKind() != ObjCMessageExpr::Class &&
+ !SelfInClassMethod) || !SubOME->getType()->isObjCObjectPointerType() ||
+ !SubSel.isUnarySelector() || SubSel.getNameForSlot(0) != "alloc")
+ return None;
+
+ llvm::Value *Receiver;
+ if (SelfInClassMethod) {
+ Receiver = CGF.EmitScalarExpr(SelfInClassMethod);
+ } else {
+ QualType ReceiverType = SubOME->getClassReceiver();
+ const ObjCObjectType *ObjTy = ReceiverType->getAs<ObjCObjectType>();
+ const ObjCInterfaceDecl *ID = ObjTy->getInterface();
+ assert(ID && "null interface should be impossible here");
+ Receiver = CGF.CGM.getObjCRuntime().GetClass(CGF, ID);
+ }
+ return CGF.EmitObjCAllocInit(Receiver, CGF.ConvertType(OME->getType()));
+}
+
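
A condensed sketch of the structural test the helper above performs, limited to AST calls that already appear in it; looksLikeAllocInit is an illustrative name, not part of the patch, and the class-method 'self' receiver case is deliberately left out:

  // Does this expression have the shape [[Cls alloc] init]?
  // (uses clang/AST/ExprObjC.h)
  static bool looksLikeAllocInit(const clang::ObjCMessageExpr *OME) {
    clang::Selector Sel = OME->getSelector();
    if (OME->getReceiverKind() != clang::ObjCMessageExpr::Instance ||
        !Sel.isUnarySelector() || Sel.getNameForSlot(0) != "init")
      return false;
    const auto *Sub = llvm::dyn_cast<clang::ObjCMessageExpr>(
        OME->getInstanceReceiver()->IgnoreParenCasts());
    return Sub && Sub->getReceiverKind() == clang::ObjCMessageExpr::Class &&
           Sub->getSelector().isUnarySelector() &&
           Sub->getSelector().getNameForSlot(0) == "alloc";
  }
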
RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
ReturnValueSlot Return) {
// Only the lookup mechanism and first two arguments of the method
@@ -448,6 +502,9 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
}
}
+ if (Optional<llvm::Value *> Val = tryEmitSpecializedAllocInit(*this, E))
+ return AdjustObjCObjectType(*this, E->getType(), RValue::get(*Val));
+
// We don't retain the receiver in delegate init calls, and this is
// safe because the receiver value is always loaded from 'self',
// which we zero out. We don't want to Block_copy block receivers,
@@ -468,6 +525,10 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
switch (E->getReceiverKind()) {
case ObjCMessageExpr::Instance:
ReceiverType = E->getInstanceReceiver()->getType();
+ if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(CurFuncDecl))
+ if (OMD->isClassMethod())
+ if (E->getInstanceReceiver()->isObjCSelfExpr())
+ isClassMessage = true;
if (retainSelf) {
TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
E->getInstanceReceiver());
@@ -685,7 +746,7 @@ static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);
- llvm::Constant *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
+ llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
CGCallee callee = CGCallee::forDirect(fn);
CGF.EmitCall(CGF.getTypes().arrangeBuiltinFunctionCall(Context.VoidTy, args),
callee, ReturnValueSlot(), args);
@@ -949,8 +1010,8 @@ static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
// Third argument is the helper function.
args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
- llvm::Constant *copyCppAtomicObjectFn =
- CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction();
+ llvm::FunctionCallee copyCppAtomicObjectFn =
+ CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction();
CGCallee callee = CGCallee::forDirect(copyCppAtomicObjectFn);
CGF.EmitCall(
CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
@@ -1026,8 +1087,8 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
}
case PropertyImplStrategy::GetSetProperty: {
- llvm::Constant *getPropertyFn =
- CGM.getObjCRuntime().GetPropertyGetFunction();
+ llvm::FunctionCallee getPropertyFn =
+ CGM.getObjCRuntime().GetPropertyGetFunction();
if (!getPropertyFn) {
CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
return;
@@ -1052,10 +1113,10 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
// FIXME: We shouldn't need to get the function info here, the
// runtime already should have computed it to build the function.
- llvm::Instruction *CallInstruction;
- RValue RV = EmitCall(
- getTypes().arrangeBuiltinFunctionCall(propType, args),
- callee, ReturnValueSlot(), args, &CallInstruction);
+ llvm::CallBase *CallInstruction;
+ RValue RV = EmitCall(getTypes().arrangeBuiltinFunctionCall(
+ getContext().getObjCIdType(), args),
+ callee, ReturnValueSlot(), args, &CallInstruction);
if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction))
call->setTailCall();
@@ -1096,7 +1157,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
// that's not necessarily the same as "on the stack", so
// we still potentially need objc_memmove_collectable.
EmitAggregateCopy(/* Dest= */ MakeAddrLValue(ReturnValue, ivarType),
- /* Src= */ LV, ivarType, overlapForReturnValue());
+ /* Src= */ LV, ivarType, getOverlapForReturnValue());
return;
}
case TEK_Scalar: {
@@ -1170,7 +1231,7 @@ static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
// FIXME: should this really always be false?
args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);
- llvm::Constant *fn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
+ llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
CGCallee callee = CGCallee::forDirect(fn);
CGF.EmitCall(
CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
@@ -1207,8 +1268,8 @@ static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
// Third argument is the helper function.
args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
- llvm::Constant *fn =
- CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction();
+ llvm::FunctionCallee fn =
+ CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction();
CGCallee callee = CGCallee::forDirect(fn);
CGF.EmitCall(
CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
@@ -1302,14 +1363,13 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
case PropertyImplStrategy::GetSetProperty:
case PropertyImplStrategy::SetPropertyAndExpressionGet: {
- llvm::Constant *setOptimizedPropertyFn = nullptr;
- llvm::Constant *setPropertyFn = nullptr;
+ llvm::FunctionCallee setOptimizedPropertyFn = nullptr;
+ llvm::FunctionCallee setPropertyFn = nullptr;
if (UseOptimizedSetter(CGM)) {
// 10.8 and iOS 6.0 code and GC is off
setOptimizedPropertyFn =
- CGM.getObjCRuntime()
- .GetOptimizedPropertySetFunction(strategy.isAtomic(),
- strategy.isCopy());
+ CGM.getObjCRuntime().GetOptimizedPropertySetFunction(
+ strategy.isAtomic(), strategy.isCopy());
if (!setOptimizedPropertyFn) {
CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI");
return;
@@ -1560,8 +1620,8 @@ QualType CodeGenFunction::TypeOfSelfObject() {
}
void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
- llvm::Constant *EnumerationMutationFnPtr =
- CGM.getObjCRuntime().EnumerationMutationFunction();
+ llvm::FunctionCallee EnumerationMutationFnPtr =
+ CGM.getObjCRuntime().EnumerationMutationFunction();
if (!EnumerationMutationFnPtr) {
CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
return;
@@ -1669,8 +1729,8 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Save the initial mutations value. This is the value at an
// address that was written into the state object by
// countByEnumeratingWithState:objects:count:.
- Address StateMutationsPtrPtr = Builder.CreateStructGEP(
- StatePtr, 2, 2 * getPointerSize(), "mutationsptr.ptr");
+ Address StateMutationsPtrPtr =
+ Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr");
llvm::Value *StateMutationsPtr
= Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
@@ -1751,8 +1811,8 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Fetch the buffer out of the enumeration state.
// TODO: this pointer should actually be invariant between
// refreshes, which would help us do certain loop optimizations.
- Address StateItemsPtr = Builder.CreateStructGEP(
- StatePtr, 1, getPointerSize(), "stateitems.ptr");
+ Address StateItemsPtr =
+ Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr");
llvm::Value *EnumStateItems =
Builder.CreateLoad(StateItemsPtr, "stateitems");
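
These hunks track a CGBuilder interface change: CreateStructGEP and CreateConstArrayGEP on a clang CodeGen Address no longer take an explicit CharUnits offset, the offset being recoverable from the element type the Address carries. Rough shape of the new calls, with Addr and i assumed:

  Address FieldPtr = Builder.CreateStructGEP(Addr, /*Index=*/1, "field.ptr");
  Address SlotPtr = Builder.CreateConstArrayGEP(Addr, i);
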
@@ -1891,7 +1951,7 @@ llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
/// Given a number of pointers, inform the optimizer that they're
/// being intrinsically used up until this point in the program.
void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) {
- llvm::Constant *&fn = CGM.getObjCEntrypoints().clang_arc_use;
+ llvm::Function *&fn = CGM.getObjCEntrypoints().clang_arc_use;
if (!fn)
fn = CGM.getIntrinsic(llvm::Intrinsic::objc_clang_arc_use);
@@ -1900,8 +1960,7 @@ void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) {
EmitNounwindRuntimeCall(fn, values);
}
-static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM,
- llvm::Constant *RTF) {
+static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM, llvm::Value *RTF) {
if (auto *F = dyn_cast<llvm::Function>(RTF)) {
// If the target runtime doesn't naturally support ARC, emit weak
// references to the runtime support library. We don't really
@@ -1913,15 +1972,18 @@ static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM,
}
}
+static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM,
+ llvm::FunctionCallee RTF) {
+ setARCRuntimeFunctionLinkage(CGM, RTF.getCallee());
+}
+
/// Perform an operation having the signature
/// i8* (i8*)
/// where a null input causes a no-op and returns null.
-static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
- llvm::Value *value,
- llvm::Type *returnType,
- llvm::Constant *&fn,
- llvm::Intrinsic::ID IntID,
- bool isTailCall = false) {
+static llvm::Value *emitARCValueOperation(
+ CodeGenFunction &CGF, llvm::Value *value, llvm::Type *returnType,
+ llvm::Function *&fn, llvm::Intrinsic::ID IntID,
+ llvm::CallInst::TailCallKind tailKind = llvm::CallInst::TCK_None) {
if (isa<llvm::ConstantPointerNull>(value))
return value;
@@ -1936,8 +1998,7 @@ static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
// Call the function.
llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(fn, value);
- if (isTailCall)
- call->setTailCall();
+ call->setTailCallKind(tailKind);
// Cast the result back to the original type.
return CGF.Builder.CreateBitCast(call, origType);
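
The boolean isTailCall parameter becomes a full llvm::CallInst::TailCallKind, which also makes room for the 'must not be a tail call' marking used further down for retainAutoreleasedReturnValue on targets that request it. The three kinds this patch uses, applied to an existing llvm::CallInst *call:

  call->setTailCallKind(llvm::CallInst::TCK_None);   // no annotation (default)
  call->setTailCallKind(llvm::CallInst::TCK_Tail);   // emit as 'tail call'
  call->setTailCallKind(llvm::CallInst::TCK_NoTail); // emit as 'notail call'
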
@@ -1945,9 +2006,8 @@ static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
/// Perform an operation having the following signature:
/// i8* (i8**)
-static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
- Address addr,
- llvm::Constant *&fn,
+static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, Address addr,
+ llvm::Function *&fn,
llvm::Intrinsic::ID IntID) {
if (!fn) {
fn = CGF.CGM.getIntrinsic(IntID);
@@ -1970,10 +2030,9 @@ static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
/// Perform an operation having the following signature:
/// i8* (i8**, i8*)
-static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
- Address addr,
+static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF, Address addr,
llvm::Value *value,
- llvm::Constant *&fn,
+ llvm::Function *&fn,
llvm::Intrinsic::ID IntID,
bool ignored) {
assert(addr.getElementType() == value->getType());
@@ -1998,10 +2057,8 @@ static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
/// Perform an operation having the following signature:
/// void (i8**, i8**)
-static void emitARCCopyOperation(CodeGenFunction &CGF,
- Address dst,
- Address src,
- llvm::Constant *&fn,
+static void emitARCCopyOperation(CodeGenFunction &CGF, Address dst, Address src,
+ llvm::Function *&fn,
llvm::Intrinsic::ID IntID) {
assert(dst.getType() == src.getType());
@@ -2023,7 +2080,7 @@ static void emitARCCopyOperation(CodeGenFunction &CGF,
static llvm::Value *emitObjCValueOperation(CodeGenFunction &CGF,
llvm::Value *value,
llvm::Type *returnType,
- llvm::Constant *&fn,
+ llvm::FunctionCallee &fn,
StringRef fnName) {
if (isa<llvm::ConstantPointerNull>(value))
return value;
@@ -2034,7 +2091,7 @@ static llvm::Value *emitObjCValueOperation(CodeGenFunction &CGF,
fn = CGF.CGM.CreateRuntimeFunction(fnType, fnName);
// We have Native ARC, so set nonlazybind attribute for performance
- if (llvm::Function *f = dyn_cast<llvm::Function>(fn))
+ if (llvm::Function *f = dyn_cast<llvm::Function>(fn.getCallee()))
if (fnName == "objc_retain")
f->addFnAttr(llvm::Attribute::NonLazyBind);
}
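
Throughout this file, cached runtime helpers move from llvm::Constant * to llvm::FunctionCallee, which pairs the callee value with its function type. The recurring idiom, as in the hunk above (FnTy and CGM assumed): create the callee, then recover the llvm::Function only when an attribute actually has to be set:

  llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FnTy, "objc_retain");
  if (auto *F = llvm::dyn_cast<llvm::Function>(Fn.getCallee()))
    F->addFnAttr(llvm::Attribute::NonLazyBind);
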
@@ -2044,10 +2101,10 @@ static llvm::Value *emitObjCValueOperation(CodeGenFunction &CGF,
value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
// Call the function.
- llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(fn, value);
+ llvm::CallBase *Inst = CGF.EmitCallOrInvoke(fn, value);
// Cast the result back to the original type.
- return CGF.Builder.CreateBitCast(call, origType);
+ return CGF.Builder.CreateBitCast(Inst, origType);
}
/// Produce the code to do a retain. Based on the type, calls one of:
@@ -2122,14 +2179,10 @@ static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) {
// with this marker yet, so leave a breadcrumb for the ARC
// optimizer to pick up.
} else {
- llvm::NamedMDNode *metadata =
- CGF.CGM.getModule().getOrInsertNamedMetadata(
- "clang.arc.retainAutoreleasedReturnValueMarker");
- assert(metadata->getNumOperands() <= 1);
- if (metadata->getNumOperands() == 0) {
- auto &ctx = CGF.getLLVMContext();
- metadata->addOperand(llvm::MDNode::get(ctx,
- llvm::MDString::get(ctx, assembly)));
+ const char *markerKey = "clang.arc.retainAutoreleasedReturnValueMarker";
+ if (!CGF.CGM.getModule().getModuleFlag(markerKey)) {
+ auto *str = llvm::MDString::get(CGF.getLLVMContext(), assembly);
+ CGF.CGM.getModule().addModuleFlag(llvm::Module::Error, markerKey, str);
}
}
}
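
The autoreleased-return-value marker string is now recorded as a module flag rather than a named metadata node; with the Error behavior, linking modules whose values for the key disagree is rejected instead of the operands silently accumulating. The two calls involved, sketched against an existing llvm::Module M and its LLVMContext Ctx, with MarkerAsm standing in for the target's marker instruction string:

  const char *Key = "clang.arc.retainAutoreleasedReturnValueMarker";
  if (!M.getModuleFlag(Key))
    M.addModuleFlag(llvm::Module::Error, Key,
                    llvm::MDString::get(Ctx, MarkerAsm));
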
@@ -2147,9 +2200,15 @@ static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) {
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
emitAutoreleasedReturnValueMarker(*this);
- return emitARCValueOperation(*this, value, nullptr,
- CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue,
- llvm::Intrinsic::objc_retainAutoreleasedReturnValue);
+ llvm::CallInst::TailCallKind tailKind =
+ CGM.getTargetCodeGenInfo()
+ .shouldSuppressTailCallsOfRetainAutoreleasedReturnValue()
+ ? llvm::CallInst::TCK_NoTail
+ : llvm::CallInst::TCK_None;
+ return emitARCValueOperation(
+ *this, value, nullptr,
+ CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue,
+ llvm::Intrinsic::objc_retainAutoreleasedReturnValue, tailKind);
}
/// Claim a possibly-autoreleased return value at +0. This is only
@@ -2173,7 +2232,7 @@ void CodeGenFunction::EmitARCRelease(llvm::Value *value,
ARCPreciseLifetime_t precise) {
if (isa<llvm::ConstantPointerNull>(value)) return;
- llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_release;
+ llvm::Function *&fn = CGM.getObjCEntrypoints().objc_release;
if (!fn) {
fn = CGM.getIntrinsic(llvm::Intrinsic::objc_release);
setARCRuntimeFunctionLinkage(CGM, fn);
@@ -2219,7 +2278,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr,
bool ignored) {
assert(addr.getElementType() == value->getType());
- llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_storeStrong;
+ llvm::Function *&fn = CGM.getObjCEntrypoints().objc_storeStrong;
if (!fn) {
fn = CGM.getIntrinsic(llvm::Intrinsic::objc_storeStrong);
setARCRuntimeFunctionLinkage(CGM, fn);
@@ -2286,7 +2345,7 @@ CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
return emitARCValueOperation(*this, value, nullptr,
CGM.getObjCEntrypoints().objc_autoreleaseReturnValue,
llvm::Intrinsic::objc_autoreleaseReturnValue,
- /*isTailCall*/ true);
+ llvm::CallInst::TCK_Tail);
}
/// Do a fused retain/autorelease of the given object.
@@ -2296,7 +2355,7 @@ CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
return emitARCValueOperation(*this, value, nullptr,
CGM.getObjCEntrypoints().objc_retainAutoreleaseReturnValue,
llvm::Intrinsic::objc_retainAutoreleaseReturnValue,
- /*isTailCall*/ true);
+ llvm::CallInst::TCK_Tail);
}
/// Do a fused retain/autorelease of the given object.
@@ -2375,7 +2434,7 @@ void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) {
/// void \@objc_destroyWeak(i8** %addr)
/// Essentially objc_storeWeak(addr, nil).
void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
- llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_destroyWeak;
+ llvm::Function *&fn = CGM.getObjCEntrypoints().objc_destroyWeak;
if (!fn) {
fn = CGM.getIntrinsic(llvm::Intrinsic::objc_destroyWeak);
setARCRuntimeFunctionLinkage(CGM, fn);
@@ -2423,7 +2482,7 @@ void CodeGenFunction::emitARCMoveAssignWeak(QualType Ty, Address DstAddr,
/// Produce the code to do a objc_autoreleasepool_push.
/// call i8* \@objc_autoreleasePoolPush(void)
llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
- llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush;
+ llvm::Function *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush;
if (!fn) {
fn = CGM.getIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPush);
setARCRuntimeFunctionLinkage(CGM, fn);
@@ -2439,8 +2498,8 @@ void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
if (getInvokeDest()) {
// Call the runtime method not the intrinsic if we are handling exceptions
- llvm::Constant *&fn =
- CGM.getObjCEntrypoints().objc_autoreleasePoolPopInvoke;
+ llvm::FunctionCallee &fn =
+ CGM.getObjCEntrypoints().objc_autoreleasePoolPopInvoke;
if (!fn) {
llvm::FunctionType *fnType =
llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
@@ -2451,7 +2510,7 @@ void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
// objc_autoreleasePoolPop can throw.
EmitRuntimeCallOrInvoke(fn, value);
} else {
- llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop;
+ llvm::FunctionCallee &fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop;
if (!fn) {
fn = CGM.getIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPop);
setARCRuntimeFunctionLinkage(CGM, fn);
@@ -2507,6 +2566,13 @@ llvm::Value *CodeGenFunction::EmitObjCAllocWithZone(llvm::Value *value,
"objc_allocWithZone");
}
+llvm::Value *CodeGenFunction::EmitObjCAllocInit(llvm::Value *value,
+ llvm::Type *resultType) {
+ return emitObjCValueOperation(*this, value, resultType,
+ CGM.getObjCEntrypoints().objc_alloc_init,
+ "objc_alloc_init");
+}
+
/// Produce the code to do a primitive release.
/// [tmp drain];
void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
@@ -2545,18 +2611,19 @@ void CodeGenFunction::emitARCIntrinsicUse(CodeGenFunction &CGF, Address addr,
/// call i8* \@objc_autorelease(i8* %value)
llvm::Value *CodeGenFunction::EmitObjCAutorelease(llvm::Value *value,
llvm::Type *returnType) {
- return emitObjCValueOperation(*this, value, returnType,
- CGM.getObjCEntrypoints().objc_autoreleaseRuntimeFunction,
- "objc_autorelease");
+ return emitObjCValueOperation(
+ *this, value, returnType,
+ CGM.getObjCEntrypoints().objc_autoreleaseRuntimeFunction,
+ "objc_autorelease");
}
/// Retain the given object, with normal retain semantics.
/// call i8* \@objc_retain(i8* %value)
llvm::Value *CodeGenFunction::EmitObjCRetainNonBlock(llvm::Value *value,
llvm::Type *returnType) {
- return emitObjCValueOperation(*this, value, returnType,
- CGM.getObjCEntrypoints().objc_retainRuntimeFunction,
- "objc_retain");
+ return emitObjCValueOperation(
+ *this, value, returnType,
+ CGM.getObjCEntrypoints().objc_retainRuntimeFunction, "objc_retain");
}
/// Release the given object.
@@ -2565,24 +2632,23 @@ void CodeGenFunction::EmitObjCRelease(llvm::Value *value,
ARCPreciseLifetime_t precise) {
if (isa<llvm::ConstantPointerNull>(value)) return;
- llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_release;
+ llvm::FunctionCallee &fn =
+ CGM.getObjCEntrypoints().objc_releaseRuntimeFunction;
if (!fn) {
- if (!fn) {
- llvm::FunctionType *fnType =
+ llvm::FunctionType *fnType =
llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
- fn = CGM.CreateRuntimeFunction(fnType, "objc_release");
- setARCRuntimeFunctionLinkage(CGM, fn);
- // We have Native ARC, so set nonlazybind attribute for performance
- if (llvm::Function *f = dyn_cast<llvm::Function>(fn))
- f->addFnAttr(llvm::Attribute::NonLazyBind);
- }
+ fn = CGM.CreateRuntimeFunction(fnType, "objc_release");
+ setARCRuntimeFunctionLinkage(CGM, fn);
+ // We have Native ARC, so set nonlazybind attribute for performance
+ if (llvm::Function *f = dyn_cast<llvm::Function>(fn.getCallee()))
+ f->addFnAttr(llvm::Attribute::NonLazyBind);
}
// Cast the argument to 'id'.
value = Builder.CreateBitCast(value, Int8PtrTy);
// Call objc_release.
- llvm::CallInst *call = EmitNounwindRuntimeCall(fn, value);
+ llvm::CallBase *call = EmitCallOrInvoke(fn, value);
if (precise == ARCImpreciseLifetime) {
call->setMetadata("clang.imprecise_release",
@@ -2829,6 +2895,7 @@ public:
Result visit(const Expr *e);
Result visitCastExpr(const CastExpr *e);
Result visitPseudoObjectExpr(const PseudoObjectExpr *e);
+ Result visitBlockExpr(const BlockExpr *e);
Result visitBinaryOperator(const BinaryOperator *e);
Result visitBinAssign(const BinaryOperator *e);
Result visitBinAssignUnsafeUnretained(const BinaryOperator *e);
@@ -2905,6 +2972,12 @@ ARCExprEmitter<Impl,Result>::visitPseudoObjectExpr(const PseudoObjectExpr *E) {
}
template <typename Impl, typename Result>
+Result ARCExprEmitter<Impl, Result>::visitBlockExpr(const BlockExpr *e) {
+ // The default implementation just forwards the expression to visitExpr.
+ return asImpl().visitExpr(e);
+}
+
+template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visitCastExpr(const CastExpr *e) {
switch (e->getCastKind()) {
@@ -3047,7 +3120,8 @@ Result ARCExprEmitter<Impl,Result>::visit(const Expr *e) {
// Look through pseudo-object expressions.
} else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
return asImpl().visitPseudoObjectExpr(pseudo);
- }
+ } else if (auto *be = dyn_cast<BlockExpr>(e))
+ return asImpl().visitBlockExpr(be);
return asImpl().visitExpr(e);
}
@@ -3082,6 +3156,15 @@ struct ARCRetainExprEmitter :
return TryEmitResult(result, true);
}
+ TryEmitResult visitBlockExpr(const BlockExpr *e) {
+ TryEmitResult result = visitExpr(e);
+ // Avoid the block-retain if this is a block literal that doesn't need to be
+ // copied to the heap.
+ if (e->getBlockDecl()->canAvoidCopyToHeap())
+ result.setInt(true);
+ return result;
+ }
+
/// Block extends are net +0. Naively, we could just recurse on
/// the subexpression, but actually we need to ensure that the
/// value is copied as a block, so there's a little filter here.
@@ -3384,11 +3467,10 @@ void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
// We just use an inline assembly.
llvm::FunctionType *extenderType
= llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All);
- llvm::Value *extender
- = llvm::InlineAsm::get(extenderType,
- /* assembly */ "",
- /* constraints */ "r",
- /* side effects */ true);
+ llvm::InlineAsm *extender = llvm::InlineAsm::get(extenderType,
+ /* assembly */ "",
+ /* constraints */ "r",
+ /* side effects */ true);
object = Builder.CreateBitCast(object, VoidPtrTy);
EmitNounwindRuntimeCall(extender, object);
@@ -3647,19 +3729,25 @@ void CodeGenModule::emitAtAvailableLinkGuard() {
// CoreFoundation is linked into the final binary.
llvm::FunctionType *FTy =
llvm::FunctionType::get(Int32Ty, {VoidPtrTy}, false);
- llvm::Constant *CFFunc =
+ llvm::FunctionCallee CFFunc =
CreateRuntimeFunction(FTy, "CFBundleGetVersionNumber");
llvm::FunctionType *CheckFTy = llvm::FunctionType::get(VoidTy, {}, false);
- llvm::Function *CFLinkCheckFunc = cast<llvm::Function>(CreateBuiltinFunction(
- CheckFTy, "__clang_at_available_requires_core_foundation_framework"));
- CFLinkCheckFunc->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
- CFLinkCheckFunc->setVisibility(llvm::GlobalValue::HiddenVisibility);
- CodeGenFunction CGF(*this);
- CGF.Builder.SetInsertPoint(CGF.createBasicBlock("", CFLinkCheckFunc));
- CGF.EmitNounwindRuntimeCall(CFFunc, llvm::Constant::getNullValue(VoidPtrTy));
- CGF.Builder.CreateUnreachable();
- addCompilerUsedGlobal(CFLinkCheckFunc);
+ llvm::FunctionCallee CFLinkCheckFuncRef = CreateRuntimeFunction(
+ CheckFTy, "__clang_at_available_requires_core_foundation_framework",
+ llvm::AttributeList(), /*Local=*/true);
+ llvm::Function *CFLinkCheckFunc =
+ cast<llvm::Function>(CFLinkCheckFuncRef.getCallee()->stripPointerCasts());
+ if (CFLinkCheckFunc->empty()) {
+ CFLinkCheckFunc->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
+ CFLinkCheckFunc->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ CodeGenFunction CGF(*this);
+ CGF.Builder.SetInsertPoint(CGF.createBasicBlock("", CFLinkCheckFunc));
+ CGF.EmitNounwindRuntimeCall(CFFunc,
+ llvm::Constant::getNullValue(VoidPtrTy));
+ CGF.Builder.CreateUnreachable();
+ addCompilerUsedGlobal(CFLinkCheckFunc);
+ }
}
CGObjCRuntime::~CGObjCRuntime() {}
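
The @available link-guard function is now obtained through CreateRuntimeFunction and only given a body while it is still empty, so requesting it again cannot redefine it. The retrieval idiom used above, with FTy assumed to be the void() function type:

  llvm::FunctionCallee Ref = CGM.CreateRuntimeFunction(
      FTy, "__clang_at_available_requires_core_foundation_framework",
      llvm::AttributeList(), /*Local=*/true);
  auto *Fn = llvm::cast<llvm::Function>(Ref.getCallee()->stripPointerCasts());
  if (Fn->empty()) {
    // emit the body exactly once
  }
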
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index 548bd6b3fd72..ee5c12aa35bd 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -1,9 +1,8 @@
//===------- CGObjCGNU.cpp - Emit LLVM Code from ASTs for a Module --------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -29,7 +28,6 @@
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
@@ -60,7 +58,7 @@ class LazyRuntimeFunction {
CodeGenModule *CGM;
llvm::FunctionType *FTy;
const char *FunctionName;
- llvm::Constant *Function;
+ llvm::FunctionCallee Function;
public:
/// Constructor leaves this class uninitialized, because it is intended to
@@ -90,7 +88,7 @@ public:
/// Overloaded cast operator, allows the class to be implicitly cast to an
/// LLVM constant.
- operator llvm::Constant *() {
+ operator llvm::FunctionCallee() {
if (!Function) {
if (!FunctionName)
return nullptr;
@@ -98,9 +96,6 @@ public:
}
return Function;
}
- operator llvm::Function *() {
- return cast<llvm::Function>((llvm::Constant *)*this);
- }
};
@@ -190,12 +185,16 @@ protected:
(R.getVersion() >= VersionTuple(major, minor));
}
- std::string SymbolForProtocol(StringRef Name) {
- return (StringRef("._OBJC_PROTOCOL_") + Name).str();
+ std::string ManglePublicSymbol(StringRef Name) {
+ return (StringRef(CGM.getTriple().isOSBinFormatCOFF() ? "$_" : "._") + Name).str();
+ }
+
+ std::string SymbolForProtocol(Twine Name) {
+ return (ManglePublicSymbol("OBJC_PROTOCOL_") + Name).str();
}
std::string SymbolForProtocolRef(StringRef Name) {
- return (StringRef("._OBJC_REF_PROTOCOL_") + Name).str();
+ return (ManglePublicSymbol("OBJC_REF_PROTOCOL_") + Name).str();
}
@@ -614,15 +613,15 @@ public:
const ObjCProtocolDecl *PD) override;
void GenerateProtocol(const ObjCProtocolDecl *PD) override;
llvm::Function *ModuleInitFunction() override;
- llvm::Constant *GetPropertyGetFunction() override;
- llvm::Constant *GetPropertySetFunction() override;
- llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
- bool copy) override;
- llvm::Constant *GetSetStructFunction() override;
- llvm::Constant *GetGetStructFunction() override;
- llvm::Constant *GetCppAtomicObjectGetFunction() override;
- llvm::Constant *GetCppAtomicObjectSetFunction() override;
- llvm::Constant *EnumerationMutationFunction() override;
+ llvm::FunctionCallee GetPropertyGetFunction() override;
+ llvm::FunctionCallee GetPropertySetFunction() override;
+ llvm::FunctionCallee GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) override;
+ llvm::FunctionCallee GetSetStructFunction() override;
+ llvm::FunctionCallee GetGetStructFunction() override;
+ llvm::FunctionCallee GetCppAtomicObjectGetFunction() override;
+ llvm::FunctionCallee GetCppAtomicObjectSetFunction() override;
+ llvm::FunctionCallee EnumerationMutationFunction() override;
void EmitTryStmt(CodeGenFunction &CGF,
const ObjCAtTryStmt &S) override;
@@ -691,9 +690,9 @@ protected:
llvm::Value *args[] = {
EnforceType(Builder, Receiver, IdTy),
EnforceType(Builder, cmd, SelectorTy) };
- llvm::CallSite imp = CGF.EmitRuntimeCallOrInvoke(MsgLookupFn, args);
+ llvm::CallBase *imp = CGF.EmitRuntimeCallOrInvoke(MsgLookupFn, args);
imp->setMetadata(msgSendMDKind, node);
- return imp.getInstruction();
+ return imp;
}
llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, Address ObjCSuper,
@@ -750,7 +749,7 @@ class CGObjCGNUstep : public CGObjCGNU {
llvm::Value *cmd, llvm::MDNode *node,
MessageSendInfo &MSI) override {
CGBuilderTy &Builder = CGF.Builder;
- llvm::Function *LookupFn = SlotLookupFn;
+ llvm::FunctionCallee LookupFn = SlotLookupFn;
// Store the receiver on the stack so that we can reload it later
Address ReceiverPtr =
@@ -766,20 +765,20 @@ class CGObjCGNUstep : public CGObjCGNU {
}
// The lookup function is guaranteed not to capture the receiver pointer.
- LookupFn->addParamAttr(0, llvm::Attribute::NoCapture);
+ if (auto *LookupFn2 = dyn_cast<llvm::Function>(LookupFn.getCallee()))
+ LookupFn2->addParamAttr(0, llvm::Attribute::NoCapture);
llvm::Value *args[] = {
EnforceType(Builder, ReceiverPtr.getPointer(), PtrToIdTy),
EnforceType(Builder, cmd, SelectorTy),
EnforceType(Builder, self, IdTy) };
- llvm::CallSite slot = CGF.EmitRuntimeCallOrInvoke(LookupFn, args);
- slot.setOnlyReadsMemory();
+ llvm::CallBase *slot = CGF.EmitRuntimeCallOrInvoke(LookupFn, args);
+ slot->setOnlyReadsMemory();
slot->setMetadata(msgSendMDKind, node);
// Load the imp from the slot
llvm::Value *imp = Builder.CreateAlignedLoad(
- Builder.CreateStructGEP(nullptr, slot.getInstruction(), 4),
- CGF.getPointerAlign());
+ Builder.CreateStructGEP(nullptr, slot, 4), CGF.getPointerAlign());
// The lookup function may have changed the receiver, so make sure we use
// the new one.
@@ -859,7 +858,7 @@ class CGObjCGNUstep : public CGObjCGNU {
PtrTy, PtrTy);
}
- llvm::Constant *GetCppAtomicObjectGetFunction() override {
+ llvm::FunctionCallee GetCppAtomicObjectGetFunction() override {
// The optimised functions were added in version 1.7 of the GNUstep
// runtime.
assert (CGM.getLangOpts().ObjCRuntime.getVersion() >=
@@ -867,7 +866,7 @@ class CGObjCGNUstep : public CGObjCGNU {
return CxxAtomicObjectGetFn;
}
- llvm::Constant *GetCppAtomicObjectSetFunction() override {
+ llvm::FunctionCallee GetCppAtomicObjectSetFunction() override {
// The optimised functions were added in version 1.7 of the GNUstep
// runtime.
assert (CGM.getLangOpts().ObjCRuntime.getVersion() >=
@@ -875,8 +874,8 @@ class CGObjCGNUstep : public CGObjCGNU {
return CxxAtomicObjectSetFn;
}
- llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
- bool copy) override {
+ llvm::FunctionCallee GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) override {
// The optimised property functions omit the GC check, and so are not
// safe to use in GC mode. The standard functions are fast in GC mode,
// so there is less advantage in using them.
@@ -911,12 +910,15 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
ConstantStringSection
};
static const char *const SectionsBaseNames[8];
+ static const char *const PECOFFSectionsBaseNames[8];
template<SectionKind K>
std::string sectionName() {
- std::string name(SectionsBaseNames[K]);
- if (CGM.getTriple().isOSBinFormatCOFF())
+ if (CGM.getTriple().isOSBinFormatCOFF()) {
+ std::string name(PECOFFSectionsBaseNames[K]);
name += "$m";
- return name;
+ return name;
+ }
+ return SectionsBaseNames[K];
}
/// The GCC ABI superclass message lookup function. Takes a pointer to a
/// structure describing the receiver and the class, and a selector as
@@ -937,15 +939,19 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
bool EmittedClass = false;
/// Generate the name of a symbol for a reference to a class. Accesses to
/// classes should be indirected via this.
+
+ typedef std::pair<std::string, std::pair<llvm::Constant*, int>> EarlyInitPair;
+ std::vector<EarlyInitPair> EarlyInitList;
+
std::string SymbolForClassRef(StringRef Name, bool isWeak) {
if (isWeak)
- return (StringRef("._OBJC_WEAK_REF_CLASS_") + Name).str();
+ return (ManglePublicSymbol("OBJC_WEAK_REF_CLASS_") + Name).str();
else
- return (StringRef("._OBJC_REF_CLASS_") + Name).str();
+ return (ManglePublicSymbol("OBJC_REF_CLASS_") + Name).str();
}
/// Generate the name of a class symbol.
std::string SymbolForClass(StringRef Name) {
- return (StringRef("._OBJC_CLASS_") + Name).str();
+ return (ManglePublicSymbol("OBJC_CLASS_") + Name).str();
}
void CallRuntimeFunction(CGBuilderTy &B, StringRef FunctionName,
ArrayRef<llvm::Value*> Args) {
@@ -954,7 +960,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
Types.push_back(Arg->getType());
llvm::FunctionType *FT = llvm::FunctionType::get(B.getVoidTy(), Types,
false);
- llvm::Value *Fn = CGM.CreateRuntimeFunction(FT, FunctionName);
+ llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FT, FunctionName);
B.CreateCall(Fn, Args);
}
@@ -999,10 +1005,13 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
llvm::Constant *isa = TheModule.getNamedGlobal(Sym);
- if (!isa)
+ if (!isa) {
isa = new llvm::GlobalVariable(TheModule, IdTy, /* isConstant */false,
llvm::GlobalValue::ExternalLinkage, nullptr, Sym);
- else if (isa->getType() != PtrToIdTy)
+ if (CGM.getTriple().isOSBinFormatCOFF()) {
+ cast<llvm::GlobalValue>(isa)->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ }
+ } else if (isa->getType() != PtrToIdTy)
isa = llvm::ConstantExpr::getBitCast(isa, PtrToIdTy);
// struct
@@ -1017,7 +1026,11 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
ConstantInitBuilder Builder(CGM);
auto Fields = Builder.beginStruct();
- Fields.add(isa);
+ if (!CGM.getTriple().isOSBinFormatCOFF()) {
+ Fields.add(isa);
+ } else {
+ Fields.addNullPointer(PtrTy);
+ }
// For now, all non-ASCII strings are represented as UTF-16. As such, the
// number of bytes is simply double the number of UTF-16 codepoints. In
// ASCII strings, the number of bytes is equal to the number of non-ASCII
@@ -1088,6 +1101,10 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
ObjCStrGV->setComdat(TheModule.getOrInsertComdat(StringName));
ObjCStrGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
}
+ if (CGM.getTriple().isOSBinFormatCOFF()) {
+ std::pair<llvm::Constant*, int> v{ObjCStrGV, 0};
+ EarlyInitList.emplace_back(Sym, v);
+ }
llvm::Constant *ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStrGV, IdTy);
ObjCStrings[Str] = ObjCStr;
ConstantStrings.push_back(ObjCStr);
@@ -1201,6 +1218,33 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
ClassSymbol->setInitializer(new llvm::GlobalVariable(TheModule,
Int8Ty, false, llvm::GlobalValue::ExternalWeakLinkage,
nullptr, SymbolForClass(Name)));
+ else {
+ if (CGM.getTriple().isOSBinFormatCOFF()) {
+ IdentifierInfo &II = CGM.getContext().Idents.get(Name);
+ TranslationUnitDecl *TUDecl = CGM.getContext().getTranslationUnitDecl();
+ DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
+
+ const ObjCInterfaceDecl *OID = nullptr;
+ for (const auto &Result : DC->lookup(&II))
+ if ((OID = dyn_cast<ObjCInterfaceDecl>(Result)))
+ break;
+
+ // The first Interface we find may be a @class,
+ // which should only be treated as the source of
+ // truth in the absence of a true declaration.
+ const ObjCInterfaceDecl *OIDDef = OID->getDefinition();
+ if (OIDDef != nullptr)
+ OID = OIDDef;
+
+ auto Storage = llvm::GlobalValue::DefaultStorageClass;
+ if (OID->hasAttr<DLLImportAttr>())
+ Storage = llvm::GlobalValue::DLLImportStorageClass;
+ else if (OID->hasAttr<DLLExportAttr>())
+ Storage = llvm::GlobalValue::DLLExportStorageClass;
+
+ cast<llvm::GlobalValue>(ClassSymbol)->setDLLStorageClass(Storage);
+ }
+ }
assert(ClassSymbol->getName() == SymbolName);
return ClassSymbol;
}
@@ -1453,7 +1497,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
Sym->setSection((Section + SecSuffix).str());
Sym->setComdat(TheModule.getOrInsertComdat((Prefix +
Section).str()));
- Sym->setAlignment(1);
+ Sym->setAlignment(CGM.getPointerAlign().getQuantity());
return Sym;
};
return { Sym("__start_", "$a"), Sym("__stop", "$z") };
@@ -1488,11 +1532,12 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
ConstantInitBuilder builder(CGM);
auto InitStructBuilder = builder.beginStruct();
InitStructBuilder.addInt(Int64Ty, 0);
- for (auto *s : SectionsBaseNames) {
+ auto &sectionVec = CGM.getTriple().isOSBinFormatCOFF() ? PECOFFSectionsBaseNames : SectionsBaseNames;
+ for (auto *s : sectionVec) {
auto bounds = GetSectionBounds(s);
InitStructBuilder.add(bounds.first);
InitStructBuilder.add(bounds.second);
- };
+ }
auto *InitStruct = InitStructBuilder.finishAndCreateGlobal(".objc_init",
CGM.getPointerAlign(), false, llvm::GlobalValue::LinkOnceODRLinkage);
InitStruct->setVisibility(llvm::GlobalValue::HiddenVisibility);
@@ -1519,7 +1564,12 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
if (CGM.getTriple().isOSBinFormatCOFF())
InitVar->setSection(".CRT$XCLz");
else
- InitVar->setSection(".ctors");
+ {
+ if (CGM.getCodeGenOpts().UseInitArray)
+ InitVar->setSection(".init_array");
+ else
+ InitVar->setSection(".ctors");
+ }
InitVar->setVisibility(llvm::GlobalValue::HiddenVisibility);
InitVar->setComdat(TheModule.getOrInsertComdat(".objc_ctor"));
CGM.addUsedGlobal(InitVar);
@@ -1582,6 +1632,29 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
ConstantStrings.clear();
Categories.clear();
Classes.clear();
+
+ if (EarlyInitList.size() > 0) {
+ auto *Init = llvm::Function::Create(llvm::FunctionType::get(CGM.VoidTy,
+ {}), llvm::GlobalValue::InternalLinkage, ".objc_early_init",
+ &CGM.getModule());
+ llvm::IRBuilder<> b(llvm::BasicBlock::Create(CGM.getLLVMContext(), "entry",
+ Init));
+ for (const auto &lateInit : EarlyInitList) {
+ auto *global = TheModule.getGlobalVariable(lateInit.first);
+ if (global) {
+ b.CreateAlignedStore(global,
+ b.CreateStructGEP(lateInit.second.first, lateInit.second.second), CGM.getPointerAlign().getQuantity());
+ }
+ }
+ b.CreateRetVoid();
+ // We can't use the normal LLVM global initialisation array, because we
+ // need to specify that this runs early in library initialisation.
+ auto *InitVar = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
+ /*isConstant*/true, llvm::GlobalValue::InternalLinkage,
+ Init, ".objc_early_init_ptr");
+ InitVar->setSection(".CRT$XCLb");
+ CGM.addUsedGlobal(InitVar);
+ }
return nullptr;
}
/// In the v2 ABI, ivar offset variables use the type encoding in their name
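
The .objc_early_init pointer above is parked in the MSVC CRT initializer table section .CRT$XCLb, so the Objective-C globals are patched early in the C++ initialization pass, ahead of the usual .CRT$XCU user initializers. A hedged plain-C++ analogue of the mechanism (MSVC/clang-cl style, not part of the patch); the CRT walks the .CRT$XC* sections in $-suffix order and calls each recorded function pointer:

  static void early_init(void) { /* fix up globals before later initializers */ }
  #pragma section(".CRT$XCLb", read)
  __declspec(allocate(".CRT$XCLb"))
  static void (*early_init_ptr)(void) = early_init;
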
@@ -1613,6 +1686,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
}
void GenerateClass(const ObjCImplementationDecl *OID) override {
ASTContext &Context = CGM.getContext();
+ bool IsCOFF = CGM.getTriple().isOSBinFormatCOFF();
// Get the class name
ObjCInterfaceDecl *classDecl =
@@ -1671,8 +1745,9 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
// struct objc_property_list *properties
metaclassFields.add(GeneratePropertyList(OID, classDecl, /*isClassProperty*/true));
- auto *metaclass = metaclassFields.finishAndCreateGlobal("._OBJC_METACLASS_"
- + className, CGM.getPointerAlign());
+ auto *metaclass = metaclassFields.finishAndCreateGlobal(
+ ManglePublicSymbol("OBJC_METACLASS_") + className,
+ CGM.getPointerAlign());
auto classFields = builder.beginStruct();
// struct objc_class *isa;
@@ -1681,15 +1756,28 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
// Get the superclass name.
const ObjCInterfaceDecl * SuperClassDecl =
OID->getClassInterface()->getSuperClass();
+ llvm::Constant *SuperClass = nullptr;
if (SuperClassDecl) {
auto SuperClassName = SymbolForClass(SuperClassDecl->getNameAsString());
- llvm::Constant *SuperClass = TheModule.getNamedGlobal(SuperClassName);
+ SuperClass = TheModule.getNamedGlobal(SuperClassName);
if (!SuperClass)
{
SuperClass = new llvm::GlobalVariable(TheModule, PtrTy, false,
llvm::GlobalValue::ExternalLinkage, nullptr, SuperClassName);
+ if (IsCOFF) {
+ auto Storage = llvm::GlobalValue::DefaultStorageClass;
+ if (SuperClassDecl->hasAttr<DLLImportAttr>())
+ Storage = llvm::GlobalValue::DLLImportStorageClass;
+ else if (SuperClassDecl->hasAttr<DLLExportAttr>())
+ Storage = llvm::GlobalValue::DLLExportStorageClass;
+
+ cast<llvm::GlobalValue>(SuperClass)->setDLLStorageClass(Storage);
+ }
}
- classFields.add(llvm::ConstantExpr::getBitCast(SuperClass, PtrTy));
+ if (!IsCOFF)
+ classFields.add(llvm::ConstantExpr::getBitCast(SuperClass, PtrTy));
+ else
+ classFields.addNullPointer(PtrTy);
} else
classFields.addNullPointer(PtrTy);
// const char *name;
@@ -1731,7 +1819,6 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
CGM.getContext().getCharWidth());
// struct objc_ivar ivars[]
auto ivarArrayBuilder = ivarListBuilder.beginArray();
- CodeGenTypes &Types = CGM.getTypes();
for (const ObjCIvarDecl *IVD = classDecl->all_declared_ivar_begin(); IVD;
IVD = IVD->getNextIvar()) {
auto ivarTy = IVD->getType();
@@ -1765,8 +1852,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
ivarBuilder.add(OffsetVar);
// Ivar size
ivarBuilder.addInt(Int32Ty,
- td.getTypeSizeInBits(Types.ConvertType(ivarTy)) /
- CGM.getContext().getCharWidth());
+ CGM.getContext().getTypeSizeInChars(ivarTy).getQuantity());
// Alignment will be stored as a base-2 log of the alignment.
int align = llvm::Log2_32(Context.getTypeAlignInChars(ivarTy).getQuantity());
// Objects that require more than 2^64-byte alignment should be impossible!
@@ -1839,19 +1925,24 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
classFields.finishAndCreateGlobal(SymbolForClass(className),
CGM.getPointerAlign(), false, llvm::GlobalValue::ExternalLinkage);
- if (CGM.getTriple().isOSBinFormatCOFF()) {
- auto Storage = llvm::GlobalValue::DefaultStorageClass;
- if (OID->getClassInterface()->hasAttr<DLLImportAttr>())
- Storage = llvm::GlobalValue::DLLImportStorageClass;
- else if (OID->getClassInterface()->hasAttr<DLLExportAttr>())
- Storage = llvm::GlobalValue::DLLExportStorageClass;
- cast<llvm::GlobalValue>(classStruct)->setDLLStorageClass(Storage);
- }
-
auto *classRefSymbol = GetClassVar(className);
classRefSymbol->setSection(sectionName<ClassReferenceSection>());
classRefSymbol->setInitializer(llvm::ConstantExpr::getBitCast(classStruct, IdTy));
+ if (IsCOFF) {
+ // we can't import a class struct.
+ if (OID->getClassInterface()->hasAttr<DLLExportAttr>()) {
+ cast<llvm::GlobalValue>(classStruct)->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+ cast<llvm::GlobalValue>(classRefSymbol)->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+ }
+
+ if (SuperClass) {
+ std::pair<llvm::Constant*, int> v{classStruct, 1};
+ EarlyInitList.emplace_back(SuperClass->getName(), std::move(v));
+ }
+
+ }
+
// Resolve the class aliases, if they exist.
// FIXME: Class pointer aliases shouldn't exist!
@@ -1879,7 +1970,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
auto classInitRef = new llvm::GlobalVariable(TheModule,
classStruct->getType(), false, llvm::GlobalValue::ExternalLinkage,
- classStruct, "._OBJC_INIT_CLASS_" + className);
+ classStruct, ManglePublicSymbol("OBJC_INIT_CLASS_") + className);
classInitRef->setSection(sectionName<ClassSection>());
CGM.addUsedGlobal(classInitRef);
@@ -1916,6 +2007,18 @@ const char *const CGObjCGNUstep2::SectionsBaseNames[8] =
"__objc_constant_string"
};
+const char *const CGObjCGNUstep2::PECOFFSectionsBaseNames[8] =
+{
+".objcrt$SEL",
+".objcrt$CLS",
+".objcrt$CLR",
+".objcrt$CAT",
+".objcrt$PCL",
+".objcrt$PCR",
+".objcrt$CAL",
+".objcrt$STR"
+};
+
/// Support for the ObjFW runtime.
class CGObjCObjFW: public CGObjCGNU {
protected:
@@ -1938,14 +2041,14 @@ protected:
EnforceType(Builder, Receiver, IdTy),
EnforceType(Builder, cmd, SelectorTy) };
- llvm::CallSite imp;
+ llvm::CallBase *imp;
if (CGM.ReturnTypeUsesSRet(MSI.CallInfo))
imp = CGF.EmitRuntimeCallOrInvoke(MsgLookupFnSRet, args);
else
imp = CGF.EmitRuntimeCallOrInvoke(MsgLookupFn, args);
imp->setMetadata(msgSendMDKind, node);
- return imp.getInstruction();
+ return imp;
}
llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, Address ObjCSuper,
@@ -2174,9 +2277,8 @@ llvm::Value *CGObjCGNU::GetClassNamed(CodeGenFunction &CGF,
if (!isWeak)
EmitClassRef(Name);
- llvm::Constant *ClassLookupFn =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, PtrToInt8Ty, true),
- "objc_lookup_class");
+ llvm::FunctionCallee ClassLookupFn = CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(IdTy, PtrToInt8Ty, true), "objc_lookup_class");
return CGF.EmitNounwindRuntimeCall(ClassLookupFn, ClassName);
}
@@ -2432,7 +2534,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
ReceiverClass = EnforceType(Builder, ReceiverClass, IdTy);
} else {
if (isCategoryImpl) {
- llvm::Constant *classLookupFunction = nullptr;
+ llvm::FunctionCallee classLookupFunction = nullptr;
if (IsClassMessage) {
classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
IdTy, PtrTy, true), "objc_get_meta_class");
@@ -2481,10 +2583,8 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
Address ObjCSuper = CGF.CreateTempAlloca(ObjCSuperTy,
CGF.getPointerAlign());
- Builder.CreateStore(Receiver,
- Builder.CreateStructGEP(ObjCSuper, 0, CharUnits::Zero()));
- Builder.CreateStore(ReceiverClass,
- Builder.CreateStructGEP(ObjCSuper, 1, CGF.getPointerSize()));
+ Builder.CreateStore(Receiver, Builder.CreateStructGEP(ObjCSuper, 0));
+ Builder.CreateStore(ReceiverClass, Builder.CreateStructGEP(ObjCSuper, 1));
ObjCSuper = EnforceType(Builder, ObjCSuper, PtrToObjCSuperTy);
@@ -2501,7 +2601,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
CGCallee callee(CGCalleeInfo(), imp);
- llvm::Instruction *call;
+ llvm::CallBase *call;
RValue msgRet = CGF.EmitCall(MSI.CallInfo, callee, Return, ActualArgs, &call);
call->setMetadata(msgSendMDKind, node);
return msgRet;
@@ -2595,16 +2695,21 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
case CodeGenOptions::Mixed:
case CodeGenOptions::NonLegacy:
if (CGM.ReturnTypeUsesFPRet(ResultType)) {
- imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
- "objc_msgSend_fpret");
+ imp =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
+ "objc_msgSend_fpret")
+ .getCallee();
} else if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) {
// The actual types here don't matter - we're going to bitcast the
// function anyway
- imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
- "objc_msgSend_stret");
+ imp =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
+ "objc_msgSend_stret")
+ .getCallee();
} else {
- imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
- "objc_msgSend");
+ imp = CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(IdTy, IdTy, true), "objc_msgSend")
+ .getCallee();
}
}
@@ -2613,7 +2718,7 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
imp = EnforceType(Builder, imp, MSI.MessengerType);
- llvm::Instruction *call;
+ llvm::CallBase *call;
CGCallee callee(CGCalleeInfo(), imp);
RValue msgRet = CGF.EmitCall(MSI.CallInfo, callee, Return, ActualArgs, &call);
call->setMetadata(msgSendMDKind, node);
@@ -3697,7 +3802,8 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
llvm::FunctionType *FT =
llvm::FunctionType::get(Builder.getVoidTy(), module->getType(), true);
- llvm::Value *Register = CGM.CreateRuntimeFunction(FT, "__objc_exec_class");
+ llvm::FunctionCallee Register =
+ CGM.CreateRuntimeFunction(FT, "__objc_exec_class");
Builder.CreateCall(Register, module);
if (!ClassAliases.empty()) {
@@ -3766,36 +3872,36 @@ llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
return Method;
}
-llvm::Constant *CGObjCGNU::GetPropertyGetFunction() {
+llvm::FunctionCallee CGObjCGNU::GetPropertyGetFunction() {
return GetPropertyFn;
}
-llvm::Constant *CGObjCGNU::GetPropertySetFunction() {
+llvm::FunctionCallee CGObjCGNU::GetPropertySetFunction() {
return SetPropertyFn;
}
-llvm::Constant *CGObjCGNU::GetOptimizedPropertySetFunction(bool atomic,
- bool copy) {
+llvm::FunctionCallee CGObjCGNU::GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) {
return nullptr;
}
-llvm::Constant *CGObjCGNU::GetGetStructFunction() {
+llvm::FunctionCallee CGObjCGNU::GetGetStructFunction() {
return GetStructPropertyFn;
}
-llvm::Constant *CGObjCGNU::GetSetStructFunction() {
+llvm::FunctionCallee CGObjCGNU::GetSetStructFunction() {
return SetStructPropertyFn;
}
-llvm::Constant *CGObjCGNU::GetCppAtomicObjectGetFunction() {
+llvm::FunctionCallee CGObjCGNU::GetCppAtomicObjectGetFunction() {
return nullptr;
}
-llvm::Constant *CGObjCGNU::GetCppAtomicObjectSetFunction() {
+llvm::FunctionCallee CGObjCGNU::GetCppAtomicObjectSetFunction() {
return nullptr;
}
-llvm::Constant *CGObjCGNU::EnumerationMutationFunction() {
+llvm::FunctionCallee CGObjCGNU::EnumerationMutationFunction() {
return EnumerationMutationFn;
}
@@ -3844,13 +3950,14 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
// that was passed into the `@catch` block, then this code path is not
// reached and we will instead call `objc_exception_throw` with an explicit
// argument.
- CGF.EmitRuntimeCallOrInvoke(ExceptionReThrowFn).setDoesNotReturn();
+ llvm::CallBase *Throw = CGF.EmitRuntimeCallOrInvoke(ExceptionReThrowFn);
+ Throw->setDoesNotReturn();
}
else {
ExceptionAsObject = CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy);
- llvm::CallSite Throw =
+ llvm::CallBase *Throw =
CGF.EmitRuntimeCallOrInvoke(ExceptionThrowFn, ExceptionAsObject);
- Throw.setDoesNotReturn();
+ Throw->setDoesNotReturn();
}
CGF.Builder.CreateUnreachable();
if (ClearInsertionPoint)
@@ -3861,8 +3968,7 @@ llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGenFunction &CGF,
Address AddrWeakObj) {
CGBuilderTy &B = CGF.Builder;
AddrWeakObj = EnforceType(B, AddrWeakObj, PtrToIdTy);
- return B.CreateCall(WeakReadFn.getType(), WeakReadFn,
- AddrWeakObj.getPointer());
+ return B.CreateCall(WeakReadFn, AddrWeakObj.getPointer());
}
void CGObjCGNU::EmitObjCWeakAssign(CodeGenFunction &CGF,
@@ -3870,8 +3976,7 @@ void CGObjCGNU::EmitObjCWeakAssign(CodeGenFunction &CGF,
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
dst = EnforceType(B, dst, PtrToIdTy);
- B.CreateCall(WeakAssignFn.getType(), WeakAssignFn,
- {src, dst.getPointer()});
+ B.CreateCall(WeakAssignFn, {src, dst.getPointer()});
}
void CGObjCGNU::EmitObjCGlobalAssign(CodeGenFunction &CGF,
@@ -3882,8 +3987,7 @@ void CGObjCGNU::EmitObjCGlobalAssign(CodeGenFunction &CGF,
dst = EnforceType(B, dst, PtrToIdTy);
   // FIXME. Add threadlocal assign API
   assert(!threadlocal && "EmitObjCGlobalAssign - Thread Local API NYI");
- B.CreateCall(GlobalAssignFn.getType(), GlobalAssignFn,
- {src, dst.getPointer()});
+ B.CreateCall(GlobalAssignFn, {src, dst.getPointer()});
}
void CGObjCGNU::EmitObjCIvarAssign(CodeGenFunction &CGF,
@@ -3892,8 +3996,7 @@ void CGObjCGNU::EmitObjCIvarAssign(CodeGenFunction &CGF,
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
dst = EnforceType(B, dst, IdTy);
- B.CreateCall(IvarAssignFn.getType(), IvarAssignFn,
- {src, dst.getPointer(), ivarOffset});
+ B.CreateCall(IvarAssignFn, {src, dst.getPointer(), ivarOffset});
}
void CGObjCGNU::EmitObjCStrongCastAssign(CodeGenFunction &CGF,
@@ -3901,8 +4004,7 @@ void CGObjCGNU::EmitObjCStrongCastAssign(CodeGenFunction &CGF,
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
dst = EnforceType(B, dst, PtrToIdTy);
- B.CreateCall(StrongCastAssignFn.getType(), StrongCastAssignFn,
- {src, dst.getPointer()});
+ B.CreateCall(StrongCastAssignFn, {src, dst.getPointer()});
}
void CGObjCGNU::EmitGCMemmoveCollectable(CodeGenFunction &CGF,
@@ -3913,8 +4015,7 @@ void CGObjCGNU::EmitGCMemmoveCollectable(CodeGenFunction &CGF,
DestPtr = EnforceType(B, DestPtr, PtrTy);
SrcPtr = EnforceType(B, SrcPtr, PtrTy);
- B.CreateCall(MemMoveFn.getType(), MemMoveFn,
- {DestPtr.getPointer(), SrcPtr.getPointer(), Size});
+ B.CreateCall(MemMoveFn, {DestPtr.getPointer(), SrcPtr.getPointer(), Size});
}
llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index d91eb43ca322..12880fecbadf 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -1,9 +1,8 @@
//===------- CGObjCMac.cpp - Interface to Apple Objective-C Runtime -------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -31,7 +30,6 @@
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
@@ -61,7 +59,7 @@ private:
///
/// The default messenger, used for sends whose ABI is unchanged from
/// the all-integer/pointer case.
- llvm::Constant *getMessageSendFn() const {
+ llvm::FunctionCallee getMessageSendFn() const {
// Add the non-lazy-bind attribute, since objc_msgSend is likely to
// be called a lot.
llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
@@ -77,12 +75,11 @@ private:
/// The messenger used when the return value is an aggregate returned
/// by indirect reference in the first argument, and therefore the
/// self and selector parameters are shifted over by one.
- llvm::Constant *getMessageSendStretFn() const {
+ llvm::FunctionCallee getMessageSendStretFn() const {
llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.VoidTy,
params, true),
"objc_msgSend_stret");
-
}
/// [double | long double] objc_msgSend_fpret(id self, SEL op, ...)
@@ -90,12 +87,11 @@ private:
/// The messenger used when the return value is returned on the x87
/// floating-point stack; without a special entrypoint, the nil case
/// would be unbalanced.
- llvm::Constant *getMessageSendFpretFn() const {
+ llvm::FunctionCallee getMessageSendFpretFn() const {
llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.DoubleTy,
params, true),
"objc_msgSend_fpret");
-
}
/// _Complex long double objc_msgSend_fp2ret(id self, SEL op, ...)
@@ -103,7 +99,7 @@ private:
/// The messenger used when the return value is returned in two values on the
/// x87 floating point stack; without a special entrypoint, the nil case
/// would be unbalanced. Only used on 64-bit X86.
- llvm::Constant *getMessageSendFp2retFn() const {
+ llvm::FunctionCallee getMessageSendFp2retFn() const {
llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
llvm::Type *longDoubleType = llvm::Type::getX86_FP80Ty(VMContext);
llvm::Type *resultType =
@@ -119,7 +115,7 @@ private:
/// The messenger used for super calls, which have different dispatch
/// semantics. The class passed is the superclass of the current
/// class.
- llvm::Constant *getMessageSendSuperFn() const {
+ llvm::FunctionCallee getMessageSendSuperFn() const {
llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, true),
@@ -130,7 +126,7 @@ private:
///
/// A slightly different messenger used for super calls. The class
/// passed is the current class.
- llvm::Constant *getMessageSendSuperFn2() const {
+ llvm::FunctionCallee getMessageSendSuperFn2() const {
llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, true),
@@ -141,7 +137,7 @@ private:
/// SEL op, ...)
///
/// The messenger used for super calls which return an aggregate indirectly.
- llvm::Constant *getMessageSendSuperStretFn() const {
+ llvm::FunctionCallee getMessageSendSuperStretFn() const {
llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.VoidTy, params, true),
@@ -152,19 +148,19 @@ private:
/// SEL op, ...)
///
/// objc_msgSendSuper_stret with the super2 semantics.
- llvm::Constant *getMessageSendSuperStretFn2() const {
+ llvm::FunctionCallee getMessageSendSuperStretFn2() const {
llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.VoidTy, params, true),
"objc_msgSendSuper2_stret");
}
- llvm::Constant *getMessageSendSuperFpretFn() const {
+ llvm::FunctionCallee getMessageSendSuperFpretFn() const {
// There is no objc_msgSendSuper_fpret? How can that work?
return getMessageSendSuperFn();
}
- llvm::Constant *getMessageSendSuperFpretFn2() const {
+ llvm::FunctionCallee getMessageSendSuperFpretFn2() const {
// There is no objc_msgSendSuper_fpret? How can that work?
return getMessageSendSuperFn2();
}
@@ -233,7 +229,7 @@ public:
/// CachePtrTy - LLVM type for struct objc_cache *.
llvm::PointerType *CachePtrTy;
- llvm::Constant *getGetPropertyFn() {
+ llvm::FunctionCallee getGetPropertyFn() {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
// id objc_getProperty (id, SEL, ptrdiff_t, bool)
@@ -248,7 +244,7 @@ public:
return CGM.CreateRuntimeFunction(FTy, "objc_getProperty");
}
- llvm::Constant *getSetPropertyFn() {
+ llvm::FunctionCallee getSetPropertyFn() {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
// void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)
@@ -267,7 +263,7 @@ public:
return CGM.CreateRuntimeFunction(FTy, "objc_setProperty");
}
- llvm::Constant *getOptimizedSetPropertyFn(bool atomic, bool copy) {
+ llvm::FunctionCallee getOptimizedSetPropertyFn(bool atomic, bool copy) {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
// void objc_setProperty_atomic(id self, SEL _cmd,
@@ -302,7 +298,7 @@ public:
return CGM.CreateRuntimeFunction(FTy, name);
}
- llvm::Constant *getCopyStructFn() {
+ llvm::FunctionCallee getCopyStructFn() {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
// void objc_copyStruct (void *, const void *, size_t, bool, bool)
@@ -322,7 +318,7 @@ public:
/// void objc_copyCppObjectAtomic(
/// void *dest, const void *src,
/// void (*copyHelper) (void *dest, const void *source));
- llvm::Constant *getCppAtomicObjectFunction() {
+ llvm::FunctionCallee getCppAtomicObjectFunction() {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
/// void objc_copyCppObjectAtomic(void *dest, const void *src, void *helper);
@@ -336,7 +332,7 @@ public:
return CGM.CreateRuntimeFunction(FTy, "objc_copyCppObjectAtomic");
}
- llvm::Constant *getEnumerationMutationFn() {
+ llvm::FunctionCallee getEnumerationMutationFn() {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
// void objc_enumerationMutation (id)
@@ -348,7 +344,7 @@ public:
return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
}
- llvm::Constant *getLookUpClassFn() {
+ llvm::FunctionCallee getLookUpClassFn() {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
// Class objc_lookUpClass (const char *)
@@ -363,7 +359,7 @@ public:
}
/// GcReadWeakFn -- LLVM objc_read_weak (id *src) function.
- llvm::Constant *getGcReadWeakFn() {
+ llvm::FunctionCallee getGcReadWeakFn() {
// id objc_read_weak (id *)
llvm::Type *args[] = { ObjectPtrTy->getPointerTo() };
llvm::FunctionType *FTy =
@@ -372,7 +368,7 @@ public:
}
/// GcAssignWeakFn -- LLVM objc_assign_weak function.
- llvm::Constant *getGcAssignWeakFn() {
+ llvm::FunctionCallee getGcAssignWeakFn() {
// id objc_assign_weak (id, id *)
llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
llvm::FunctionType *FTy =
@@ -381,7 +377,7 @@ public:
}
/// GcAssignGlobalFn -- LLVM objc_assign_global function.
- llvm::Constant *getGcAssignGlobalFn() {
+ llvm::FunctionCallee getGcAssignGlobalFn() {
// id objc_assign_global(id, id *)
llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
llvm::FunctionType *FTy =
@@ -390,7 +386,7 @@ public:
}
/// GcAssignThreadLocalFn -- LLVM objc_assign_threadlocal function.
- llvm::Constant *getGcAssignThreadLocalFn() {
+ llvm::FunctionCallee getGcAssignThreadLocalFn() {
// id objc_assign_threadlocal(id src, id * dest)
llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
llvm::FunctionType *FTy =
@@ -399,7 +395,7 @@ public:
}
/// GcAssignIvarFn -- LLVM objc_assign_ivar function.
- llvm::Constant *getGcAssignIvarFn() {
+ llvm::FunctionCallee getGcAssignIvarFn() {
// id objc_assign_ivar(id, id *, ptrdiff_t)
llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo(),
CGM.PtrDiffTy };
@@ -409,7 +405,7 @@ public:
}
/// GcMemmoveCollectableFn -- LLVM objc_memmove_collectable function.
- llvm::Constant *GcMemmoveCollectableFn() {
+ llvm::FunctionCallee GcMemmoveCollectableFn() {
// void *objc_memmove_collectable(void *dst, const void *src, size_t size)
llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, LongTy };
llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, args, false);
@@ -417,7 +413,7 @@ public:
}
/// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function.
- llvm::Constant *getGcAssignStrongCastFn() {
+ llvm::FunctionCallee getGcAssignStrongCastFn() {
// id objc_assign_strongCast(id, id *)
llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
llvm::FunctionType *FTy =
@@ -426,7 +422,7 @@ public:
}
/// ExceptionThrowFn - LLVM objc_exception_throw function.
- llvm::Constant *getExceptionThrowFn() {
+ llvm::FunctionCallee getExceptionThrowFn() {
// void objc_exception_throw(id)
llvm::Type *args[] = { ObjectPtrTy };
llvm::FunctionType *FTy =
@@ -435,14 +431,14 @@ public:
}
/// ExceptionRethrowFn - LLVM objc_exception_rethrow function.
- llvm::Constant *getExceptionRethrowFn() {
+ llvm::FunctionCallee getExceptionRethrowFn() {
// void objc_exception_rethrow(void)
llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
return CGM.CreateRuntimeFunction(FTy, "objc_exception_rethrow");
}
/// SyncEnterFn - LLVM object_sync_enter function.
- llvm::Constant *getSyncEnterFn() {
+ llvm::FunctionCallee getSyncEnterFn() {
// int objc_sync_enter (id)
llvm::Type *args[] = { ObjectPtrTy };
llvm::FunctionType *FTy =
@@ -451,7 +447,7 @@ public:
}
/// SyncExitFn - LLVM object_sync_exit function.
- llvm::Constant *getSyncExitFn() {
+ llvm::FunctionCallee getSyncExitFn() {
// int objc_sync_exit (id)
llvm::Type *args[] = { ObjectPtrTy };
llvm::FunctionType *FTy =
@@ -459,35 +455,35 @@ public:
return CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
}
- llvm::Constant *getSendFn(bool IsSuper) const {
+ llvm::FunctionCallee getSendFn(bool IsSuper) const {
return IsSuper ? getMessageSendSuperFn() : getMessageSendFn();
}
- llvm::Constant *getSendFn2(bool IsSuper) const {
+ llvm::FunctionCallee getSendFn2(bool IsSuper) const {
return IsSuper ? getMessageSendSuperFn2() : getMessageSendFn();
}
- llvm::Constant *getSendStretFn(bool IsSuper) const {
+ llvm::FunctionCallee getSendStretFn(bool IsSuper) const {
return IsSuper ? getMessageSendSuperStretFn() : getMessageSendStretFn();
}
- llvm::Constant *getSendStretFn2(bool IsSuper) const {
+ llvm::FunctionCallee getSendStretFn2(bool IsSuper) const {
return IsSuper ? getMessageSendSuperStretFn2() : getMessageSendStretFn();
}
- llvm::Constant *getSendFpretFn(bool IsSuper) const {
+ llvm::FunctionCallee getSendFpretFn(bool IsSuper) const {
return IsSuper ? getMessageSendSuperFpretFn() : getMessageSendFpretFn();
}
- llvm::Constant *getSendFpretFn2(bool IsSuper) const {
+ llvm::FunctionCallee getSendFpretFn2(bool IsSuper) const {
return IsSuper ? getMessageSendSuperFpretFn2() : getMessageSendFpretFn();
}
- llvm::Constant *getSendFp2retFn(bool IsSuper) const {
+ llvm::FunctionCallee getSendFp2retFn(bool IsSuper) const {
return IsSuper ? getMessageSendSuperFn() : getMessageSendFp2retFn();
}
- llvm::Constant *getSendFp2RetFn2(bool IsSuper) const {
+ llvm::FunctionCallee getSendFp2RetFn2(bool IsSuper) const {
return IsSuper ? getMessageSendSuperFn2() : getMessageSendFp2retFn();
}
@@ -553,7 +549,7 @@ public:
llvm::StructType *ExceptionDataTy;
/// ExceptionTryEnterFn - LLVM objc_exception_try_enter function.
- llvm::Constant *getExceptionTryEnterFn() {
+ llvm::FunctionCallee getExceptionTryEnterFn() {
llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.VoidTy, params, false),
@@ -561,7 +557,7 @@ public:
}
/// ExceptionTryExitFn - LLVM objc_exception_try_exit function.
- llvm::Constant *getExceptionTryExitFn() {
+ llvm::FunctionCallee getExceptionTryExitFn() {
llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.VoidTy, params, false),
@@ -569,7 +565,7 @@ public:
}
/// ExceptionExtractFn - LLVM objc_exception_extract function.
- llvm::Constant *getExceptionExtractFn() {
+ llvm::FunctionCallee getExceptionExtractFn() {
llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, false),
@@ -577,7 +573,7 @@ public:
}
/// ExceptionMatchFn - LLVM objc_exception_match function.
- llvm::Constant *getExceptionMatchFn() {
+ llvm::FunctionCallee getExceptionMatchFn() {
llvm::Type *params[] = { ClassPtrTy, ObjectPtrTy };
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.Int32Ty, params, false),
@@ -585,7 +581,7 @@ public:
}
/// SetJmpFn - LLVM _setjmp function.
- llvm::Constant *getSetJmpFn() {
+ llvm::FunctionCallee getSetJmpFn() {
// This is specifically the prototype for x86.
llvm::Type *params[] = { CGM.Int32Ty->getPointerTo() };
return CGM.CreateRuntimeFunction(
@@ -671,7 +667,7 @@ public:
// SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
llvm::PointerType *SuperMessageRefPtrTy;
- llvm::Constant *getMessageSendFixupFn() {
+ llvm::FunctionCallee getMessageSendFixupFn() {
// id objc_msgSend_fixup(id, struct message_ref_t*, ...)
llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
@@ -679,7 +675,7 @@ public:
"objc_msgSend_fixup");
}
- llvm::Constant *getMessageSendFpretFixupFn() {
+ llvm::FunctionCallee getMessageSendFpretFixupFn() {
// id objc_msgSend_fpret_fixup(id, struct message_ref_t*, ...)
llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
@@ -687,7 +683,7 @@ public:
"objc_msgSend_fpret_fixup");
}
- llvm::Constant *getMessageSendStretFixupFn() {
+ llvm::FunctionCallee getMessageSendStretFixupFn() {
// id objc_msgSend_stret_fixup(id, struct message_ref_t*, ...)
llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
@@ -695,7 +691,7 @@ public:
"objc_msgSend_stret_fixup");
}
- llvm::Constant *getMessageSendSuper2FixupFn() {
+ llvm::FunctionCallee getMessageSendSuper2FixupFn() {
// id objc_msgSendSuper2_fixup (struct objc_super *,
// struct _super_message_ref_t*, ...)
llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy };
@@ -704,7 +700,7 @@ public:
"objc_msgSendSuper2_fixup");
}
- llvm::Constant *getMessageSendSuper2StretFixupFn() {
+ llvm::FunctionCallee getMessageSendSuper2StretFixupFn() {
// id objc_msgSendSuper2_stret_fixup(struct objc_super *,
// struct _super_message_ref_t*, ...)
llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy };
@@ -713,19 +709,45 @@ public:
"objc_msgSendSuper2_stret_fixup");
}
- llvm::Constant *getObjCEndCatchFn() {
+ llvm::FunctionCallee getObjCEndCatchFn() {
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.VoidTy, false),
"objc_end_catch");
-
}
- llvm::Constant *getObjCBeginCatchFn() {
+ llvm::FunctionCallee getObjCBeginCatchFn() {
llvm::Type *params[] = { Int8PtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(Int8PtrTy,
params, false),
"objc_begin_catch");
}
+ /// Class objc_loadClassref (void *)
+ ///
+ /// Loads from a classref. For Objective-C stub classes, this invokes the
+ /// initialization callback stored inside the stub. For all other classes
+ /// this simply dereferences the pointer.
+ llvm::FunctionCallee getLoadClassrefFn() const {
+ // Add the non-lazy-bind attribute, since objc_loadClassref is likely to
+ // be called a lot.
+ //
+ // Also it is safe to make it readnone, since we never load or store the
+ // classref except by calling this function.
+ llvm::Type *params[] = { Int8PtrPtrTy };
+ llvm::FunctionCallee F = CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(ClassnfABIPtrTy, params, false),
+ "objc_loadClassref",
+ llvm::AttributeList::get(CGM.getLLVMContext(),
+ llvm::AttributeList::FunctionIndex,
+ {llvm::Attribute::NonLazyBind,
+ llvm::Attribute::ReadNone,
+ llvm::Attribute::NoUnwind}));
+ if (!CGM.getTriple().isOSBinFormatCOFF())
+ cast<llvm::Function>(F.getCallee())->setLinkage(
+ llvm::Function::ExternalWeakLinkage);
+
+ return F;
+ }
+
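As a rough illustration of the behavior described above (a hypothetical sketch, not the runtime's actual implementation; the stub layout and field names below are assumptions), objc_loadClassref conceptually behaves like this:

#include <cstdint>

// Hypothetical stub layout; the real one lives in the Objective-C runtime.
struct ObjCClassStubSketch {
  void *isa;
  void *(*initializer)(ObjCClassStubSketch *stub, void **classref);
};

void *loadClassrefSketch(void **classref) {
  std::uintptr_t value = reinterpret_cast<std::uintptr_t>(*classref);
  if (value & 1) {
    // Tagged classref: it points at a class stub (the classref emission later
    // in this patch sets the low bit), so run the stored callback.
    auto *stub =
        reinterpret_cast<ObjCClassStubSketch *>(value & ~std::uintptr_t(1));
    return stub->initializer(stub, classref);
  }
  // Ordinary classref: simply dereference the pointer.
  return *classref;
}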
llvm::StructType *EHTypeTy;
llvm::Type *EHTypePtrTy;
@@ -882,6 +904,9 @@ protected:
/// DefinedCategories - List of defined categories.
SmallVector<llvm::GlobalValue*, 16> DefinedCategories;
+ /// DefinedStubCategories - List of defined categories on class stubs.
+ SmallVector<llvm::GlobalValue*, 16> DefinedStubCategories;
+
/// DefinedNonLazyCategories - List of defined "non-lazy" categories.
SmallVector<llvm::GlobalValue*, 16> DefinedNonLazyCategories;
@@ -1325,15 +1350,15 @@ public:
llvm::Value *GenerateProtocolRef(CodeGenFunction &CGF,
const ObjCProtocolDecl *PD) override;
- llvm::Constant *GetPropertyGetFunction() override;
- llvm::Constant *GetPropertySetFunction() override;
- llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
- bool copy) override;
- llvm::Constant *GetGetStructFunction() override;
- llvm::Constant *GetSetStructFunction() override;
- llvm::Constant *GetCppAtomicObjectGetFunction() override;
- llvm::Constant *GetCppAtomicObjectSetFunction() override;
- llvm::Constant *EnumerationMutationFunction() override;
+ llvm::FunctionCallee GetPropertyGetFunction() override;
+ llvm::FunctionCallee GetPropertySetFunction() override;
+ llvm::FunctionCallee GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) override;
+ llvm::FunctionCallee GetGetStructFunction() override;
+ llvm::FunctionCallee GetSetStructFunction() override;
+ llvm::FunctionCallee GetCppAtomicObjectGetFunction() override;
+ llvm::FunctionCallee GetCppAtomicObjectSetFunction() override;
+ llvm::FunctionCallee EnumerationMutationFunction() override;
void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtTryStmt &S) override;
@@ -1469,6 +1494,12 @@ private:
bool isMetaclass,
ForDefinition_t isForDefinition);
+ llvm::Constant *GetClassGlobalForClassRef(const ObjCInterfaceDecl *ID);
+
+ llvm::Value *EmitLoadOfClassRef(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *ID,
+ llvm::GlobalVariable *Entry);
+
/// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
/// for the given class reference.
llvm::Value *EmitClassRef(CodeGenFunction &CGF,
@@ -1550,6 +1581,15 @@ private:
return false;
}
+ bool isClassLayoutKnownStatically(const ObjCInterfaceDecl *ID) {
+ // NSObject is a fixed size. If we can see the @implementation of a class
+ // which inherits from NSObject then we know that all its offsets also must
+ // be fixed. FIXME: Can we do this if we see a chain of super classes with
+ // implementations leading to NSObject?
+ return ID->getImplementation() && ID->getSuperClass() &&
+ ID->getSuperClass()->getName() == "NSObject";
+ }
+
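A brief illustration of the condition this predicate encodes, and of what it enables later in this patch (the class name is hypothetical; the fragment mirrors the EmitIvarOffset change further down and assumes that surrounding context):

// Hypothetical input: a class whose direct superclass is NSObject and whose
// @implementation is visible in this translation unit, e.g.
//   @interface Point : NSObject { double x, y; } @end
//   @implementation Point @end
// For such a class the predicate returns true, so the ivar-offset fast path
// added below can fold the offset to a compile-time constant:
if (isClassLayoutKnownStatically(Interface))
  IvarOffsetValue = llvm::ConstantInt::get(
      ObjCTypes.IvarOffsetVarTy,
      ComputeIvarBaseOffset(CGM, Interface->getImplementation(), Ivar));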
public:
CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm);
@@ -1598,35 +1638,35 @@ public:
llvm::Constant *GetEHType(QualType T) override;
- llvm::Constant *GetPropertyGetFunction() override {
+ llvm::FunctionCallee GetPropertyGetFunction() override {
return ObjCTypes.getGetPropertyFn();
}
- llvm::Constant *GetPropertySetFunction() override {
+ llvm::FunctionCallee GetPropertySetFunction() override {
return ObjCTypes.getSetPropertyFn();
}
- llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
- bool copy) override {
+ llvm::FunctionCallee GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) override {
return ObjCTypes.getOptimizedSetPropertyFn(atomic, copy);
}
- llvm::Constant *GetSetStructFunction() override {
+ llvm::FunctionCallee GetSetStructFunction() override {
return ObjCTypes.getCopyStructFn();
}
- llvm::Constant *GetGetStructFunction() override {
+ llvm::FunctionCallee GetGetStructFunction() override {
return ObjCTypes.getCopyStructFn();
}
- llvm::Constant *GetCppAtomicObjectSetFunction() override {
+ llvm::FunctionCallee GetCppAtomicObjectSetFunction() override {
return ObjCTypes.getCppAtomicObjectFunction();
}
- llvm::Constant *GetCppAtomicObjectGetFunction() override {
+ llvm::FunctionCallee GetCppAtomicObjectGetFunction() override {
return ObjCTypes.getCppAtomicObjectFunction();
}
- llvm::Constant *EnumerationMutationFunction() override {
+ llvm::FunctionCallee EnumerationMutationFunction() override {
return ObjCTypes.getEnumerationMutationFn();
}
@@ -1805,6 +1845,28 @@ static bool hasObjCExceptionAttribute(ASTContext &Context,
return false;
}
+static llvm::GlobalValue::LinkageTypes
+getLinkageTypeForObjCMetadata(CodeGenModule &CGM, StringRef Section) {
+ if (CGM.getTriple().isOSBinFormatMachO() &&
+ (Section.empty() || Section.startswith("__DATA")))
+ return llvm::GlobalValue::InternalLinkage;
+ return llvm::GlobalValue::PrivateLinkage;
+}
+
+/// A helper function to create an internal or private global variable.
+static llvm::GlobalVariable *
+finishAndCreateGlobal(ConstantInitBuilder::StructBuilder &Builder,
+ const llvm::Twine &Name, CodeGenModule &CGM) {
+ std::string SectionName;
+ if (CGM.getTriple().isOSBinFormatMachO())
+ SectionName = "__DATA, __objc_const";
+ auto *GV = Builder.finishAndCreateGlobal(
+ Name, CGM.getPointerAlign(), /*constant*/ false,
+ getLinkageTypeForObjCMetadata(CGM, SectionName));
+ GV->setSection(SectionName);
+ return GV;
+}
+
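A minimal usage sketch of the two helpers above (the field and the symbol name are placeholders, and the surrounding CodeGenModule context is assumed); the later hunks in this patch use exactly this pattern for class_ro_t structures, method lists, ivar lists, and protocol lists:

ConstantInitBuilder builder(CGM);
auto values = builder.beginStruct();
values.addInt(CGM.IntTy, 0);   // placeholder field
llvm::GlobalVariable *GV =
    finishAndCreateGlobal(values, "_OBJC_EXAMPLE_$_Placeholder", CGM);
CGM.addCompilerUsedGlobal(GV); // keep it alive, as the real metadata does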
/* *** CGObjCMac Public Interface *** */
CGObjCMac::CGObjCMac(CodeGen::CodeGenModule &cgm) : CGObjCCommonMac(cgm),
@@ -1907,7 +1969,7 @@ llvm::Constant *CGObjCNonFragileABIMac::getNSConstantStringClassRef() {
std::string str =
StringClass.empty() ? "OBJC_CLASS_$_NSConstantString"
: "OBJC_CLASS_$_" + StringClass;
- auto GV = GetClassGlobal(str, NotForDefinition);
+ llvm::Constant *GV = GetClassGlobal(str, NotForDefinition);
// Make sure the result is of the correct type.
auto V = llvm::ConstantExpr::getBitCast(GV, CGM.IntTy->getPointerTo());
@@ -2004,9 +2066,8 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
"objc_super");
llvm::Value *ReceiverAsObject =
CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
- CGF.Builder.CreateStore(
- ReceiverAsObject,
- CGF.Builder.CreateStructGEP(ObjCSuper, 0, CharUnits::Zero()));
+ CGF.Builder.CreateStore(ReceiverAsObject,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 0));
// If this is a class message the metaclass is passed as the target.
llvm::Value *Target;
@@ -2041,8 +2102,7 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
llvm::Type *ClassTy =
CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
Target = CGF.Builder.CreateBitCast(Target, ClassTy);
- CGF.Builder.CreateStore(Target,
- CGF.Builder.CreateStructGEP(ObjCSuper, 1, CGF.getPointerSize()));
+ CGF.Builder.CreateStore(Target, CGF.Builder.CreateStructGEP(ObjCSuper, 1));
return EmitMessageSend(CGF, Return, ResultType,
EmitSelector(CGF, Sel),
ObjCSuper.getPointer(), ObjCTypes.SuperPtrCTy,
@@ -2129,7 +2189,7 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
bool RequiresNullCheck = false;
- llvm::Constant *Fn = nullptr;
+ llvm::FunctionCallee Fn = nullptr;
if (CGM.ReturnSlotInterferesWithArgs(MSI.CallInfo)) {
if (ReceiverCanBeNull) RequiresNullCheck = true;
Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper)
@@ -2149,6 +2209,10 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
: ObjCTypes.getSendFn(IsSuper);
}
+ // Cast function to proper signature
+ llvm::Constant *BitcastFn = cast<llvm::Constant>(
+ CGF.Builder.CreateBitCast(Fn.getCallee(), MSI.MessengerType));
+
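For illustration (hypothetical method, Apple runtime headers assumed), the cast above is the IR-level version of the usual objc_msgSend calling pattern, in which the generic messenger is re-typed to each method's concrete signature:

#include <objc/message.h>   // Apple runtime: objc_msgSend, id, SEL

// Hypothetical selector -(double)scaleBy:(int)factor
using ScaleByFn = double (*)(id, SEL, int);
double callScaleBy(id receiver, SEL sel, int factor) {
  return reinterpret_cast<ScaleByFn>(objc_msgSend)(receiver, sel, factor);
}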
// We don't need to emit a null check to zero out an indirect result if the
// result is ignored.
if (Return.isUnused())
@@ -2169,16 +2233,15 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
nullReturn.init(CGF, Arg0);
}
- llvm::Instruction *CallSite;
- Fn = llvm::ConstantExpr::getBitCast(Fn, MSI.MessengerType);
- CGCallee Callee = CGCallee::forDirect(Fn);
+ llvm::CallBase *CallSite;
+ CGCallee Callee = CGCallee::forDirect(BitcastFn);
RValue rvalue = CGF.EmitCall(MSI.CallInfo, Callee, Return, ActualArgs,
&CallSite);
// Mark the call as noreturn if the method is marked noreturn and the
// receiver cannot be null.
if (Method && Method->hasAttr<NoReturnAttr>() && !ReceiverCanBeNull) {
- llvm::CallSite(CallSite).setDoesNotReturn();
+ CallSite->setDoesNotReturn();
}
return nullReturn.complete(CGF, Return, rvalue, ResultType, CallArgs,
@@ -2954,7 +3017,7 @@ llvm::Value *CGObjCCommonMac::EmitClassRefViaRuntime(
CodeGenFunction &CGF,
const ObjCInterfaceDecl *ID,
ObjCCommonTypesHelper &ObjCTypes) {
- llvm::Constant *lookUpClassFn = ObjCTypes.getLookUpClassFn();
+ llvm::FunctionCallee lookUpClassFn = ObjCTypes.getLookUpClassFn();
llvm::Value *className =
CGF.CGM.GetAddrOfConstantCString(ID->getObjCRuntimeNameAsString())
@@ -3100,7 +3163,7 @@ CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
values.add(classProperties);
// No special section, but goes in llvm.used
- return CreateMetadataVar("\01l_OBJC_PROTOCOLEXT_" + PD->getName(), values,
+ return CreateMetadataVar("_OBJC_PROTOCOLEXT_" + PD->getName(), values,
StringRef(), CGM.getPointerAlign(), true);
}
@@ -3333,9 +3396,9 @@ void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
// If there is no category @interface then there can be no properties.
if (Category) {
- Values.add(EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
+ Values.add(EmitPropertyList("_OBJC_$_PROP_LIST_" + ExtName.str(),
OCD, Category, ObjCTypes, false));
- Values.add(EmitPropertyList("\01l_OBJC_$_CLASS_PROP_LIST_" + ExtName.str(),
+ Values.add(EmitPropertyList("_OBJC_$_CLASS_PROP_LIST_" + ExtName.str(),
OCD, Category, ObjCTypes, true));
} else {
Values.addNullPointer(ObjCTypes.PropertyListPtrTy);
@@ -3681,8 +3744,8 @@ CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID,
// Properties.
llvm::Constant *propertyList =
- EmitPropertyList((isMetaclass ? Twine("\01l_OBJC_$_CLASS_PROP_LIST_")
- : Twine("\01l_OBJC_$_PROP_LIST_"))
+ EmitPropertyList((isMetaclass ? Twine("_OBJC_$_CLASS_PROP_LIST_")
+ : Twine("_OBJC_$_PROP_LIST_"))
+ ID->getName(),
ID, ID->getClassInterface(), ObjCTypes, isMetaclass);
@@ -3930,9 +3993,10 @@ llvm::GlobalVariable *CGObjCCommonMac::CreateMetadataVar(Twine Name,
StringRef Section,
CharUnits Align,
bool AddToUsed) {
+ llvm::GlobalValue::LinkageTypes LT =
+ getLinkageTypeForObjCMetadata(CGM, Section);
llvm::GlobalVariable *GV =
- Init.finishAndCreateGlobal(Name, Align, /*constant*/ false,
- llvm::GlobalValue::PrivateLinkage);
+ Init.finishAndCreateGlobal(Name, Align, /*constant*/ false, LT);
if (!Section.empty())
GV->setSection(Section);
if (AddToUsed)
@@ -3946,9 +4010,10 @@ llvm::GlobalVariable *CGObjCCommonMac::CreateMetadataVar(Twine Name,
CharUnits Align,
bool AddToUsed) {
llvm::Type *Ty = Init->getType();
+ llvm::GlobalValue::LinkageTypes LT =
+ getLinkageTypeForObjCMetadata(CGM, Section);
llvm::GlobalVariable *GV =
- new llvm::GlobalVariable(CGM.getModule(), Ty, false,
- llvm::GlobalValue::PrivateLinkage, Init, Name);
+ new llvm::GlobalVariable(CGM.getModule(), Ty, false, LT, Init, Name);
if (!Section.empty())
GV->setSection(Section);
GV->setAlignment(Align.getQuantity());
@@ -4011,36 +4076,36 @@ llvm::Function *CGObjCMac::ModuleInitFunction() {
return nullptr;
}
-llvm::Constant *CGObjCMac::GetPropertyGetFunction() {
+llvm::FunctionCallee CGObjCMac::GetPropertyGetFunction() {
return ObjCTypes.getGetPropertyFn();
}
-llvm::Constant *CGObjCMac::GetPropertySetFunction() {
+llvm::FunctionCallee CGObjCMac::GetPropertySetFunction() {
return ObjCTypes.getSetPropertyFn();
}
-llvm::Constant *CGObjCMac::GetOptimizedPropertySetFunction(bool atomic,
- bool copy) {
+llvm::FunctionCallee CGObjCMac::GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) {
return ObjCTypes.getOptimizedSetPropertyFn(atomic, copy);
}
-llvm::Constant *CGObjCMac::GetGetStructFunction() {
+llvm::FunctionCallee CGObjCMac::GetGetStructFunction() {
return ObjCTypes.getCopyStructFn();
}
-llvm::Constant *CGObjCMac::GetSetStructFunction() {
+llvm::FunctionCallee CGObjCMac::GetSetStructFunction() {
return ObjCTypes.getCopyStructFn();
}
-llvm::Constant *CGObjCMac::GetCppAtomicObjectGetFunction() {
+llvm::FunctionCallee CGObjCMac::GetCppAtomicObjectGetFunction() {
return ObjCTypes.getCppAtomicObjectFunction();
}
-llvm::Constant *CGObjCMac::GetCppAtomicObjectSetFunction() {
+llvm::FunctionCallee CGObjCMac::GetCppAtomicObjectSetFunction() {
return ObjCTypes.getCppAtomicObjectFunction();
}
-llvm::Constant *CGObjCMac::EnumerationMutationFunction() {
+llvm::FunctionCallee CGObjCMac::EnumerationMutationFunction() {
return ObjCTypes.getEnumerationMutationFn();
}
@@ -4216,14 +4281,15 @@ void FragileHazards::emitHazardsInNewBlocks() {
// Ignore instructions that aren't non-intrinsic calls.
// These are the only calls that can possibly call longjmp.
- if (!isa<llvm::CallInst>(I) && !isa<llvm::InvokeInst>(I)) continue;
+ if (!isa<llvm::CallInst>(I) && !isa<llvm::InvokeInst>(I))
+ continue;
if (isa<llvm::IntrinsicInst>(I))
continue;
// Ignore call sites marked nounwind. This may be questionable,
// since 'nounwind' doesn't necessarily mean 'does not call longjmp'.
- llvm::CallSite CS(&I);
- if (CS.doesNotThrow()) continue;
+ if (cast<llvm::CallBase>(I).doesNotThrow())
+ continue;
// Insert a read hazard before the call. This will ensure that
// any writes to the locals are performed before making the
@@ -4855,7 +4921,7 @@ llvm::Value *CGObjCMac::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
std::string CGObjCCommonMac::GetSectionName(StringRef Section,
StringRef MachOAttributes) {
switch (CGM.getTriple().getObjectFormat()) {
- default:
+ case llvm::Triple::UnknownObjectFormat:
llvm_unreachable("unexpected object file format");
case llvm::Triple::MachO: {
if (MachOAttributes.empty())
@@ -4870,7 +4936,13 @@ std::string CGObjCCommonMac::GetSectionName(StringRef Section,
assert(Section.substr(0, 2) == "__" &&
"expected the name to begin with __");
return ("." + Section.substr(2) + "$B").str();
+ case llvm::Triple::Wasm:
+ case llvm::Triple::XCOFF:
+ llvm::report_fatal_error(
+ "Objective-C support is unimplemented for object file format.");
}
+
+ llvm_unreachable("Unhandled llvm::Triple::ObjectFormatType enum");
}
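For concreteness, a couple of hedged expectations (the COFF result follows directly from the code above; the MachO string is approximate because that case's body is elided in this excerpt), assuming the surrounding CGObjCCommonMac context:

// Illustrative only, not test code from this patch. On a COFF target:
std::string CoffName =
    GetSectionName("__objc_classrefs", "regular,no_dead_strip");
assert(CoffName == ".objc_classrefs$B" &&
       "leading __ stripped, $B suffix appended");
// On MachO the segment and attributes are folded into one string, roughly
// "__DATA,__objc_classrefs,regular,no_dead_strip".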
/// EmitImageInfo - Emit the image info marker used to encode some module
@@ -5994,10 +6066,15 @@ void CGObjCNonFragileABIMac::AddModuleClassList(
Symbols.size()),
Symbols);
+ // Section name is obtained by calling GetSectionName, which returns
+ // sections in the __DATA segment on MachO.
+ assert((!CGM.getTriple().isOSBinFormatMachO() ||
+ SectionName.startswith("__DATA")) &&
+ "SectionName expected to start with __DATA on MachO");
+ llvm::GlobalValue::LinkageTypes LT =
+ getLinkageTypeForObjCMetadata(CGM, SectionName);
llvm::GlobalVariable *GV =
- new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
- llvm::GlobalValue::PrivateLinkage,
- Init,
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false, LT, Init,
SymbolName);
GV->setAlignment(CGM.getDataLayout().getABITypeAlignment(Init->getType()));
GV->setSection(SectionName);
@@ -6034,6 +6111,9 @@ void CGObjCNonFragileABIMac::FinishNonFragileABIModule() {
AddModuleClassList(DefinedCategories, "OBJC_LABEL_CATEGORY_$",
GetSectionName("__objc_catlist",
"regular,no_dead_strip"));
+ AddModuleClassList(DefinedStubCategories, "OBJC_LABEL_STUB_CATEGORY_$",
+ GetSectionName("__objc_catlist2",
+ "regular,no_dead_strip"));
AddModuleClassList(DefinedNonLazyCategories, "OBJC_LABEL_NONLAZY_CATEGORY_$",
GetSectionName("__objc_nlcatlist",
"regular,no_dead_strip"));
@@ -6176,7 +6256,7 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
const ObjCInterfaceDecl *OID = ID->getClassInterface();
assert(OID && "CGObjCNonFragileABIMac::BuildClassRoTInitializer");
- values.add(EmitProtocolList("\01l_OBJC_CLASS_PROTOCOLS_$_"
+ values.add(EmitProtocolList("_OBJC_CLASS_PROTOCOLS_$_"
+ OID->getObjCRuntimeNameAsString(),
OID->all_referenced_protocol_begin(),
OID->all_referenced_protocol_end()));
@@ -6185,29 +6265,23 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
values.addNullPointer(ObjCTypes.IvarListnfABIPtrTy);
values.add(GetIvarLayoutName(nullptr, ObjCTypes));
values.add(EmitPropertyList(
- "\01l_OBJC_$_CLASS_PROP_LIST_" + ID->getObjCRuntimeNameAsString(),
+ "_OBJC_$_CLASS_PROP_LIST_" + ID->getObjCRuntimeNameAsString(),
ID, ID->getClassInterface(), ObjCTypes, true));
} else {
values.add(EmitIvarList(ID));
values.add(BuildWeakIvarLayout(ID, beginInstance, endInstance, hasMRCWeak));
values.add(EmitPropertyList(
- "\01l_OBJC_$_PROP_LIST_" + ID->getObjCRuntimeNameAsString(),
+ "_OBJC_$_PROP_LIST_" + ID->getObjCRuntimeNameAsString(),
ID, ID->getClassInterface(), ObjCTypes, false));
}
llvm::SmallString<64> roLabel;
llvm::raw_svector_ostream(roLabel)
- << ((flags & NonFragileABI_Class_Meta) ? "\01l_OBJC_METACLASS_RO_$_"
- : "\01l_OBJC_CLASS_RO_$_")
+ << ((flags & NonFragileABI_Class_Meta) ? "_OBJC_METACLASS_RO_$_"
+ : "_OBJC_CLASS_RO_$_")
<< ClassName;
- llvm::GlobalVariable *CLASS_RO_GV =
- values.finishAndCreateGlobal(roLabel, CGM.getPointerAlign(),
- /*constant*/ false,
- llvm::GlobalValue::PrivateLinkage);
- if (CGM.getTriple().isOSBinFormatMachO())
- CLASS_RO_GV->setSection("__DATA, __objc_const");
- return CLASS_RO_GV;
+ return finishAndCreateGlobal(values, roLabel, CGM);
}
/// Build the metaclass object for a class.
@@ -6253,9 +6327,11 @@ CGObjCNonFragileABIMac::BuildClassObject(const ObjCInterfaceDecl *CI,
return GV;
}
-bool
-CGObjCNonFragileABIMac::ImplementationIsNonLazy(const ObjCImplDecl *OD) const {
- return OD->getClassMethod(GetNullarySelector("load")) != nullptr;
+bool CGObjCNonFragileABIMac::ImplementationIsNonLazy(
+ const ObjCImplDecl *OD) const {
+ return OD->getClassMethod(GetNullarySelector("load")) != nullptr ||
+ OD->getClassInterface()->hasAttr<ObjCNonLazyClassAttr>() ||
+ OD->hasAttr<ObjCNonLazyClassAttr>();
}
void CGObjCNonFragileABIMac::GetClassSizeInfo(const ObjCImplementationDecl *OID,
@@ -6437,7 +6513,7 @@ llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CodeGenFunction &CGF,
llvm::ConstantExpr::getBitCast(GetOrEmitProtocol(PD),
ObjCTypes.getExternalProtocolPtrTy());
- std::string ProtocolName("\01l_OBJC_PROTOCOL_REFERENCE_$_");
+ std::string ProtocolName("_OBJC_PROTOCOL_REFERENCE_$_");
ProtocolName += PD->getObjCRuntimeNameAsString();
CharUnits Align = CGF.getPointerAlign();
@@ -6472,7 +6548,7 @@ llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CodeGenFunction &CGF,
///
void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
const ObjCInterfaceDecl *Interface = OCD->getClassInterface();
- const char *Prefix = "\01l_OBJC_$_CATEGORY_";
+ const char *Prefix = "_OBJC_$_CATEGORY_";
llvm::SmallString<64> ExtCatName(Prefix);
ExtCatName += Interface->getObjCRuntimeNameAsString();
@@ -6508,14 +6584,14 @@ void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
SmallString<256> ExtName;
llvm::raw_svector_ostream(ExtName) << Interface->getObjCRuntimeNameAsString() << "_$_"
<< OCD->getName();
- values.add(EmitProtocolList("\01l_OBJC_CATEGORY_PROTOCOLS_$_"
+ values.add(EmitProtocolList("_OBJC_CATEGORY_PROTOCOLS_$_"
+ Interface->getObjCRuntimeNameAsString() + "_$_"
+ Category->getName(),
Category->protocol_begin(),
Category->protocol_end()));
- values.add(EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
+ values.add(EmitPropertyList("_OBJC_$_PROP_LIST_" + ExtName.str(),
OCD, Category, ObjCTypes, false));
- values.add(EmitPropertyList("\01l_OBJC_$_CLASS_PROP_LIST_" + ExtName.str(),
+ values.add(EmitPropertyList("_OBJC_$_CLASS_PROP_LIST_" + ExtName.str(),
OCD, Category, ObjCTypes, true));
} else {
values.addNullPointer(ObjCTypes.ProtocolListnfABIPtrTy);
@@ -6527,13 +6603,12 @@ void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
values.addInt(ObjCTypes.IntTy, Size);
llvm::GlobalVariable *GCATV =
- values.finishAndCreateGlobal(ExtCatName.str(), CGM.getPointerAlign(),
- /*constant*/ false,
- llvm::GlobalValue::PrivateLinkage);
- if (CGM.getTriple().isOSBinFormatMachO())
- GCATV->setSection("__DATA, __objc_const");
+ finishAndCreateGlobal(values, ExtCatName.str(), CGM);
CGM.addCompilerUsedGlobal(GCATV);
- DefinedCategories.push_back(GCATV);
+ if (Interface->hasAttr<ObjCClassStubAttr>())
+ DefinedStubCategories.push_back(GCATV);
+ else
+ DefinedCategories.push_back(GCATV);
// Determine if this category is also "non-lazy".
if (ImplementationIsNonLazy(OCD))
@@ -6590,36 +6665,36 @@ CGObjCNonFragileABIMac::emitMethodList(Twine name, MethodListType kind,
bool forProtocol;
switch (kind) {
case MethodListType::CategoryInstanceMethods:
- prefix = "\01l_OBJC_$_CATEGORY_INSTANCE_METHODS_";
+ prefix = "_OBJC_$_CATEGORY_INSTANCE_METHODS_";
forProtocol = false;
break;
case MethodListType::CategoryClassMethods:
- prefix = "\01l_OBJC_$_CATEGORY_CLASS_METHODS_";
+ prefix = "_OBJC_$_CATEGORY_CLASS_METHODS_";
forProtocol = false;
break;
case MethodListType::InstanceMethods:
- prefix = "\01l_OBJC_$_INSTANCE_METHODS_";
+ prefix = "_OBJC_$_INSTANCE_METHODS_";
forProtocol = false;
break;
case MethodListType::ClassMethods:
- prefix = "\01l_OBJC_$_CLASS_METHODS_";
+ prefix = "_OBJC_$_CLASS_METHODS_";
forProtocol = false;
break;
case MethodListType::ProtocolInstanceMethods:
- prefix = "\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_";
+ prefix = "_OBJC_$_PROTOCOL_INSTANCE_METHODS_";
forProtocol = true;
break;
case MethodListType::ProtocolClassMethods:
- prefix = "\01l_OBJC_$_PROTOCOL_CLASS_METHODS_";
+ prefix = "_OBJC_$_PROTOCOL_CLASS_METHODS_";
forProtocol = true;
break;
case MethodListType::OptionalProtocolInstanceMethods:
- prefix = "\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_OPT_";
+ prefix = "_OBJC_$_PROTOCOL_INSTANCE_METHODS_OPT_";
forProtocol = true;
break;
case MethodListType::OptionalProtocolClassMethods:
- prefix = "\01l_OBJC_$_PROTOCOL_CLASS_METHODS_OPT_";
+ prefix = "_OBJC_$_PROTOCOL_CLASS_METHODS_OPT_";
forProtocol = true;
break;
}
@@ -6638,11 +6713,7 @@ CGObjCNonFragileABIMac::emitMethodList(Twine name, MethodListType kind,
}
methodArray.finishAndAddTo(values);
- auto *GV = values.finishAndCreateGlobal(prefix + name, CGM.getPointerAlign(),
- /*constant*/ false,
- llvm::GlobalValue::PrivateLinkage);
- if (CGM.getTriple().isOSBinFormatMachO())
- GV->setSection("__DATA, __objc_const");
+ llvm::GlobalVariable *GV = finishAndCreateGlobal(values, prefix + name, CGM);
CGM.addCompilerUsedGlobal(GV);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListnfABIPtrTy);
}
@@ -6702,6 +6773,12 @@ CGObjCNonFragileABIMac::EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
IvarOffsetGV->setVisibility(llvm::GlobalValue::DefaultVisibility);
}
+ // If ID's layout is known, then make the global constant. This serves as a
+ // useful assertion: we'll never use this variable to calculate ivar offsets,
+ // so if the runtime tries to patch it then we should crash.
+ if (isClassLayoutKnownStatically(ID))
+ IvarOffsetGV->setConstant(true);
+
if (CGM.getTriple().isOSBinFormatMachO())
IvarOffsetGV->setSection("__DATA, __objc_ivar");
return IvarOffsetGV;
@@ -6776,13 +6853,9 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
ivars.finishAndAddTo(ivarList);
ivarList.fillPlaceholderWithInt(ivarCountSlot, ObjCTypes.IntTy, ivarCount);
- const char *Prefix = "\01l_OBJC_$_INSTANCE_VARIABLES_";
- llvm::GlobalVariable *GV =
- ivarList.finishAndCreateGlobal(Prefix + OID->getObjCRuntimeNameAsString(),
- CGM.getPointerAlign(), /*constant*/ false,
- llvm::GlobalValue::PrivateLinkage);
- if (CGM.getTriple().isOSBinFormatMachO())
- GV->setSection("__DATA, __objc_const");
+ const char *Prefix = "_OBJC_$_INSTANCE_VARIABLES_";
+ llvm::GlobalVariable *GV = finishAndCreateGlobal(
+ ivarList, Prefix + OID->getObjCRuntimeNameAsString(), CGM);
CGM.addCompilerUsedGlobal(GV);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListnfABIPtrTy);
}
@@ -6796,7 +6869,7 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocolRef(
// reference or not. At module finalization we add the empty
// contents for protocols which were referenced but never defined.
llvm::SmallString<64> Protocol;
- llvm::raw_svector_ostream(Protocol) << "\01l_OBJC_PROTOCOL_$_"
+ llvm::raw_svector_ostream(Protocol) << "_OBJC_PROTOCOL_$_"
<< PD->getObjCRuntimeNameAsString();
Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy,
@@ -6850,7 +6923,7 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
// isa is NULL
values.addNullPointer(ObjCTypes.ObjectPtrTy);
values.add(GetClassName(PD->getObjCRuntimeNameAsString()));
- values.add(EmitProtocolList("\01l_OBJC_$_PROTOCOL_REFS_"
+ values.add(EmitProtocolList("_OBJC_$_PROTOCOL_REFS_"
+ PD->getObjCRuntimeNameAsString(),
PD->protocol_begin(),
PD->protocol_end()));
@@ -6863,13 +6936,13 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
values.add(methodLists.emitMethodList(this, PD,
ProtocolMethodLists::OptionalClassMethods));
values.add(EmitPropertyList(
- "\01l_OBJC_$_PROP_LIST_" + PD->getObjCRuntimeNameAsString(),
+ "_OBJC_$_PROP_LIST_" + PD->getObjCRuntimeNameAsString(),
nullptr, PD, ObjCTypes, false));
uint32_t Size =
CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ProtocolnfABITy);
values.addInt(ObjCTypes.IntTy, Size);
values.addInt(ObjCTypes.IntTy, 0);
- values.add(EmitProtocolMethodTypes("\01l_OBJC_$_PROTOCOL_METHOD_TYPES_"
+ values.add(EmitProtocolMethodTypes("_OBJC_$_PROTOCOL_METHOD_TYPES_"
+ PD->getObjCRuntimeNameAsString(),
methodLists.emitExtendedTypesArray(this),
ObjCTypes));
@@ -6878,7 +6951,7 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
values.addNullPointer(ObjCTypes.Int8PtrTy);
values.add(EmitPropertyList(
- "\01l_OBJC_$_CLASS_PROP_LIST_" + PD->getObjCRuntimeNameAsString(),
+ "_OBJC_$_CLASS_PROP_LIST_" + PD->getObjCRuntimeNameAsString(),
nullptr, PD, ObjCTypes, true));
if (Entry) {
@@ -6888,7 +6961,7 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
} else {
llvm::SmallString<64> symbolName;
llvm::raw_svector_ostream(symbolName)
- << "\01l_OBJC_PROTOCOL_$_" << PD->getObjCRuntimeNameAsString();
+ << "_OBJC_PROTOCOL_$_" << PD->getObjCRuntimeNameAsString();
Entry = values.finishAndCreateGlobal(symbolName, CGM.getPointerAlign(),
/*constant*/ false,
@@ -6904,7 +6977,7 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
// Use this protocol meta-data to build protocol list table in section
// __DATA, __objc_protolist
llvm::SmallString<64> ProtocolRef;
- llvm::raw_svector_ostream(ProtocolRef) << "\01l_OBJC_LABEL_PROTOCOL_$_"
+ llvm::raw_svector_ostream(ProtocolRef) << "_OBJC_LABEL_PROTOCOL_$_"
<< PD->getObjCRuntimeNameAsString();
llvm::GlobalVariable *PTGV =
@@ -6962,11 +7035,7 @@ CGObjCNonFragileABIMac::EmitProtocolList(Twine Name,
array.finishAndAddTo(values);
values.fillPlaceholderWithInt(countSlot, ObjCTypes.LongTy, count);
- GV = values.finishAndCreateGlobal(Name, CGM.getPointerAlign(),
- /*constant*/ false,
- llvm::GlobalValue::PrivateLinkage);
- if (CGM.getTriple().isOSBinFormatMachO())
- GV->setSection("__DATA, __objc_const");
+ GV = finishAndCreateGlobal(values, Name, CGM);
CGM.addCompilerUsedGlobal(GV);
return llvm::ConstantExpr::getBitCast(GV,
ObjCTypes.ProtocolListnfABIPtrTy);
@@ -6990,17 +7059,24 @@ LValue CGObjCNonFragileABIMac::EmitObjCValueForIvar(
Offset);
}
-llvm::Value *CGObjCNonFragileABIMac::EmitIvarOffset(
- CodeGen::CodeGenFunction &CGF,
- const ObjCInterfaceDecl *Interface,
- const ObjCIvarDecl *Ivar) {
- llvm::Value *IvarOffsetValue = ObjCIvarOffsetVariable(Interface, Ivar);
- IvarOffsetValue = CGF.Builder.CreateAlignedLoad(IvarOffsetValue,
- CGF.getSizeAlign(), "ivar");
- if (IsIvarOffsetKnownIdempotent(CGF, Ivar))
- cast<llvm::LoadInst>(IvarOffsetValue)
- ->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
- llvm::MDNode::get(VMContext, None));
+llvm::Value *
+CGObjCNonFragileABIMac::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ llvm::Value *IvarOffsetValue;
+ if (isClassLayoutKnownStatically(Interface)) {
+ IvarOffsetValue = llvm::ConstantInt::get(
+ ObjCTypes.IvarOffsetVarTy,
+ ComputeIvarBaseOffset(CGM, Interface->getImplementation(), Ivar));
+ } else {
+ llvm::GlobalVariable *GV = ObjCIvarOffsetVariable(Interface, Ivar);
+ IvarOffsetValue =
+ CGF.Builder.CreateAlignedLoad(GV, CGF.getSizeAlign(), "ivar");
+ if (IsIvarOffsetKnownIdempotent(CGF, Ivar))
+ cast<llvm::LoadInst>(IvarOffsetValue)
+ ->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
+ llvm::MDNode::get(VMContext, None));
+ }
// This could be 32bit int or 64bit integer depending on the architecture.
// Cast it to 64bit integer value, if it is a 32bit integer ivar offset value
@@ -7069,8 +7145,8 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
// The runtime currently never uses vtable dispatch for anything
// except normal, non-super message-sends.
// FIXME: don't use this for that.
- llvm::Constant *fn = nullptr;
- std::string messageRefName("\01l_");
+ llvm::FunctionCallee fn = nullptr;
+ std::string messageRefName("_");
if (CGM.ReturnSlotInterferesWithArgs(MSI.CallInfo)) {
if (isSuper) {
fn = ObjCTypes.getMessageSendSuper2StretFixupFn();
@@ -7105,7 +7181,7 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
// Build the message ref structure.
ConstantInitBuilder builder(CGM);
auto values = builder.beginStruct();
- values.add(fn);
+ values.add(cast<llvm::Constant>(fn.getCallee()));
values.add(GetMethodVarName(selector));
messageRef = values.finishAndCreateGlobal(messageRefName,
CharUnits::fromQuantity(16),
@@ -7134,8 +7210,7 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
args[1].setRValue(RValue::get(mref.getPointer()));
// Load the function to call from the message ref table.
- Address calleeAddr =
- CGF.Builder.CreateStructGEP(mref, 0, CharUnits::Zero());
+ Address calleeAddr = CGF.Builder.CreateStructGEP(mref, 0);
llvm::Value *calleePtr = CGF.Builder.CreateLoad(calleeAddr, "msgSend_fn");
calleePtr = CGF.Builder.CreateBitCast(calleePtr, MSI.MessengerType);
@@ -7209,31 +7284,68 @@ CGObjCNonFragileABIMac::GetClassGlobal(StringRef Name,
return GV;
}
+llvm::Constant *
+CGObjCNonFragileABIMac::GetClassGlobalForClassRef(const ObjCInterfaceDecl *ID) {
+ llvm::Constant *ClassGV = GetClassGlobal(ID, /*metaclass*/ false,
+ NotForDefinition);
+
+ if (!ID->hasAttr<ObjCClassStubAttr>())
+ return ClassGV;
+
+ ClassGV = llvm::ConstantExpr::getPointerCast(ClassGV, ObjCTypes.Int8PtrTy);
+
+ // Stub classes are pointer-aligned. Classrefs pointing at stub classes
+ // must have the least significant bit set to 1.
+ auto *Idx = llvm::ConstantInt::get(CGM.Int32Ty, 1);
+ return llvm::ConstantExpr::getGetElementPtr(CGM.Int8Ty, ClassGV, Idx);
+}
+
+llvm::Value *
+CGObjCNonFragileABIMac::EmitLoadOfClassRef(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *ID,
+ llvm::GlobalVariable *Entry) {
+ if (ID && ID->hasAttr<ObjCClassStubAttr>()) {
+ // Classrefs pointing at Objective-C stub classes must be loaded by calling
+ // a special runtime function.
+ return CGF.EmitRuntimeCall(
+ ObjCTypes.getLoadClassrefFn(), Entry, "load_classref_result");
+ }
+
+ CharUnits Align = CGF.getPointerAlign();
+ return CGF.Builder.CreateAlignedLoad(Entry, Align);
+}
+
llvm::Value *
CGObjCNonFragileABIMac::EmitClassRefFromId(CodeGenFunction &CGF,
IdentifierInfo *II,
const ObjCInterfaceDecl *ID) {
- CharUnits Align = CGF.getPointerAlign();
llvm::GlobalVariable *&Entry = ClassReferences[II];
if (!Entry) {
llvm::Constant *ClassGV;
if (ID) {
- ClassGV = GetClassGlobal(ID, /*metaclass*/ false, NotForDefinition);
+ ClassGV = GetClassGlobalForClassRef(ID);
} else {
ClassGV = GetClassGlobal((getClassSymbolPrefix() + II->getName()).str(),
NotForDefinition);
+ assert(ClassGV->getType() == ObjCTypes.ClassnfABIPtrTy &&
+ "classref was emitted with the wrong type?");
}
- Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
- false, llvm::GlobalValue::PrivateLinkage,
- ClassGV, "OBJC_CLASSLIST_REFERENCES_$_");
- Entry->setAlignment(Align.getQuantity());
- Entry->setSection(GetSectionName("__objc_classrefs",
- "regular,no_dead_strip"));
+ std::string SectionName =
+ GetSectionName("__objc_classrefs", "regular,no_dead_strip");
+ Entry = new llvm::GlobalVariable(
+ CGM.getModule(), ClassGV->getType(), false,
+ getLinkageTypeForObjCMetadata(CGM, SectionName), ClassGV,
+ "OBJC_CLASSLIST_REFERENCES_$_");
+ Entry->setAlignment(CGF.getPointerAlign().getQuantity());
+ if (!ID || !ID->hasAttr<ObjCClassStubAttr>())
+ Entry->setSection(SectionName);
+
CGM.addCompilerUsedGlobal(Entry);
}
- return CGF.Builder.CreateAlignedLoad(Entry, Align);
+
+ return EmitLoadOfClassRef(CGF, ID, Entry);
}
llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CodeGenFunction &CGF,
@@ -7255,20 +7367,22 @@ llvm::Value *CGObjCNonFragileABIMac::EmitNSAutoreleasePoolClassRef(
llvm::Value *
CGObjCNonFragileABIMac::EmitSuperClassRef(CodeGenFunction &CGF,
const ObjCInterfaceDecl *ID) {
- CharUnits Align = CGF.getPointerAlign();
llvm::GlobalVariable *&Entry = SuperClassReferences[ID->getIdentifier()];
if (!Entry) {
- auto ClassGV = GetClassGlobal(ID, /*metaclass*/ false, NotForDefinition);
- Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
- false, llvm::GlobalValue::PrivateLinkage,
- ClassGV, "OBJC_CLASSLIST_SUP_REFS_$_");
- Entry->setAlignment(Align.getQuantity());
- Entry->setSection(GetSectionName("__objc_superrefs",
- "regular,no_dead_strip"));
+ llvm::Constant *ClassGV = GetClassGlobalForClassRef(ID);
+ std::string SectionName =
+ GetSectionName("__objc_superrefs", "regular,no_dead_strip");
+ Entry = new llvm::GlobalVariable(
+ CGM.getModule(), ClassGV->getType(), false,
+ getLinkageTypeForObjCMetadata(CGM, SectionName), ClassGV,
+ "OBJC_CLASSLIST_SUP_REFS_$_");
+ Entry->setAlignment(CGF.getPointerAlign().getQuantity());
+ Entry->setSection(SectionName);
CGM.addCompilerUsedGlobal(Entry);
}
- return CGF.Builder.CreateAlignedLoad(Entry, Align);
+
+ return EmitLoadOfClassRef(CGF, ID, Entry);
}
/// EmitMetaClassRef - Return a Value * of the address of _class_t
@@ -7281,14 +7395,14 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CodeGenFunction &CGF,
llvm::GlobalVariable * &Entry = MetaClassReferences[ID->getIdentifier()];
if (!Entry) {
auto MetaClassGV = GetClassGlobal(ID, /*metaclass*/ true, NotForDefinition);
-
- Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
- false, llvm::GlobalValue::PrivateLinkage,
- MetaClassGV, "OBJC_CLASSLIST_SUP_REFS_$_");
+ std::string SectionName =
+ GetSectionName("__objc_superrefs", "regular,no_dead_strip");
+ Entry = new llvm::GlobalVariable(
+ CGM.getModule(), ObjCTypes.ClassnfABIPtrTy, false,
+ getLinkageTypeForObjCMetadata(CGM, SectionName), MetaClassGV,
+ "OBJC_CLASSLIST_SUP_REFS_$_");
Entry->setAlignment(Align.getQuantity());
-
- Entry->setSection(GetSectionName("__objc_superrefs",
- "regular,no_dead_strip"));
+ Entry->setSection(SectionName);
CGM.addCompilerUsedGlobal(Entry);
}
@@ -7332,9 +7446,8 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
llvm::Value *ReceiverAsObject =
CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
- CGF.Builder.CreateStore(
- ReceiverAsObject,
- CGF.Builder.CreateStructGEP(ObjCSuper, 0, CharUnits::Zero()));
+ CGF.Builder.CreateStore(ReceiverAsObject,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 0));
// If this is a class message the metaclass is passed as the target.
llvm::Value *Target;
@@ -7348,8 +7461,7 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
llvm::Type *ClassTy =
CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
Target = CGF.Builder.CreateBitCast(Target, ClassTy);
- CGF.Builder.CreateStore(
- Target, CGF.Builder.CreateStructGEP(ObjCSuper, 1, CGF.getPointerSize()));
+ CGF.Builder.CreateStore(Target, CGF.Builder.CreateStructGEP(ObjCSuper, 1));
return (isVTableDispatchedSelector(Sel))
? EmitVTableMessageSend(CGF, Return, ResultType, Sel,
@@ -7380,12 +7492,14 @@ Address CGObjCNonFragileABIMac::EmitSelectorAddr(CodeGenFunction &CGF,
llvm::Constant *Casted =
llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
ObjCTypes.SelectorPtrTy);
- Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.SelectorPtrTy,
- false, llvm::GlobalValue::PrivateLinkage,
- Casted, "OBJC_SELECTOR_REFERENCES_");
+ std::string SectionName =
+ GetSectionName("__objc_selrefs", "literal_pointers,no_dead_strip");
+ Entry = new llvm::GlobalVariable(
+ CGM.getModule(), ObjCTypes.SelectorPtrTy, false,
+ getLinkageTypeForObjCMetadata(CGM, SectionName), Casted,
+ "OBJC_SELECTOR_REFERENCES_");
Entry->setExternallyInitialized(true);
- Entry->setSection(GetSectionName("__objc_selrefs",
- "literal_pointers,no_dead_strip"));
+ Entry->setSection(SectionName);
Entry->setAlignment(Align.getQuantity());
CGM.addCompilerUsedGlobal(Entry);
}
@@ -7509,9 +7623,8 @@ void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
void
CGObjCNonFragileABIMac::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtSynchronizedStmt &S) {
- EmitAtSynchronizedStmt(CGF, S,
- cast<llvm::Function>(ObjCTypes.getSyncEnterFn()),
- cast<llvm::Function>(ObjCTypes.getSyncExitFn()));
+ EmitAtSynchronizedStmt(CGF, S, ObjCTypes.getSyncEnterFn(),
+ ObjCTypes.getSyncExitFn());
}
llvm::Constant *
@@ -7542,10 +7655,9 @@ CGObjCNonFragileABIMac::GetEHType(QualType T) {
void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtTryStmt &S) {
- EmitTryCatchStmt(CGF, S,
- cast<llvm::Function>(ObjCTypes.getObjCBeginCatchFn()),
- cast<llvm::Function>(ObjCTypes.getObjCEndCatchFn()),
- cast<llvm::Function>(ObjCTypes.getExceptionRethrowFn()));
+ EmitTryCatchStmt(CGF, S, ObjCTypes.getObjCBeginCatchFn(),
+ ObjCTypes.getObjCEndCatchFn(),
+ ObjCTypes.getExceptionRethrowFn());
}
/// EmitThrowStmt - Generate code for a throw statement.
@@ -7555,11 +7667,13 @@ void CGObjCNonFragileABIMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
if (const Expr *ThrowExpr = S.getThrowExpr()) {
llvm::Value *Exception = CGF.EmitObjCThrowOperand(ThrowExpr);
Exception = CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy);
- CGF.EmitRuntimeCallOrInvoke(ObjCTypes.getExceptionThrowFn(), Exception)
- .setDoesNotReturn();
+ llvm::CallBase *Call =
+ CGF.EmitRuntimeCallOrInvoke(ObjCTypes.getExceptionThrowFn(), Exception);
+ Call->setDoesNotReturn();
} else {
- CGF.EmitRuntimeCallOrInvoke(ObjCTypes.getExceptionRethrowFn())
- .setDoesNotReturn();
+ llvm::CallBase *Call =
+ CGF.EmitRuntimeCallOrInvoke(ObjCTypes.getExceptionRethrowFn());
+ Call->setDoesNotReturn();
}
CGF.Builder.CreateUnreachable();
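The CGObjCMac.cpp hunks above share one migration: Objective-C runtime entry points are passed around as llvm::FunctionCallee (callee plus function type) instead of llvm::Constant* / cast<llvm::Function>, and call-emitting helpers now hand back an llvm::CallBase* rather than a CallSite, so attributes such as noreturn are set on the returned call. A minimal stand-alone sketch of that pattern, assuming only an llvm::Module and an IRBuilder; the function name and type here are illustrative, not taken from the patch:

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Module.h"

  // FunctionCallee carries both the callee and its type, so no cast to
  // llvm::Function is needed before the call is built.
  static llvm::CallInst *emitNoReturnRuntimeCall(llvm::Module &M,
                                                 llvm::IRBuilder<> &B,
                                                 llvm::Value *Arg) {
    llvm::FunctionCallee Fn = M.getOrInsertFunction(
        "objc_exception_throw",
        llvm::FunctionType::get(B.getVoidTy(), {Arg->getType()},
                                /*isVarArg=*/false));
    llvm::CallInst *Call = B.CreateCall(Fn, {Arg});
    Call->setDoesNotReturn(); // same pattern as the EmitThrowStmt hunk above
    return Call;
  }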
diff --git a/lib/CodeGen/CGObjCRuntime.cpp b/lib/CodeGen/CGObjCRuntime.cpp
index 4b6f24a03f27..f8b831d0e9be 100644
--- a/lib/CodeGen/CGObjCRuntime.cpp
+++ b/lib/CodeGen/CGObjCRuntime.cpp
@@ -1,9 +1,8 @@
//==- CGObjCRuntime.cpp - Interface to Shared Objective-C Runtime Features ==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -22,7 +21,6 @@
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtObjC.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
@@ -127,10 +125,10 @@ namespace {
};
struct CallObjCEndCatch final : EHScopeStack::Cleanup {
- CallObjCEndCatch(bool MightThrow, llvm::Value *Fn)
+ CallObjCEndCatch(bool MightThrow, llvm::FunctionCallee Fn)
: MightThrow(MightThrow), Fn(Fn) {}
bool MightThrow;
- llvm::Value *Fn;
+ llvm::FunctionCallee Fn;
void Emit(CodeGenFunction &CGF, Flags flags) override {
if (MightThrow)
@@ -141,12 +139,11 @@ namespace {
};
}
-
void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
const ObjCAtTryStmt &S,
- llvm::Constant *beginCatchFn,
- llvm::Constant *endCatchFn,
- llvm::Constant *exceptionRethrowFn) {
+ llvm::FunctionCallee beginCatchFn,
+ llvm::FunctionCallee endCatchFn,
+ llvm::FunctionCallee exceptionRethrowFn) {
// Jump destination for falling out of catch bodies.
CodeGenFunction::JumpDest Cont;
if (S.getNumCatchStmts())
@@ -313,10 +310,10 @@ void CGObjCRuntime::EmitInitOfCatchParam(CodeGenFunction &CGF,
namespace {
struct CallSyncExit final : EHScopeStack::Cleanup {
- llvm::Value *SyncExitFn;
+ llvm::FunctionCallee SyncExitFn;
llvm::Value *SyncArg;
- CallSyncExit(llvm::Value *SyncExitFn, llvm::Value *SyncArg)
- : SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}
+ CallSyncExit(llvm::FunctionCallee SyncExitFn, llvm::Value *SyncArg)
+ : SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
CGF.EmitNounwindRuntimeCall(SyncExitFn, SyncArg);
@@ -326,8 +323,8 @@ namespace {
void CGObjCRuntime::EmitAtSynchronizedStmt(CodeGenFunction &CGF,
const ObjCAtSynchronizedStmt &S,
- llvm::Function *syncEnterFn,
- llvm::Function *syncExitFn) {
+ llvm::FunctionCallee syncEnterFn,
+ llvm::FunctionCallee syncExitFn) {
CodeGenFunction::RunCleanupsScope cleanups(CGF);
// Evaluate the lock operand. This is guaranteed to dominate the
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index fa16c198adbc..471816cb5988 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -1,9 +1,8 @@
//===----- CGObjCRuntime.h - Interface to ObjC Runtimes ---------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -96,11 +95,10 @@ protected:
/// used to rethrow exceptions. If the begin and end catch functions are
/// NULL, then the function assumes that the EH personality function provides
/// the thrown object directly.
- void EmitTryCatchStmt(CodeGenFunction &CGF,
- const ObjCAtTryStmt &S,
- llvm::Constant *beginCatchFn,
- llvm::Constant *endCatchFn,
- llvm::Constant *exceptionRethrowFn);
+ void EmitTryCatchStmt(CodeGenFunction &CGF, const ObjCAtTryStmt &S,
+ llvm::FunctionCallee beginCatchFn,
+ llvm::FunctionCallee endCatchFn,
+ llvm::FunctionCallee exceptionRethrowFn);
void EmitInitOfCatchParam(CodeGenFunction &CGF, llvm::Value *exn,
const VarDecl *paramDecl);
@@ -110,9 +108,9 @@ protected:
/// the object. This function can be called by subclasses that use
/// zero-cost exception handling.
void EmitAtSynchronizedStmt(CodeGenFunction &CGF,
- const ObjCAtSynchronizedStmt &S,
- llvm::Function *syncEnterFn,
- llvm::Function *syncExitFn);
+ const ObjCAtSynchronizedStmt &S,
+ llvm::FunctionCallee syncEnterFn,
+ llvm::FunctionCallee syncExitFn);
public:
virtual ~CGObjCRuntime();
@@ -208,25 +206,25 @@ public:
const ObjCContainerDecl *CD) = 0;
/// Return the runtime function for getting properties.
- virtual llvm::Constant *GetPropertyGetFunction() = 0;
+ virtual llvm::FunctionCallee GetPropertyGetFunction() = 0;
/// Return the runtime function for setting properties.
- virtual llvm::Constant *GetPropertySetFunction() = 0;
+ virtual llvm::FunctionCallee GetPropertySetFunction() = 0;
/// Return the runtime function for optimized setting properties.
- virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
- bool copy) = 0;
+ virtual llvm::FunctionCallee GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) = 0;
// API for atomic copying of qualified aggregates in getter.
- virtual llvm::Constant *GetGetStructFunction() = 0;
+ virtual llvm::FunctionCallee GetGetStructFunction() = 0;
// API for atomic copying of qualified aggregates in setter.
- virtual llvm::Constant *GetSetStructFunction() = 0;
+ virtual llvm::FunctionCallee GetSetStructFunction() = 0;
/// API for atomic copying of qualified aggregates with non-trivial copy
/// assignment (c++) in setter.
- virtual llvm::Constant *GetCppAtomicObjectSetFunction() = 0;
+ virtual llvm::FunctionCallee GetCppAtomicObjectSetFunction() = 0;
/// API for atomic copying of qualified aggregates with non-trivial copy
/// assignment (c++) in getter.
- virtual llvm::Constant *GetCppAtomicObjectGetFunction() = 0;
+ virtual llvm::FunctionCallee GetCppAtomicObjectGetFunction() = 0;
/// GetClass - Return a reference to the class for the given
/// interface decl.
@@ -240,7 +238,7 @@ public:
/// EnumerationMutationFunction - Return the function that's called by the
/// compiler when a mutation is detected during foreach iteration.
- virtual llvm::Constant *EnumerationMutationFunction() = 0;
+ virtual llvm::FunctionCallee EnumerationMutationFunction() = 0;
virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtSynchronizedStmt &S) = 0;
diff --git a/lib/CodeGen/CGOpenCLRuntime.cpp b/lib/CodeGen/CGOpenCLRuntime.cpp
index 7f6f595dd5d1..191a95c62992 100644
--- a/lib/CodeGen/CGOpenCLRuntime.cpp
+++ b/lib/CodeGen/CGOpenCLRuntime.cpp
@@ -1,9 +1,8 @@
//===----- CGOpenCLRuntime.cpp - Interface to OpenCL Runtimes -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -123,6 +122,23 @@ llvm::PointerType *CGOpenCLRuntime::getGenericVoidPointerType() {
CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
}
+// Get the block literal from an expression derived from the block expression.
+// OpenCL v2.0 s6.12.5:
+// Block variable declarations are implicitly qualified with const. Therefore
+// all block variables must be initialized at declaration time and may not be
+// reassigned.
+static const BlockExpr *getBlockExpr(const Expr *E) {
+  const Expr *Prev = nullptr; // so we do not get stuck in an infinite loop.
+ while(!isa<BlockExpr>(E) && E != Prev) {
+ Prev = E;
+ E = E->IgnoreCasts();
+ if (auto DR = dyn_cast<DeclRefExpr>(E)) {
+ E = cast<VarDecl>(DR->getDecl())->getInit();
+ }
+ }
+ return cast<BlockExpr>(E);
+}
+
/// Record emitted llvm invoke function and llvm block literal for the
/// corresponding block expression.
void CGOpenCLRuntime::recordBlockInfo(const BlockExpr *E,
@@ -137,20 +153,17 @@ void CGOpenCLRuntime::recordBlockInfo(const BlockExpr *E,
EnqueuedBlockMap[E].Kernel = nullptr;
}
+llvm::Function *CGOpenCLRuntime::getInvokeFunction(const Expr *E) {
+ return EnqueuedBlockMap[getBlockExpr(E)].InvokeFunc;
+}
+
CGOpenCLRuntime::EnqueuedBlockInfo
CGOpenCLRuntime::emitOpenCLEnqueuedBlock(CodeGenFunction &CGF, const Expr *E) {
CGF.EmitScalarExpr(E);
  // The block literal may be assigned to a const variable. Chase it down
  // to get the block literal.
- if (auto DR = dyn_cast<DeclRefExpr>(E)) {
- E = cast<VarDecl>(DR->getDecl())->getInit();
- }
- E = E->IgnoreImplicit();
- if (auto Cast = dyn_cast<CastExpr>(E)) {
- E = Cast->getSubExpr();
- }
- auto *Block = cast<BlockExpr>(E);
+ const BlockExpr *Block = getBlockExpr(E);
assert(EnqueuedBlockMap.find(Block) != EnqueuedBlockMap.end() &&
"Block expression not emitted");
diff --git a/lib/CodeGen/CGOpenCLRuntime.h b/lib/CodeGen/CGOpenCLRuntime.h
index 750721f1b80f..3f7aa9b0d8dc 100644
--- a/lib/CodeGen/CGOpenCLRuntime.h
+++ b/lib/CodeGen/CGOpenCLRuntime.h
@@ -1,9 +1,8 @@
//===----- CGOpenCLRuntime.h - Interface to OpenCL Runtimes -----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -92,6 +91,10 @@ public:
/// \param Block block literal emitted for the block expression.
void recordBlockInfo(const BlockExpr *E, llvm::Function *InvokeF,
llvm::Value *Block);
+
+ /// \return LLVM block invoke function emitted for an expression derived from
+ /// the block expression.
+ llvm::Function *getInvokeFunction(const Expr *E);
};
}
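The new getInvokeFunction hook declared above pairs with the getBlockExpr helper added in CGOpenCLRuntime.cpp: because OpenCL v2.0 block variables are implicitly const, the expression reaching a builtin may be a DeclRefExpr to such a variable rather than the block literal itself, and the helper chases casts and the variable's initializer until the BlockExpr is found. A hypothetical caller, not part of this diff:

  // Sketch only: resolve the block invoke function whether the argument is a
  // block literal or a const block variable initialized with one.
  static llvm::Function *lookupBlockInvoke(CGOpenCLRuntime &OCLRT,
                                           const Expr *BlockArg) {
    return OCLRT.getInvokeFunction(BlockArg);
  }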
diff --git a/lib/CodeGen/CGOpenMPRuntime.cpp b/lib/CodeGen/CGOpenMPRuntime.cpp
index 20eb0b29f427..27e7175da841 100644
--- a/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -1,9 +1,8 @@
//===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -22,7 +21,6 @@
#include "clang/Basic/BitmaskEnum.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Bitcode/BitcodeReader.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Value.h"
@@ -432,7 +430,7 @@ public:
/// Values for bit flags used in the ident_t to describe the fields.
/// All enumeric elements are named and described in accordance with the code
-/// from http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
+/// from https://github.com/llvm/llvm-project/blob/master/openmp/runtime/src/kmp.h
enum OpenMPLocationFlags : unsigned {
/// Use trampoline for internal microtask.
OMP_IDENT_IMD = 0x01,
@@ -459,9 +457,35 @@ enum OpenMPLocationFlags : unsigned {
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
};
+namespace {
+LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
+/// Values for bit flags for marking which requires clauses have been used.
+enum OpenMPOffloadingRequiresDirFlags : int64_t {
+ /// flag undefined.
+ OMP_REQ_UNDEFINED = 0x000,
+ /// no requires clause present.
+ OMP_REQ_NONE = 0x001,
+ /// reverse_offload clause.
+ OMP_REQ_REVERSE_OFFLOAD = 0x002,
+ /// unified_address clause.
+ OMP_REQ_UNIFIED_ADDRESS = 0x004,
+ /// unified_shared_memory clause.
+ OMP_REQ_UNIFIED_SHARED_MEMORY = 0x008,
+ /// dynamic_allocators clause.
+ OMP_REQ_DYNAMIC_ALLOCATORS = 0x010,
+ LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_REQ_DYNAMIC_ALLOCATORS)
+};
+
+enum OpenMPOffloadingReservedDeviceIDs {
+  /// Device ID if the device was not defined; per the spec, the runtime
+  /// should get it from environment variables.
+ OMP_DEVICEID_UNDEF = -1,
+};
+} // anonymous namespace
+
/// Describes ident structure that describes a source location.
/// All descriptions are taken from
-/// http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
+/// https://github.com/llvm/llvm-project/blob/master/openmp/runtime/src/kmp.h
/// Original structure:
/// typedef struct ident {
/// kmp_int32 reserved_1; /**< might be used in Fortran;
@@ -586,6 +610,11 @@ enum OpenMPRTLFunction {
// kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
// kmp_routine_entry_t *task_entry);
OMPRTL__kmpc_omp_task_alloc,
+ // Call to kmp_task_t * __kmpc_omp_target_task_alloc(ident_t *,
+ // kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t,
+ // size_t sizeof_shareds, kmp_routine_entry_t *task_entry,
+ // kmp_int64 device_id);
+ OMPRTL__kmpc_omp_target_task_alloc,
// Call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *
// new_task);
OMPRTL__kmpc_omp_task,
@@ -669,6 +698,10 @@ enum OpenMPRTLFunction {
// Call to void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
// *d);
OMPRTL__kmpc_task_reduction_get_th_data,
+ // Call to void *__kmpc_alloc(int gtid, size_t sz, omp_allocator_handle_t al);
+ OMPRTL__kmpc_alloc,
+ // Call to void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t al);
+ OMPRTL__kmpc_free,
//
// Offloading related calls
@@ -677,44 +710,46 @@ enum OpenMPRTLFunction {
// size);
OMPRTL__kmpc_push_target_tripcount,
// Call to int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
- // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
+ // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
// *arg_types);
OMPRTL__tgt_target,
// Call to int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
- // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
+ // int32_t arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
// *arg_types);
OMPRTL__tgt_target_nowait,
// Call to int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
- // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
+ // int32_t arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
// *arg_types, int32_t num_teams, int32_t thread_limit);
OMPRTL__tgt_target_teams,
// Call to int32_t __tgt_target_teams_nowait(int64_t device_id, void
- // *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
+ // *host_ptr, int32_t arg_num, void** args_base, void **args, int64_t
// *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
OMPRTL__tgt_target_teams_nowait,
+ // Call to void __tgt_register_requires(int64_t flags);
+ OMPRTL__tgt_register_requires,
// Call to void __tgt_register_lib(__tgt_bin_desc *desc);
OMPRTL__tgt_register_lib,
// Call to void __tgt_unregister_lib(__tgt_bin_desc *desc);
OMPRTL__tgt_unregister_lib,
// Call to void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
- // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
+ // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
OMPRTL__tgt_target_data_begin,
// Call to void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
- // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
+ // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
// *arg_types);
OMPRTL__tgt_target_data_begin_nowait,
// Call to void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
// void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
OMPRTL__tgt_target_data_end,
// Call to void __tgt_target_data_end_nowait(int64_t device_id, int32_t
- // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
+ // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
// *arg_types);
OMPRTL__tgt_target_data_end_nowait,
// Call to void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
- // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
+ // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
OMPRTL__tgt_target_data_update,
// Call to void __tgt_target_data_update_nowait(int64_t device_id, int32_t
- // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
+ // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
// *arg_types);
OMPRTL__tgt_target_data_update_nowait,
};
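Two changes run through the enum above: the arg_sizes parameter of every __tgt_* entry point becomes int64_t* instead of size_t*, and several new entry points appear. Collected from the comments, the new prototypes are (a readability aid, not code from the patch; the typedefs are illustrative stand-ins for the runtime's own):

  #include <cstddef>
  #include <cstdint>
  // Stand-in typedefs; libomp/libomptarget define the real ones.
  typedef struct ident ident_t;
  typedef int kmp_int32;
  typedef long long kmp_int64;
  typedef struct kmp_task kmp_task_t;
  typedef kmp_int32 (*kmp_routine_entry_t)(kmp_int32, void *);
  typedef void *omp_allocator_handle_t; // "type is void *" per the comments

  extern "C" {
  // Deferred (nowait) target tasks: __kmpc_omp_task_alloc plus a device id.
  kmp_task_t *__kmpc_omp_target_task_alloc(ident_t *loc, kmp_int32 gtid,
                                           kmp_int32 flags,
                                           size_t sizeof_kmp_task_t,
                                           size_t sizeof_shareds,
                                           kmp_routine_entry_t *task_entry,
                                           kmp_int64 device_id);
  // OpenMP 5.0 allocator entry points.
  void *__kmpc_alloc(int gtid, size_t sz, omp_allocator_handle_t al);
  void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t al);
  // Registers the combination of 'requires' clauses seen while compiling.
  void __tgt_register_requires(int64_t flags);
  }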
@@ -1272,9 +1307,11 @@ emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
Name, &CGM.getModule());
CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
- Fn->removeFnAttr(llvm::Attribute::NoInline);
- Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
- Fn->addFnAttr(llvm::Attribute::AlwaysInline);
+ if (CGM.getLangOpts().Optimize) {
+ Fn->removeFnAttr(llvm::Attribute::NoInline);
+ Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
+ Fn->addFnAttr(llvm::Attribute::AlwaysInline);
+ }
CodeGenFunction CGF(CGM);
// Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
// Map "T omp_out;" variable to "*omp_out_parm" value in all expressions.
@@ -1340,7 +1377,7 @@ CGOpenMPRuntime::getUserDefinedReduction(const OMPDeclareReductionDecl *D) {
return UDRMap.lookup(D);
}
-static llvm::Value *emitParallelOrTeamsOutlinedFunction(
+static llvm::Function *emitParallelOrTeamsOutlinedFunction(
CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS,
const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen) {
@@ -1370,7 +1407,7 @@ static llvm::Value *emitParallelOrTeamsOutlinedFunction(
return CGF.GenerateOpenMPCapturedStmtFunction(*CS);
}
-llvm::Value *CGOpenMPRuntime::emitParallelOutlinedFunction(
+llvm::Function *CGOpenMPRuntime::emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
@@ -1378,7 +1415,7 @@ llvm::Value *CGOpenMPRuntime::emitParallelOutlinedFunction(
CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
}
-llvm::Value *CGOpenMPRuntime::emitTeamsOutlinedFunction(
+llvm::Function *CGOpenMPRuntime::emitTeamsOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
@@ -1386,7 +1423,7 @@ llvm::Value *CGOpenMPRuntime::emitTeamsOutlinedFunction(
CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
}
-llvm::Value *CGOpenMPRuntime::emitTaskOutlinedFunction(
+llvm::Function *CGOpenMPRuntime::emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
@@ -1417,7 +1454,7 @@ llvm::Value *CGOpenMPRuntime::emitTaskOutlinedFunction(
InnermostKind,
TD ? TD->hasCancel() : false, Action);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
- llvm::Value *Res = CGF.GenerateCapturedStmtFunction(*CS);
+ llvm::Function *Res = CGF.GenerateCapturedStmtFunction(*CS);
if (!Tied)
NumberOfParts = Action.getNumberOfParts();
return Res;
@@ -1478,7 +1515,7 @@ Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
// Initialize default location for psource field of ident_t structure of
// all ident_t objects. Format is ";file;function;line;column;;".
// Taken from
- // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp_str.c
+ // https://github.com/llvm/llvm-project/blob/master/openmp/runtime/src/kmp_str.cpp
DefaultOpenMPPSource =
CGM.GetAddrOfConstantCString(";unknown;unknown;0;0;;").getPointer();
DefaultOpenMPPSource =
@@ -1665,9 +1702,8 @@ llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
return llvm::PointerType::getUnqual(Kmpc_MicroTy);
}
-llvm::Constant *
-CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
- llvm::Constant *RTLFn = nullptr;
+llvm::FunctionCallee CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
+ llvm::FunctionCallee RTLFn = nullptr;
switch (static_cast<OpenMPRTLFunction>(Function)) {
case OMPRTL__kmpc_fork_call: {
// Build void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro
@@ -1677,6 +1713,22 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_call");
+ if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) {
+ if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) {
+ llvm::LLVMContext &Ctx = F->getContext();
+ llvm::MDBuilder MDB(Ctx);
+ // Annotate the callback behavior of the __kmpc_fork_call:
+ // - The callback callee is argument number 2 (microtask).
+ // - The first two arguments of the callback callee are unknown (-1).
+ // - All variadic arguments to the __kmpc_fork_call are passed to the
+ // callback callee.
+ F->addMetadata(
+ llvm::LLVMContext::MD_callback,
+ *llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
+ 2, {-1, -1},
+ /* VarArgsArePassed */ true)}));
+ }
+ }
break;
}
case OMPRTL__kmpc_global_thread_num: {
@@ -1871,6 +1923,21 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_alloc");
break;
}
+ case OMPRTL__kmpc_omp_target_task_alloc: {
+ // Build kmp_task_t *__kmpc_omp_target_task_alloc(ident_t *, kmp_int32 gtid,
+ // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
+ // kmp_routine_entry_t *task_entry, kmp_int64 device_id);
+ assert(KmpRoutineEntryPtrTy != nullptr &&
+ "Type kmp_routine_entry_t must be created.");
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
+ CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy,
+ CGM.Int64Ty};
+ // Return void * and then cast to particular kmp_task_t type.
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_target_task_alloc");
+ break;
+ }
case OMPRTL__kmpc_omp_task: {
// Build kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
// *new_task);
@@ -2084,6 +2151,22 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_teams");
+ if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) {
+ if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) {
+ llvm::LLVMContext &Ctx = F->getContext();
+ llvm::MDBuilder MDB(Ctx);
+ // Annotate the callback behavior of the __kmpc_fork_teams:
+ // - The callback callee is argument number 2 (microtask).
+ // - The first two arguments of the callback callee are unknown (-1).
+ // - All variadic arguments to the __kmpc_fork_teams are passed to the
+ // callback callee.
+ F->addMetadata(
+ llvm::LLVMContext::MD_callback,
+ *llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
+ 2, {-1, -1},
+ /* VarArgsArePassed */ true)}));
+ }
+ }
break;
}
case OMPRTL__kmpc_taskloop: {
@@ -2166,6 +2249,24 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
FnTy, /*Name=*/"__kmpc_task_reduction_get_th_data");
break;
}
+ case OMPRTL__kmpc_alloc: {
+    // Build void *__kmpc_alloc(int gtid, size_t sz, omp_allocator_handle_t
+ // al); omp_allocator_handle_t type is void *.
+ llvm::Type *TypeParams[] = {CGM.IntTy, CGM.SizeTy, CGM.VoidPtrTy};
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_alloc");
+ break;
+ }
+ case OMPRTL__kmpc_free: {
+    // Build void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t
+ // al); omp_allocator_handle_t type is void *.
+ llvm::Type *TypeParams[] = {CGM.IntTy, CGM.VoidPtrTy, CGM.VoidPtrTy};
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_free");
+ break;
+ }
case OMPRTL__kmpc_push_target_tripcount: {
// Build void __kmpc_push_target_tripcount(int64_t device_id, kmp_uint64
// size);
@@ -2177,14 +2278,14 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
}
case OMPRTL__tgt_target: {
// Build int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
- // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
+ // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
// *arg_types);
llvm::Type *TypeParams[] = {CGM.Int64Ty,
CGM.VoidPtrTy,
CGM.Int32Ty,
CGM.VoidPtrPtrTy,
CGM.VoidPtrPtrTy,
- CGM.SizeTy->getPointerTo(),
+ CGM.Int64Ty->getPointerTo(),
CGM.Int64Ty->getPointerTo()};
auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
@@ -2193,14 +2294,14 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
}
case OMPRTL__tgt_target_nowait: {
// Build int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
- // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
+ // int32_t arg_num, void** args_base, void **args, int64_t *arg_sizes,
// int64_t *arg_types);
llvm::Type *TypeParams[] = {CGM.Int64Ty,
CGM.VoidPtrTy,
CGM.Int32Ty,
CGM.VoidPtrPtrTy,
CGM.VoidPtrPtrTy,
- CGM.SizeTy->getPointerTo(),
+ CGM.Int64Ty->getPointerTo(),
CGM.Int64Ty->getPointerTo()};
auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
@@ -2209,14 +2310,14 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
}
case OMPRTL__tgt_target_teams: {
// Build int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
- // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
+ // int32_t arg_num, void** args_base, void **args, int64_t *arg_sizes,
// int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
llvm::Type *TypeParams[] = {CGM.Int64Ty,
CGM.VoidPtrTy,
CGM.Int32Ty,
CGM.VoidPtrPtrTy,
CGM.VoidPtrPtrTy,
- CGM.SizeTy->getPointerTo(),
+ CGM.Int64Ty->getPointerTo(),
CGM.Int64Ty->getPointerTo(),
CGM.Int32Ty,
CGM.Int32Ty};
@@ -2227,14 +2328,14 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
}
case OMPRTL__tgt_target_teams_nowait: {
// Build int32_t __tgt_target_teams_nowait(int64_t device_id, void
- // *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
+ // *host_ptr, int32_t arg_num, void** args_base, void **args, int64_t
// *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
llvm::Type *TypeParams[] = {CGM.Int64Ty,
CGM.VoidPtrTy,
CGM.Int32Ty,
CGM.VoidPtrPtrTy,
CGM.VoidPtrPtrTy,
- CGM.SizeTy->getPointerTo(),
+ CGM.Int64Ty->getPointerTo(),
CGM.Int64Ty->getPointerTo(),
CGM.Int32Ty,
CGM.Int32Ty};
@@ -2243,6 +2344,14 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams_nowait");
break;
}
+ case OMPRTL__tgt_register_requires: {
+ // Build void __tgt_register_requires(int64_t flags);
+ llvm::Type *TypeParams[] = {CGM.Int64Ty};
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_requires");
+ break;
+ }
case OMPRTL__tgt_register_lib: {
// Build void __tgt_register_lib(__tgt_bin_desc *desc);
QualType ParamTy =
@@ -2265,12 +2374,12 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
}
case OMPRTL__tgt_target_data_begin: {
// Build void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
- // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
+ // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
llvm::Type *TypeParams[] = {CGM.Int64Ty,
CGM.Int32Ty,
CGM.VoidPtrPtrTy,
CGM.VoidPtrPtrTy,
- CGM.SizeTy->getPointerTo(),
+ CGM.Int64Ty->getPointerTo(),
CGM.Int64Ty->getPointerTo()};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
@@ -2279,13 +2388,13 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
}
case OMPRTL__tgt_target_data_begin_nowait: {
// Build void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
- // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
+ // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
// *arg_types);
llvm::Type *TypeParams[] = {CGM.Int64Ty,
CGM.Int32Ty,
CGM.VoidPtrPtrTy,
CGM.VoidPtrPtrTy,
- CGM.SizeTy->getPointerTo(),
+ CGM.Int64Ty->getPointerTo(),
CGM.Int64Ty->getPointerTo()};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
@@ -2294,12 +2403,12 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
}
case OMPRTL__tgt_target_data_end: {
// Build void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
- // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
+ // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
llvm::Type *TypeParams[] = {CGM.Int64Ty,
CGM.Int32Ty,
CGM.VoidPtrPtrTy,
CGM.VoidPtrPtrTy,
- CGM.SizeTy->getPointerTo(),
+ CGM.Int64Ty->getPointerTo(),
CGM.Int64Ty->getPointerTo()};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
@@ -2308,13 +2417,13 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
}
case OMPRTL__tgt_target_data_end_nowait: {
// Build void __tgt_target_data_end_nowait(int64_t device_id, int32_t
- // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
+ // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
// *arg_types);
llvm::Type *TypeParams[] = {CGM.Int64Ty,
CGM.Int32Ty,
CGM.VoidPtrPtrTy,
CGM.VoidPtrPtrTy,
- CGM.SizeTy->getPointerTo(),
+ CGM.Int64Ty->getPointerTo(),
CGM.Int64Ty->getPointerTo()};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
@@ -2323,12 +2432,12 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
}
case OMPRTL__tgt_target_data_update: {
// Build void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
- // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
+ // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
llvm::Type *TypeParams[] = {CGM.Int64Ty,
CGM.Int32Ty,
CGM.VoidPtrPtrTy,
CGM.VoidPtrPtrTy,
- CGM.SizeTy->getPointerTo(),
+ CGM.Int64Ty->getPointerTo(),
CGM.Int64Ty->getPointerTo()};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
@@ -2337,13 +2446,13 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
}
case OMPRTL__tgt_target_data_update_nowait: {
// Build void __tgt_target_data_update_nowait(int64_t device_id, int32_t
- // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
+ // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
// *arg_types);
llvm::Type *TypeParams[] = {CGM.Int64Ty,
CGM.Int32Ty,
CGM.VoidPtrPtrTy,
CGM.VoidPtrPtrTy,
- CGM.SizeTy->getPointerTo(),
+ CGM.Int64Ty->getPointerTo(),
CGM.Int64Ty->getPointerTo()};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
@@ -2355,8 +2464,8 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
return RTLFn;
}
-llvm::Constant *CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize,
- bool IVSigned) {
+llvm::FunctionCallee
+CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize, bool IVSigned) {
assert((IVSize == 32 || IVSize == 64) &&
"IV size is not compatible with the omp runtime");
StringRef Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
@@ -2381,8 +2490,8 @@ llvm::Constant *CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize,
return CGM.CreateRuntimeFunction(FnTy, Name);
}
-llvm::Constant *CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize,
- bool IVSigned) {
+llvm::FunctionCallee
+CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize, bool IVSigned) {
assert((IVSize == 32 || IVSize == 64) &&
"IV size is not compatible with the omp runtime");
StringRef Name =
@@ -2403,8 +2512,8 @@ llvm::Constant *CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize,
return CGM.CreateRuntimeFunction(FnTy, Name);
}
-llvm::Constant *CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize,
- bool IVSigned) {
+llvm::FunctionCallee
+CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize, bool IVSigned) {
assert((IVSize == 32 || IVSize == 64) &&
"IV size is not compatible with the omp runtime");
StringRef Name =
@@ -2420,8 +2529,8 @@ llvm::Constant *CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize,
return CGM.CreateRuntimeFunction(FnTy, Name);
}
-llvm::Constant *CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize,
- bool IVSigned) {
+llvm::FunctionCallee
+CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize, bool IVSigned) {
assert((IVSize == 32 || IVSize == 64) &&
"IV size is not compatible with the omp runtime");
StringRef Name =
@@ -2443,16 +2552,18 @@ llvm::Constant *CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize,
return CGM.CreateRuntimeFunction(FnTy, Name);
}
-Address CGOpenMPRuntime::getAddrOfDeclareTargetLink(const VarDecl *VD) {
+Address CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) {
if (CGM.getLangOpts().OpenMPSimd)
return Address::invalid();
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
- if (Res && *Res == OMPDeclareTargetDeclAttr::MT_Link) {
+ if (Res && (*Res == OMPDeclareTargetDeclAttr::MT_Link ||
+ (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ HasRequiresUnifiedSharedMemory))) {
SmallString<64> PtrName;
{
llvm::raw_svector_ostream OS(PtrName);
- OS << CGM.getMangledName(GlobalDecl(VD)) << "_decl_tgt_link_ptr";
+ OS << CGM.getMangledName(GlobalDecl(VD)) << "_decl_tgt_ref_ptr";
}
llvm::Value *Ptr = CGM.getModule().getNamedValue(PtrName);
if (!Ptr) {
@@ -2669,7 +2780,9 @@ bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
bool PerformInit) {
Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
- if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link)
+ if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link ||
+ (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ HasRequiresUnifiedSharedMemory))
return CGM.getLangOpts().OpenMPIsDevice;
VD = VD->getDefinition(CGM.getContext());
if (VD && !DeclareTargetWithDefinition.insert(CGM.getMangledName(VD)).second)
@@ -2785,7 +2898,7 @@ Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
getThreadID(CGF, SourceLocation()),
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(GAddr, CGM.VoidPtrTy),
CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy,
- /*IsSigned=*/false),
+ /*isSigned=*/false),
getOrCreateInternalVariable(
CGM.VoidPtrPtrTy, Twine(Name).concat(Suffix).concat(CacheSuffix))};
return Address(
@@ -2836,7 +2949,7 @@ void CGOpenMPRuntime::emitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond,
}
void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
- llvm::Value *OutlinedFn,
+ llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond) {
if (!CGF.HaveInsertPoint())
@@ -2854,7 +2967,8 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
RealArgs.append(std::begin(Args), std::end(Args));
RealArgs.append(CapturedVars.begin(), CapturedVars.end());
- llvm::Value *RTLFn = RT.createRuntimeFunction(OMPRTL__kmpc_fork_call);
+ llvm::FunctionCallee RTLFn =
+ RT.createRuntimeFunction(OMPRTL__kmpc_fork_call);
CGF.EmitRuntimeCall(RTLFn, RealArgs);
};
auto &&ElseGen = [OutlinedFn, CapturedVars, RTLoc, Loc](CodeGenFunction &CGF,
@@ -2915,9 +3029,8 @@ Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
return ThreadIDTemp;
}
-llvm::Constant *
-CGOpenMPRuntime::getOrCreateInternalVariable(llvm::Type *Ty,
- const llvm::Twine &Name) {
+llvm::Constant *CGOpenMPRuntime::getOrCreateInternalVariable(
+ llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
Out << Name;
@@ -2932,7 +3045,8 @@ CGOpenMPRuntime::getOrCreateInternalVariable(llvm::Type *Ty,
return Elem.second = new llvm::GlobalVariable(
CGM.getModule(), Ty, /*IsConstant*/ false,
llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
- Elem.first());
+ Elem.first(), /*InsertBefore=*/nullptr,
+ llvm::GlobalValue::NotThreadLocal, AddressSpace);
}
llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
@@ -2944,17 +3058,18 @@ llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
namespace {
/// Common pre(post)-action for different OpenMP constructs.
class CommonActionTy final : public PrePostActionTy {
- llvm::Value *EnterCallee;
+ llvm::FunctionCallee EnterCallee;
ArrayRef<llvm::Value *> EnterArgs;
- llvm::Value *ExitCallee;
+ llvm::FunctionCallee ExitCallee;
ArrayRef<llvm::Value *> ExitArgs;
bool Conditional;
llvm::BasicBlock *ContBlock = nullptr;
public:
- CommonActionTy(llvm::Value *EnterCallee, ArrayRef<llvm::Value *> EnterArgs,
- llvm::Value *ExitCallee, ArrayRef<llvm::Value *> ExitArgs,
- bool Conditional = false)
+ CommonActionTy(llvm::FunctionCallee EnterCallee,
+ ArrayRef<llvm::Value *> EnterArgs,
+ llvm::FunctionCallee ExitCallee,
+ ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
: EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
ExitArgs(ExitArgs), Conditional(Conditional) {}
void Enter(CodeGenFunction &CGF) override {
@@ -3059,8 +3174,7 @@ void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array,
unsigned Index, const VarDecl *Var) {
// Pull out the pointer to the variable.
- Address PtrAddr =
- CGF.Builder.CreateConstArrayGEP(Array, Index, CGF.getPointerSize());
+ Address PtrAddr = CGF.Builder.CreateConstArrayGEP(Array, Index);
llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var));
@@ -3176,8 +3290,7 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
Address CopyprivateList =
CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
- Address Elem = CGF.Builder.CreateConstArrayGEP(
- CopyprivateList, I, CGF.getPointerSize());
+ Address Elem = CGF.Builder.CreateConstArrayGEP(CopyprivateList, I);
CGF.Builder.CreateStore(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitLValue(CopyprivateVars[I]).getPointer(), CGF.VoidPtrTy),
@@ -3241,6 +3354,24 @@ unsigned CGOpenMPRuntime::getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind) {
return Flags;
}
+void CGOpenMPRuntime::getDefaultScheduleAndChunk(
+ CodeGenFunction &CGF, const OMPLoopDirective &S,
+ OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const {
+ // Check if the loop directive is actually a doacross loop directive. In this
+ // case choose static, 1 schedule.
+ if (llvm::any_of(
+ S.getClausesOfKind<OMPOrderedClause>(),
+ [](const OMPOrderedClause *C) { return C->getNumForLoops(); })) {
+ ScheduleKind = OMPC_SCHEDULE_static;
+ // Chunk size is 1 in this case.
+ llvm::APInt ChunkSize(32, 1);
+ ChunkExpr = IntegerLiteral::Create(
+ CGF.getContext(), ChunkSize,
+ CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
+ SourceLocation());
+ }
+}
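// Illustrative loop (not from the patch) that takes the default chosen above:
// an ordered clause with a parameter makes the worksharing loop a doacross
// loop, so without an explicit schedule clause we choose static with chunk 1:
//   #pragma omp for ordered(2)
//   for (int i = 0; i < N; ++i)
//     for (int j = 0; j < M; ++j) {
//       #pragma omp ordered depend(sink : i - 1, j)
//       body(i, j);
//       #pragma omp ordered depend(source)
//     }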
+
void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind, bool EmitChecks,
bool ForceSimpleCall) {
@@ -3412,7 +3543,7 @@ void CGOpenMPRuntime::emitForDispatchInit(
static void emitForStaticInitCall(
CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId,
- llvm::Constant *ForStaticInitFunction, OpenMPSchedType Schedule,
+ llvm::FunctionCallee ForStaticInitFunction, OpenMPSchedType Schedule,
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
const CGOpenMPRuntime::StaticRTInput &Values) {
if (!CGF.HaveInsertPoint())
@@ -3473,7 +3604,7 @@ void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
? OMP_IDENT_WORK_LOOP
: OMP_IDENT_WORK_SECTIONS);
llvm::Value *ThreadId = getThreadID(CGF, Loc);
- llvm::Constant *StaticInitFunction =
+ llvm::FunctionCallee StaticInitFunction =
createForStaticInitFunction(Values.IVSize, Values.IVSigned);
emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
@@ -3488,7 +3619,7 @@ void CGOpenMPRuntime::emitDistributeStaticInit(
llvm::Value *UpdatedLocation =
emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
llvm::Value *ThreadId = getThreadID(CGF, Loc);
- llvm::Constant *StaticInitFunction =
+ llvm::FunctionCallee StaticInitFunction =
createForStaticInitFunction(Values.IVSize, Values.IVSigned);
emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
@@ -3731,14 +3862,29 @@ void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
"Entry not initialized!");
assert((!Entry.getAddress() || Entry.getAddress() == Addr) &&
"Resetting with the new address.");
- if (Entry.getAddress() && hasDeviceGlobalVarEntryInfo(VarName))
+ if (Entry.getAddress() && hasDeviceGlobalVarEntryInfo(VarName)) {
+ if (Entry.getVarSize().isZero()) {
+ Entry.setVarSize(VarSize);
+ Entry.setLinkage(Linkage);
+ }
return;
- Entry.setAddress(Addr);
+ }
Entry.setVarSize(VarSize);
Entry.setLinkage(Linkage);
+ Entry.setAddress(Addr);
} else {
- if (hasDeviceGlobalVarEntryInfo(VarName))
+ if (hasDeviceGlobalVarEntryInfo(VarName)) {
+ auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
+ assert(Entry.isValid() && Entry.getFlags() == Flags &&
+ "Entry not initialized!");
+ assert((!Entry.getAddress() || Entry.getAddress() == Addr) &&
+ "Resetting with the new address.");
+ if (Entry.getVarSize().isZero()) {
+ Entry.setVarSize(VarSize);
+ Entry.setLinkage(Linkage);
+ }
return;
+ }
OffloadEntriesDeviceGlobalVar.try_emplace(
VarName, OffloadingEntriesNum, Addr, VarSize, Flags, Linkage);
++OffloadingEntriesNum;
@@ -4052,6 +4198,9 @@ void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
CE->getFlags());
switch (Flags) {
case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo: {
+ if (CGM.getLangOpts().OpenMPIsDevice &&
+ CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())
+ continue;
if (!CE->getAddress()) {
unsigned DiagID = CGM.getDiags().getCustomDiagID(
DiagnosticsEngine::Error,
@@ -4364,12 +4513,12 @@ createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
/// return 0;
/// }
/// \endcode
-static llvm::Value *
+static llvm::Function *
emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
OpenMPDirectiveKind Kind, QualType KmpInt32Ty,
QualType KmpTaskTWithPrivatesPtrQTy,
QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
- QualType SharedsPtrTy, llvm::Value *TaskFunction,
+ QualType SharedsPtrTy, llvm::Function *TaskFunction,
llvm::Value *TaskPrivatesMap) {
ASTContext &C = CGM.getContext();
FunctionArgList Args;
@@ -4587,9 +4736,11 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
&CGM.getModule());
CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskPrivatesMap,
TaskPrivatesMapFnInfo);
- TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
- TaskPrivatesMap->removeFnAttr(llvm::Attribute::OptimizeNone);
- TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
+ if (CGM.getLangOpts().Optimize) {
+ TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
+ TaskPrivatesMap->removeFnAttr(llvm::Attribute::OptimizeNone);
+ TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
+ }
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap,
TaskPrivatesMapFnInfo, Args, Loc, Loc);
@@ -4614,11 +4765,6 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
return TaskPrivatesMap;
}
-static bool stable_sort_comparator(const PrivateDataTy P1,
- const PrivateDataTy P2) {
- return P1.first > P2.first;
-}
-
/// Emit initialization for private variables in task-based directives.
static void emitPrivatesInit(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
@@ -4661,7 +4807,7 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
// Check if the variable is the target-based BasePointersArray,
// PointersArray or SizesArray.
LValue SharedRefLValue;
- QualType Type = OriginalVD->getType();
+ QualType Type = PrivateLValue.getType();
const FieldDecl *SharedField = CapturesInfo.lookup(OriginalVD);
if (IsTargetTask && !SharedField) {
assert(isa<ImplicitParamDecl>(OriginalVD) &&
@@ -4837,7 +4983,7 @@ checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD) {
CGOpenMPRuntime::TaskResultTy
CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
- llvm::Value *TaskFunction, QualType SharedsTy,
+ llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const OMPTaskDataTy &Data) {
ASTContext &C = CGM.getContext();
llvm::SmallVector<PrivateDataTy, 4> Privates;
@@ -4872,7 +5018,9 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
/*PrivateElemInit=*/nullptr));
++I;
}
- std::stable_sort(Privates.begin(), Privates.end(), stable_sort_comparator);
+ llvm::stable_sort(Privates, [](PrivateDataTy L, PrivateDataTy R) {
+ return L.first > R.first;
+ });
QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
// Build type kmp_routine_entry_t (if not built yet).
emitKmpRoutineEntryT(KmpInt32Ty);
@@ -4911,7 +5059,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
// Emit initial values for private copies (if any).
llvm::Value *TaskPrivatesMap = nullptr;
llvm::Type *TaskPrivatesMapTy =
- std::next(cast<llvm::Function>(TaskFunction)->arg_begin(), 3)->getType();
+ std::next(TaskFunction->arg_begin(), 3)->getType();
if (!Privates.empty()) {
auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
TaskPrivatesMap = emitTaskPrivateMappingFunction(
@@ -4925,7 +5073,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
}
// Build a proxy function kmp_int32 .omp_task_entry.(kmp_int32 gtid,
// kmp_task_t *tt);
- llvm::Value *TaskEntry = emitProxyTaskFunction(
+ llvm::Function *TaskEntry = emitProxyTaskFunction(
CGM, Loc, D.getDirectiveKind(), KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
KmpTaskTWithPrivatesQTy, KmpTaskTQTy, SharedsPtrTy, TaskFunction,
TaskPrivatesMap);
@@ -4934,7 +5082,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
// kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
// kmp_routine_entry_t *task_entry);
// Task flags. Format is taken from
- // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h,
+ // https://github.com/llvm/llvm-project/blob/master/openmp/runtime/src/kmp.h,
// description of kmp_tasking_flags struct.
enum {
TiedFlag = 0x1,
@@ -4959,13 +5107,30 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
: CGF.Builder.getInt32(Data.Final.getInt() ? FinalFlag : 0);
TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
llvm::Value *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
- llvm::Value *AllocArgs[] = {emitUpdateLocation(CGF, Loc),
- getThreadID(CGF, Loc), TaskFlags,
- KmpTaskTWithPrivatesTySize, SharedsSize,
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- TaskEntry, KmpRoutineEntryPtrTy)};
- llvm::Value *NewTask = CGF.EmitRuntimeCall(
+ SmallVector<llvm::Value *, 8> AllocArgs = {emitUpdateLocation(CGF, Loc),
+ getThreadID(CGF, Loc), TaskFlags, KmpTaskTWithPrivatesTySize,
+ SharedsSize, CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ TaskEntry, KmpRoutineEntryPtrTy)};
+ llvm::Value *NewTask;
+ if (D.hasClausesOfKind<OMPNowaitClause>()) {
+ // Check if we have any device clause associated with the directive.
+ const Expr *Device = nullptr;
+ if (auto *C = D.getSingleClause<OMPDeviceClause>())
+ Device = C->getDevice();
+    // Emit the device ID if any, otherwise use the default value.
+ llvm::Value *DeviceID;
+ if (Device)
+ DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
+ CGF.Int64Ty, /*isSigned=*/true);
+ else
+ DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
+ AllocArgs.push_back(DeviceID);
+ NewTask = CGF.EmitRuntimeCall(
+ createRuntimeFunction(OMPRTL__kmpc_omp_target_task_alloc), AllocArgs);
+ } else {
+ NewTask = CGF.EmitRuntimeCall(
createRuntimeFunction(OMPRTL__kmpc_omp_task_alloc), AllocArgs);
+ }
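  // Illustrative input (not from the patch) for the branch above: a deferred
  // target region such as
  //   #pragma omp target nowait device(dev) map(tofrom : a)
  //   { a += 1; }
  // is emitted as a target task, so its allocation goes through
  // __kmpc_omp_target_task_alloc and carries the device id (or
  // OMP_DEVICEID_UNDEF, i.e. -1, when no device clause is present).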
llvm::Value *NewTaskNewTaskTTy =
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
NewTask, KmpTaskTWithPrivatesPtrTy);
@@ -5037,7 +5202,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
- llvm::Value *TaskFunction,
+ llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds,
const Expr *IfCond,
const OMPTaskDataTy &Data) {
@@ -5047,7 +5212,7 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
TaskResultTy Result =
emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
llvm::Value *NewTask = Result.NewTask;
- llvm::Value *TaskEntry = Result.TaskEntry;
+ llvm::Function *TaskEntry = Result.TaskEntry;
llvm::Value *NewTaskNewTaskTTy = Result.NewTaskNewTaskTTy;
LValue TDBase = Result.TDBase;
const RecordDecl *KmpTaskTQTyRD = Result.KmpTaskTQTyRD;
@@ -5057,7 +5222,7 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
unsigned NumDependencies = Data.Dependences.size();
if (NumDependencies) {
// Dependence kind for RTL.
- enum RTLDependenceKindTy { DepIn = 0x01, DepInOut = 0x3 };
+ enum RTLDependenceKindTy { DepIn = 0x01, DepInOut = 0x3, DepMutexInOutSet = 0x4 };
enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
RecordDecl *KmpDependInfoRD;
QualType FlagsTy =
@@ -5074,7 +5239,6 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
} else {
KmpDependInfoRD = cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
}
- CharUnits DependencySize = C.getTypeSizeInChars(KmpDependInfoTy);
// Define type kmp_depend_info[<Dependences.size()>];
QualType KmpDependInfoArrayTy = C.getConstantArrayType(
KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies),
@@ -5090,7 +5254,7 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
if (const auto *ASE =
dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
LValue UpAddrLVal =
- CGF.EmitOMPArraySectionExpr(ASE, /*LowerBound=*/false);
+ CGF.EmitOMPArraySectionExpr(ASE, /*IsLowerBound=*/false);
llvm::Value *UpAddr =
CGF.Builder.CreateConstGEP1_32(UpAddrLVal.getPointer(), /*Idx0=*/1);
llvm::Value *LowIntPtr =
@@ -5101,7 +5265,7 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
Size = CGF.getTypeSize(Ty);
}
LValue Base = CGF.MakeAddrLValue(
- CGF.Builder.CreateConstArrayGEP(DependenciesArray, I, DependencySize),
+ CGF.Builder.CreateConstArrayGEP(DependenciesArray, I),
KmpDependInfoTy);
// deps[i].base_addr = &<Dependences[i].second>;
LValue BaseAddrLVal = CGF.EmitLValueForField(
@@ -5124,6 +5288,9 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
case OMPC_DEPEND_inout:
DepKind = DepInOut;
break;
+ case OMPC_DEPEND_mutexinoutset:
+ DepKind = DepMutexInOutSet;
+ break;
case OMPC_DEPEND_source:
case OMPC_DEPEND_sink:
case OMPC_DEPEND_unknown:
@@ -5135,8 +5302,7 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
FlagsLVal);
}
DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateStructGEP(DependenciesArray, 0, CharUnits::Zero()),
- CGF.VoidPtrTy);
+ CGF.Builder.CreateConstArrayGEP(DependenciesArray, 0), CGF.VoidPtrTy);
}
// NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
@@ -5231,7 +5397,7 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D,
- llvm::Value *TaskFunction,
+ llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds,
const Expr *IfCond,
const OMPTaskDataTy &Data) {
@@ -5411,10 +5577,10 @@ static void emitReductionCombiner(CodeGenFunction &CGF,
CGF.EmitIgnoredExpr(ReductionOp);
}
-llvm::Value *CGOpenMPRuntime::emitReductionFunction(
- CodeGenModule &CGM, SourceLocation Loc, llvm::Type *ArgsType,
- ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs,
- ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps) {
+llvm::Function *CGOpenMPRuntime::emitReductionFunction(
+ SourceLocation Loc, llvm::Type *ArgsType, ArrayRef<const Expr *> Privates,
+ ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
+ ArrayRef<const Expr *> ReductionOps) {
ASTContext &C = CGM.getContext();
// void reduction_func(void *LHSArg, void *RHSArg);
@@ -5466,8 +5632,7 @@ llvm::Value *CGOpenMPRuntime::emitReductionFunction(
if (PrivTy->isVariablyModifiedType()) {
// Get array size and emit VLA type.
++Idx;
- Address Elem =
- CGF.Builder.CreateConstArrayGEP(LHS, Idx, CGF.getPointerSize());
+ Address Elem = CGF.Builder.CreateConstArrayGEP(LHS, Idx);
llvm::Value *Ptr = CGF.Builder.CreateLoad(Elem);
const VariableArrayType *VLA =
CGF.getContext().getAsVariableArrayType(PrivTy);
@@ -5605,8 +5770,7 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
auto IPriv = Privates.begin();
unsigned Idx = 0;
for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
- Address Elem =
- CGF.Builder.CreateConstArrayGEP(ReductionList, Idx, CGF.getPointerSize());
+ Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
CGF.Builder.CreateStore(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
@@ -5614,8 +5778,7 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
if ((*IPriv)->getType()->isVariablyModifiedType()) {
// Store array size.
++Idx;
- Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
- CGF.getPointerSize());
+ Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
llvm::Value *Size = CGF.Builder.CreateIntCast(
CGF.getVLASize(
CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
@@ -5627,9 +5790,9 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
}
// 2. Emit reduce_func().
- llvm::Value *ReductionFn = emitReductionFunction(
- CGM, Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(),
- Privates, LHSExprs, RHSExprs, ReductionOps);
+ llvm::Function *ReductionFn = emitReductionFunction(
+ Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
+ LHSExprs, RHSExprs, ReductionOps);
// 3. Create static kmp_critical_name lock = { 0 };
std::string Name = getName({"reduction"});
@@ -6130,7 +6293,7 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
LValue FlagsLVal = CGF.EmitLValueForField(ElemLVal, FlagsFD);
if (DelayedCreation) {
CGF.EmitStoreOfScalar(
- llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1, /*IsSigned=*/true),
+ llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1, /*isSigned=*/true),
FlagsLVal);
} else
CGF.EmitNullInitialization(FlagsLVal.getAddress(), FlagsLVal.getType());
@@ -6322,6 +6485,7 @@ void CGOpenMPRuntime::emitTargetOutlinedFunction(
llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
assert(!ParentName.empty() && "Invalid target region parent name!");
+ HasEmittedTargetRegion = true;
emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
IsOffloadEntry, CodeGen);
}
@@ -6393,12 +6557,59 @@ void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion);
}
-/// discard all CompoundStmts intervening between two constructs
-static const Stmt *ignoreCompoundStmts(const Stmt *Body) {
- while (const auto *CS = dyn_cast_or_null<CompoundStmt>(Body))
- Body = CS->body_front();
+/// Checks if the expression is constant or does not have non-trivial function
+/// calls.
+static bool isTrivial(ASTContext &Ctx, const Expr * E) {
+ // We can skip constant expressions.
+ // We can skip expressions with trivial calls or simple expressions.
+ return (E->isEvaluatable(Ctx, Expr::SE_AllowUndefinedBehavior) ||
+ !E->hasNonTrivialCall(Ctx)) &&
+ !E->HasSideEffects(Ctx, /*IncludePossibleEffects=*/true);
+}
- return Body;
+const Stmt *CGOpenMPRuntime::getSingleCompoundChild(ASTContext &Ctx,
+ const Stmt *Body) {
+ const Stmt *Child = Body->IgnoreContainers();
+ while (const auto *C = dyn_cast_or_null<CompoundStmt>(Child)) {
+ Child = nullptr;
+ for (const Stmt *S : C->body()) {
+ if (const auto *E = dyn_cast<Expr>(S)) {
+ if (isTrivial(Ctx, E))
+ continue;
+ }
+ // Some of the statements can be ignored.
+ if (isa<AsmStmt>(S) || isa<NullStmt>(S) || isa<OMPFlushDirective>(S) ||
+ isa<OMPBarrierDirective>(S) || isa<OMPTaskyieldDirective>(S))
+ continue;
+ // Analyze declarations.
+ if (const auto *DS = dyn_cast<DeclStmt>(S)) {
+ if (llvm::all_of(DS->decls(), [&Ctx](const Decl *D) {
+ if (isa<EmptyDecl>(D) || isa<DeclContext>(D) ||
+ isa<TypeDecl>(D) || isa<PragmaCommentDecl>(D) ||
+ isa<PragmaDetectMismatchDecl>(D) || isa<UsingDecl>(D) ||
+ isa<UsingDirectiveDecl>(D) ||
+ isa<OMPDeclareReductionDecl>(D) ||
+ isa<OMPThreadPrivateDecl>(D) || isa<OMPAllocateDecl>(D))
+ return true;
+ const auto *VD = dyn_cast<VarDecl>(D);
+ if (!VD)
+ return false;
+ return VD->isConstexpr() ||
+ ((VD->getType().isTrivialType(Ctx) ||
+ VD->getType()->isReferenceType()) &&
+ (!VD->hasInit() || isTrivial(Ctx, VD->getInit())));
+ }))
+ continue;
+ }
+ // Found multiple children - cannot return a single child.
+ if (Child)
+ return nullptr;
+ Child = S;
+ }
+ if (Child)
+ Child = Child->IgnoreContainers();
+ }
+ return Child;
}
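
As an informal illustration of what this helper looks through (hypothetical user source, not part of the change): trivial declarations and null statements in a captured body are skipped, so the nested directive is still recognized as the single child.

    void scale(int n, float a, float *y) {
      #pragma omp target map(tofrom: y[0:n])
      {
        const float factor = a;        // trivial declaration: skipped by the analysis
        ;                              // NullStmt: skipped
        #pragma omp parallel for       // the single remaining child statement
        for (int i = 0; i < n; ++i)
          y[i] *= factor;
      }
    }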
/// Emit the number of teams for a target directive. Inspect the num_teams
@@ -6410,63 +6621,208 @@ static const Stmt *ignoreCompoundStmts(const Stmt *Body) {
///
/// Otherwise, return nullptr.
static llvm::Value *
-emitNumTeamsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
- CodeGenFunction &CGF,
+emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
const OMPExecutableDirective &D) {
- assert(!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the "
- "teams directive expected to be "
- "emitted only for the host!");
-
+ assert(!CGF.getLangOpts().OpenMPIsDevice &&
+ "Clauses associated with the teams directive expected to be emitted "
+ "only for the host!");
+ OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
+ assert(isOpenMPTargetExecutionDirective(DirectiveKind) &&
+ "Expected target-based executable directive.");
CGBuilderTy &Bld = CGF.Builder;
-
- // If the target directive is combined with a teams directive:
- // Return the value in the num_teams clause, if any.
- // Otherwise, return 0 to denote the runtime default.
- if (isOpenMPTeamsDirective(D.getDirectiveKind())) {
- if (const auto *NumTeamsClause = D.getSingleClause<OMPNumTeamsClause>()) {
+ switch (DirectiveKind) {
+ case OMPD_target: {
+ const auto *CS = D.getInnermostCapturedStmt();
+ const auto *Body =
+ CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
+ const Stmt *ChildStmt =
+ CGOpenMPRuntime::getSingleCompoundChild(CGF.getContext(), Body);
+ if (const auto *NestedDir =
+ dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
+ if (isOpenMPTeamsDirective(NestedDir->getDirectiveKind())) {
+ if (NestedDir->hasClausesOfKind<OMPNumTeamsClause>()) {
+ CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
+ const Expr *NumTeams =
+ NestedDir->getSingleClause<OMPNumTeamsClause>()->getNumTeams();
+ llvm::Value *NumTeamsVal =
+ CGF.EmitScalarExpr(NumTeams,
+ /*IgnoreResultAssign*/ true);
+ return Bld.CreateIntCast(NumTeamsVal, CGF.Int32Ty,
+ /*isSigned=*/true);
+ }
+ return Bld.getInt32(0);
+ }
+ if (isOpenMPParallelDirective(NestedDir->getDirectiveKind()) ||
+ isOpenMPSimdDirective(NestedDir->getDirectiveKind()))
+ return Bld.getInt32(1);
+ return Bld.getInt32(0);
+ }
+ return nullptr;
+ }
+ case OMPD_target_teams:
+ case OMPD_target_teams_distribute:
+ case OMPD_target_teams_distribute_simd:
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd: {
+ if (D.hasClausesOfKind<OMPNumTeamsClause>()) {
CodeGenFunction::RunCleanupsScope NumTeamsScope(CGF);
- llvm::Value *NumTeams = CGF.EmitScalarExpr(NumTeamsClause->getNumTeams(),
- /*IgnoreResultAssign*/ true);
- return Bld.CreateIntCast(NumTeams, CGF.Int32Ty,
- /*IsSigned=*/true);
+ const Expr *NumTeams =
+ D.getSingleClause<OMPNumTeamsClause>()->getNumTeams();
+ llvm::Value *NumTeamsVal =
+ CGF.EmitScalarExpr(NumTeams,
+ /*IgnoreResultAssign*/ true);
+ return Bld.CreateIntCast(NumTeamsVal, CGF.Int32Ty,
+ /*isSigned=*/true);
}
-
- // The default value is 0.
return Bld.getInt32(0);
}
-
- // If the target directive is combined with a parallel directive but not a
- // teams directive, start one team.
- if (isOpenMPParallelDirective(D.getDirectiveKind()))
+ case OMPD_target_parallel:
+ case OMPD_target_parallel_for:
+ case OMPD_target_parallel_for_simd:
+ case OMPD_target_simd:
return Bld.getInt32(1);
-
- // If the current target region has a teams region enclosed, we need to get
- // the number of teams to pass to the runtime function call. This is done
- // by generating the expression in a inlined region. This is required because
- // the expression is captured in the enclosing target environment when the
- // teams directive is not combined with target.
-
- const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
-
- if (const auto *TeamsDir = dyn_cast_or_null<OMPExecutableDirective>(
- ignoreCompoundStmts(CS.getCapturedStmt()))) {
- if (isOpenMPTeamsDirective(TeamsDir->getDirectiveKind())) {
- if (const auto *NTE = TeamsDir->getSingleClause<OMPNumTeamsClause>()) {
- CGOpenMPInnerExprInfo CGInfo(CGF, CS);
+ case OMPD_parallel:
+ case OMPD_for:
+ case OMPD_parallel_for:
+ case OMPD_parallel_sections:
+ case OMPD_for_simd:
+ case OMPD_parallel_for_simd:
+ case OMPD_cancel:
+ case OMPD_cancellation_point:
+ case OMPD_ordered:
+ case OMPD_threadprivate:
+ case OMPD_allocate:
+ case OMPD_task:
+ case OMPD_simd:
+ case OMPD_sections:
+ case OMPD_section:
+ case OMPD_single:
+ case OMPD_master:
+ case OMPD_critical:
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_taskgroup:
+ case OMPD_atomic:
+ case OMPD_flush:
+ case OMPD_teams:
+ case OMPD_target_data:
+ case OMPD_target_exit_data:
+ case OMPD_target_enter_data:
+ case OMPD_distribute:
+ case OMPD_distribute_simd:
+ case OMPD_distribute_parallel_for:
+ case OMPD_distribute_parallel_for_simd:
+ case OMPD_teams_distribute:
+ case OMPD_teams_distribute_simd:
+ case OMPD_teams_distribute_parallel_for:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_target_update:
+ case OMPD_declare_simd:
+ case OMPD_declare_target:
+ case OMPD_end_declare_target:
+ case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
+ case OMPD_taskloop:
+ case OMPD_taskloop_simd:
+ case OMPD_requires:
+ case OMPD_unknown:
+ break;
+ }
+ llvm_unreachable("Unexpected directive kind.");
+}
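
In user-level terms (an illustrative sketch with made-up clause values): a combined target teams form takes its num_teams clause directly, a plain target whose single child is a parallel or simd construct implies one team, and the absence of any hint yields 0, i.e. the runtime default.

    void teams_examples(int n) {
      #pragma omp target teams num_teams(8)   // combined form: emits 8
      { }

      #pragma omp target                      // single nested parallel child: 1 team
      {
        #pragma omp parallel for
        for (int i = 0; i < n; ++i) { }
      }

      #pragma omp target teams                // no num_teams clause: 0 (runtime default)
      { }
    }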
+
+static llvm::Value *getNumThreads(CodeGenFunction &CGF, const CapturedStmt *CS,
+ llvm::Value *DefaultThreadLimitVal) {
+ const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
+ CGF.getContext(), CS->getCapturedStmt());
+ if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
+ if (isOpenMPParallelDirective(Dir->getDirectiveKind())) {
+ llvm::Value *NumThreads = nullptr;
+ llvm::Value *CondVal = nullptr;
+ // Handle the if clause. If an if clause is present, the number of threads
+ // is calculated as <cond> ? (<numthreads> ? <numthreads> : 0) : 1.
+ if (Dir->hasClausesOfKind<OMPIfClause>()) {
+ CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
- llvm::Value *NumTeams = CGF.EmitScalarExpr(NTE->getNumTeams());
- return Bld.CreateIntCast(NumTeams, CGF.Int32Ty,
- /*IsSigned=*/true);
+ const OMPIfClause *IfClause = nullptr;
+ for (const auto *C : Dir->getClausesOfKind<OMPIfClause>()) {
+ if (C->getNameModifier() == OMPD_unknown ||
+ C->getNameModifier() == OMPD_parallel) {
+ IfClause = C;
+ break;
+ }
+ }
+ if (IfClause) {
+ const Expr *Cond = IfClause->getCondition();
+ bool Result;
+ if (Cond->EvaluateAsBooleanCondition(Result, CGF.getContext())) {
+ if (!Result)
+ return CGF.Builder.getInt32(1);
+ } else {
+ CodeGenFunction::LexicalScope Scope(CGF, Cond->getSourceRange());
+ if (const auto *PreInit =
+ cast_or_null<DeclStmt>(IfClause->getPreInitStmt())) {
+ for (const auto *I : PreInit->decls()) {
+ if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
+ CGF.EmitVarDecl(cast<VarDecl>(*I));
+ } else {
+ CodeGenFunction::AutoVarEmission Emission =
+ CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
+ CGF.EmitAutoVarCleanups(Emission);
+ }
+ }
+ }
+ CondVal = CGF.EvaluateExprAsBool(Cond);
+ }
+ }
}
-
- // If we have an enclosed teams directive but no num_teams clause we use
- // the default value 0.
- return Bld.getInt32(0);
+ // Check the value of the num_threads clause iff the if clause was not
+ // specified or does not evaluate to false.
+ if (Dir->hasClausesOfKind<OMPNumThreadsClause>()) {
+ CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
+ const auto *NumThreadsClause =
+ Dir->getSingleClause<OMPNumThreadsClause>();
+ CodeGenFunction::LexicalScope Scope(
+ CGF, NumThreadsClause->getNumThreads()->getSourceRange());
+ if (const auto *PreInit =
+ cast_or_null<DeclStmt>(NumThreadsClause->getPreInitStmt())) {
+ for (const auto *I : PreInit->decls()) {
+ if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
+ CGF.EmitVarDecl(cast<VarDecl>(*I));
+ } else {
+ CodeGenFunction::AutoVarEmission Emission =
+ CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
+ CGF.EmitAutoVarCleanups(Emission);
+ }
+ }
+ }
+ NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads());
+ NumThreads = CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty,
+ /*isSigned=*/false);
+ if (DefaultThreadLimitVal)
+ NumThreads = CGF.Builder.CreateSelect(
+ CGF.Builder.CreateICmpULT(DefaultThreadLimitVal, NumThreads),
+ DefaultThreadLimitVal, NumThreads);
+ } else {
+ NumThreads = DefaultThreadLimitVal ? DefaultThreadLimitVal
+ : CGF.Builder.getInt32(0);
+ }
+ // Process condition of the if clause.
+ if (CondVal) {
+ NumThreads = CGF.Builder.CreateSelect(CondVal, NumThreads,
+ CGF.Builder.getInt32(1));
+ }
+ return NumThreads;
}
+ if (isOpenMPSimdDirective(Dir->getDirectiveKind()))
+ return CGF.Builder.getInt32(1);
+ return DefaultThreadLimitVal;
}
-
- // No teams associated with the directive.
- return nullptr;
+ return DefaultThreadLimitVal ? DefaultThreadLimitVal
+ : CGF.Builder.getInt32(0);
}
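
A rough scalar model of the selection above (an illustrative sketch, not the emitted IR): a statically false if clause forces one thread, otherwise num_threads is clamped by any inherited default limit, with 0 standing for the runtime default.

    // Hypothetical host-side model of getNumThreads.
    static unsigned modelNumThreads(bool HasIf, bool Cond,
                                    bool HasNumThreads, unsigned NumThreads,
                                    bool HasDefaultLimit, unsigned DefaultLimit) {
      if (HasIf && !Cond)
        return 1;                                    // if(false): serialized, one thread
      unsigned N = HasNumThreads
                       ? NumThreads
                       : (HasDefaultLimit ? DefaultLimit : 0);
      if (HasNumThreads && HasDefaultLimit && DefaultLimit < N)
        N = DefaultLimit;                            // clamp by the inherited limit
      return N;                                      // 0 means "use the runtime default"
    }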
/// Emit the number of threads for a target directive. Inspect the
@@ -6478,98 +6834,208 @@ emitNumTeamsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
///
/// Otherwise, return nullptr.
static llvm::Value *
-emitNumThreadsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
- CodeGenFunction &CGF,
+emitNumThreadsForTargetDirective(CodeGenFunction &CGF,
const OMPExecutableDirective &D) {
- assert(!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the "
- "teams directive expected to be "
- "emitted only for the host!");
-
+ assert(!CGF.getLangOpts().OpenMPIsDevice &&
+ "Clauses associated with the teams directive expected to be emitted "
+ "only for the host!");
+ OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
+ assert(isOpenMPTargetExecutionDirective(DirectiveKind) &&
+ "Expected target-based executable directive.");
CGBuilderTy &Bld = CGF.Builder;
-
- //
- // If the target directive is combined with a teams directive:
- // Return the value in the thread_limit clause, if any.
- //
- // If the target directive is combined with a parallel directive:
- // Return the value in the num_threads clause, if any.
- //
- // If both clauses are set, select the minimum of the two.
- //
- // If neither teams or parallel combined directives set the number of threads
- // in a team, return 0 to denote the runtime default.
- //
- // If this is not a teams directive return nullptr.
-
- if (isOpenMPTeamsDirective(D.getDirectiveKind()) ||
- isOpenMPParallelDirective(D.getDirectiveKind())) {
- llvm::Value *DefaultThreadLimitVal = Bld.getInt32(0);
- llvm::Value *NumThreadsVal = nullptr;
- llvm::Value *ThreadLimitVal = nullptr;
-
- if (const auto *ThreadLimitClause =
- D.getSingleClause<OMPThreadLimitClause>()) {
+ llvm::Value *ThreadLimitVal = nullptr;
+ llvm::Value *NumThreadsVal = nullptr;
+ switch (DirectiveKind) {
+ case OMPD_target: {
+ const CapturedStmt *CS = D.getInnermostCapturedStmt();
+ if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
+ return NumThreads;
+ const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
+ CGF.getContext(), CS->getCapturedStmt());
+ if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
+ if (Dir->hasClausesOfKind<OMPThreadLimitClause>()) {
+ CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
+ const auto *ThreadLimitClause =
+ Dir->getSingleClause<OMPThreadLimitClause>();
+ CodeGenFunction::LexicalScope Scope(
+ CGF, ThreadLimitClause->getThreadLimit()->getSourceRange());
+ if (const auto *PreInit =
+ cast_or_null<DeclStmt>(ThreadLimitClause->getPreInitStmt())) {
+ for (const auto *I : PreInit->decls()) {
+ if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
+ CGF.EmitVarDecl(cast<VarDecl>(*I));
+ } else {
+ CodeGenFunction::AutoVarEmission Emission =
+ CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
+ CGF.EmitAutoVarCleanups(Emission);
+ }
+ }
+ }
+ llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
+ ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
+ ThreadLimitVal =
+ Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
+ }
+ if (isOpenMPTeamsDirective(Dir->getDirectiveKind()) &&
+ !isOpenMPDistributeDirective(Dir->getDirectiveKind())) {
+ CS = Dir->getInnermostCapturedStmt();
+ const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
+ CGF.getContext(), CS->getCapturedStmt());
+ Dir = dyn_cast_or_null<OMPExecutableDirective>(Child);
+ }
+ if (Dir && isOpenMPDistributeDirective(Dir->getDirectiveKind()) &&
+ !isOpenMPSimdDirective(Dir->getDirectiveKind())) {
+ CS = Dir->getInnermostCapturedStmt();
+ if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
+ return NumThreads;
+ }
+ if (Dir && isOpenMPSimdDirective(Dir->getDirectiveKind()))
+ return Bld.getInt32(1);
+ }
+ return ThreadLimitVal ? ThreadLimitVal : Bld.getInt32(0);
+ }
+ case OMPD_target_teams: {
+ if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
- llvm::Value *ThreadLimit =
- CGF.EmitScalarExpr(ThreadLimitClause->getThreadLimit(),
- /*IgnoreResultAssign*/ true);
- ThreadLimitVal = Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty,
- /*IsSigned=*/true);
+ const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
+ llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
+ ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
+ ThreadLimitVal =
+ Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
}
-
- if (const auto *NumThreadsClause =
- D.getSingleClause<OMPNumThreadsClause>()) {
+ const CapturedStmt *CS = D.getInnermostCapturedStmt();
+ if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
+ return NumThreads;
+ const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
+ CGF.getContext(), CS->getCapturedStmt());
+ if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
+ if (Dir->getDirectiveKind() == OMPD_distribute) {
+ CS = Dir->getInnermostCapturedStmt();
+ if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
+ return NumThreads;
+ }
+ }
+ return ThreadLimitVal ? ThreadLimitVal : Bld.getInt32(0);
+ }
+ case OMPD_target_teams_distribute:
+ if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
+ CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
+ const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
+ llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
+ ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
+ ThreadLimitVal =
+ Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
+ }
+ return getNumThreads(CGF, D.getInnermostCapturedStmt(), ThreadLimitVal);
+ case OMPD_target_parallel:
+ case OMPD_target_parallel_for:
+ case OMPD_target_parallel_for_simd:
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd: {
+ llvm::Value *CondVal = nullptr;
+ // Handle the if clause. If an if clause is present, the number of threads
+ // is calculated as <cond> ? (<numthreads> ? <numthreads> : 0) : 1.
+ if (D.hasClausesOfKind<OMPIfClause>()) {
+ const OMPIfClause *IfClause = nullptr;
+ for (const auto *C : D.getClausesOfKind<OMPIfClause>()) {
+ if (C->getNameModifier() == OMPD_unknown ||
+ C->getNameModifier() == OMPD_parallel) {
+ IfClause = C;
+ break;
+ }
+ }
+ if (IfClause) {
+ const Expr *Cond = IfClause->getCondition();
+ bool Result;
+ if (Cond->EvaluateAsBooleanCondition(Result, CGF.getContext())) {
+ if (!Result)
+ return Bld.getInt32(1);
+ } else {
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ CondVal = CGF.EvaluateExprAsBool(Cond);
+ }
+ }
+ }
+ if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
+ CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
+ const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
+ llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
+ ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
+ ThreadLimitVal =
+ Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
+ }
+ if (D.hasClausesOfKind<OMPNumThreadsClause>()) {
CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
- llvm::Value *NumThreads =
- CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
- /*IgnoreResultAssign*/ true);
+ const auto *NumThreadsClause = D.getSingleClause<OMPNumThreadsClause>();
+ llvm::Value *NumThreads = CGF.EmitScalarExpr(
+ NumThreadsClause->getNumThreads(), /*IgnoreResultAssign=*/true);
NumThreadsVal =
- Bld.CreateIntCast(NumThreads, CGF.Int32Ty, /*IsSigned=*/true);
- }
-
- // Select the lesser of thread_limit and num_threads.
- if (NumThreadsVal)
+ Bld.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned=*/false);
ThreadLimitVal = ThreadLimitVal
- ? Bld.CreateSelect(Bld.CreateICmpSLT(NumThreadsVal,
+ ? Bld.CreateSelect(Bld.CreateICmpULT(NumThreadsVal,
ThreadLimitVal),
NumThreadsVal, ThreadLimitVal)
: NumThreadsVal;
-
- // Set default value passed to the runtime if either teams or a target
- // parallel type directive is found but no clause is specified.
+ }
if (!ThreadLimitVal)
- ThreadLimitVal = DefaultThreadLimitVal;
-
+ ThreadLimitVal = Bld.getInt32(0);
+ if (CondVal)
+ return Bld.CreateSelect(CondVal, ThreadLimitVal, Bld.getInt32(1));
return ThreadLimitVal;
}
-
- // If the current target region has a teams region enclosed, we need to get
- // the thread limit to pass to the runtime function call. This is done
- // by generating the expression in a inlined region. This is required because
- // the expression is captured in the enclosing target environment when the
- // teams directive is not combined with target.
-
- const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
-
- if (const auto *TeamsDir = dyn_cast_or_null<OMPExecutableDirective>(
- ignoreCompoundStmts(CS.getCapturedStmt()))) {
- if (isOpenMPTeamsDirective(TeamsDir->getDirectiveKind())) {
- if (const auto *TLE = TeamsDir->getSingleClause<OMPThreadLimitClause>()) {
- CGOpenMPInnerExprInfo CGInfo(CGF, CS);
- CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
- llvm::Value *ThreadLimit = CGF.EmitScalarExpr(TLE->getThreadLimit());
- return CGF.Builder.CreateIntCast(ThreadLimit, CGF.Int32Ty,
- /*IsSigned=*/true);
- }
-
- // If we have an enclosed teams directive but no thread_limit clause we
- // use the default value 0.
- return CGF.Builder.getInt32(0);
- }
+ case OMPD_target_teams_distribute_simd:
+ case OMPD_target_simd:
+ return Bld.getInt32(1);
+ case OMPD_parallel:
+ case OMPD_for:
+ case OMPD_parallel_for:
+ case OMPD_parallel_sections:
+ case OMPD_for_simd:
+ case OMPD_parallel_for_simd:
+ case OMPD_cancel:
+ case OMPD_cancellation_point:
+ case OMPD_ordered:
+ case OMPD_threadprivate:
+ case OMPD_allocate:
+ case OMPD_task:
+ case OMPD_simd:
+ case OMPD_sections:
+ case OMPD_section:
+ case OMPD_single:
+ case OMPD_master:
+ case OMPD_critical:
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_taskgroup:
+ case OMPD_atomic:
+ case OMPD_flush:
+ case OMPD_teams:
+ case OMPD_target_data:
+ case OMPD_target_exit_data:
+ case OMPD_target_enter_data:
+ case OMPD_distribute:
+ case OMPD_distribute_simd:
+ case OMPD_distribute_parallel_for:
+ case OMPD_distribute_parallel_for_simd:
+ case OMPD_teams_distribute:
+ case OMPD_teams_distribute_simd:
+ case OMPD_teams_distribute_parallel_for:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_target_update:
+ case OMPD_declare_simd:
+ case OMPD_declare_target:
+ case OMPD_end_declare_target:
+ case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
+ case OMPD_taskloop:
+ case OMPD_taskloop_simd:
+ case OMPD_requires:
+ case OMPD_unknown:
+ break;
}
-
- // No teams associated with the directive.
- return nullptr;
+ llvm_unreachable("Unsupported directive kind.");
}
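
Again in user-level terms (illustrative, made-up clause values): when both thread_limit and num_threads are visible on a combined target parallel form, the smaller value wins; an applicable if clause that folds to false yields 1; with no clauses the function returns 0 and defers to the runtime.

    void threads_examples(int n) {
      #pragma omp target teams distribute parallel for num_threads(8) thread_limit(4)
      for (int i = 0; i < n; ++i)                    // min(4, 8) -> 4
        ;

      #pragma omp target parallel if(parallel: 0)    // if clause folds to false -> 1
      { }

      #pragma omp target parallel                    // no clauses -> 0 (runtime default)
      { }
    }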
namespace {
@@ -6689,7 +7155,9 @@ private:
CodeGenFunction &CGF;
/// Set of all first private variables in the current directive.
- llvm::SmallPtrSet<const VarDecl *, 8> FirstPrivateDecls;
+ /// bool data is set to true if the variable is implicitly marked as
+ /// firstprivate, false otherwise.
+ llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, bool> FirstPrivateDecls;
/// Map between device pointer declarations and their expression components.
/// The key value for declarations in 'this' is null.
@@ -6993,7 +7461,10 @@ private:
// Track if the map information being generated is the first for a capture.
bool IsCaptureFirstInfo = IsFirstComponentList;
- bool IsLink = false; // Is this variable a "declare target link"?
+ // When the variable has a declare target link attribute, or appears in a to
+ // clause with unified shared memory, a reference is needed to hold the
+ // host/device address of the variable.
+ bool RequiresReference = false;
// Scan the components from the base to the complete expression.
auto CI = Components.rbegin();
@@ -7023,11 +7494,14 @@ private:
if (const auto *VD =
dyn_cast_or_null<VarDecl>(I->getAssociatedDeclaration())) {
if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
- OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
- if (*Res == OMPDeclareTargetDeclAttr::MT_Link) {
- IsLink = true;
- BP = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetLink(VD);
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
+ if ((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
+ (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) {
+ RequiresReference = true;
+ BP = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
}
+ }
}
// If the variable is a pointer and is being dereferenced (i.e. is not
@@ -7135,7 +7609,7 @@ private:
Address HB = CGF.Builder.CreateConstGEP(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(LB,
CGF.VoidPtrTy),
- TypeSize.getQuantity() - 1, CharUnits::One());
+ TypeSize.getQuantity() - 1);
PartialStruct.HighestElem = {
std::numeric_limits<decltype(
PartialStruct.HighestElem.first)>::max(),
@@ -7167,19 +7641,19 @@ private:
}
BasePointers.push_back(BP.getPointer());
Pointers.push_back(LB.getPointer());
- Sizes.push_back(Size);
+ Sizes.push_back(CGF.Builder.CreateIntCast(Size, CGF.Int64Ty,
+ /*isSigned=*/true));
Types.push_back(Flags);
- LB = CGF.Builder.CreateConstGEP(ComponentLB, 1,
- CGF.getPointerSize());
+ LB = CGF.Builder.CreateConstGEP(ComponentLB, 1);
}
BasePointers.push_back(BP.getPointer());
Pointers.push_back(LB.getPointer());
Size = CGF.Builder.CreatePtrDiff(
CGF.EmitCastToVoidPtr(
- CGF.Builder.CreateConstGEP(HB, 1, CharUnits::One())
- .getPointer()),
+ CGF.Builder.CreateConstGEP(HB, 1).getPointer()),
CGF.EmitCastToVoidPtr(LB.getPointer()));
- Sizes.push_back(Size);
+ Sizes.push_back(
+ CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
Types.push_back(Flags);
break;
}
@@ -7187,7 +7661,8 @@ private:
if (!IsMemberPointer) {
BasePointers.push_back(BP.getPointer());
Pointers.push_back(LB.getPointer());
- Sizes.push_back(Size);
+ Sizes.push_back(
+ CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
// We need to add a pointer flag for each map that comes from the
// same expression except for the first one. We also need to signal
@@ -7195,7 +7670,8 @@ private:
// (there is a set of entries for each capture).
OpenMPOffloadMappingFlags Flags = getMapTypeBits(
MapType, MapModifiers, IsImplicit,
- !IsExpressionFirstInfo || IsLink, IsCaptureFirstInfo && !IsLink);
+ !IsExpressionFirstInfo || RequiresReference,
+ IsCaptureFirstInfo && !RequiresReference);
if (!IsExpressionFirstInfo) {
// If we have a PTR_AND_OBJ pair where the OBJ is a pointer as well,
@@ -7260,9 +7736,17 @@ private:
// A first private variable captured by reference will use only the
// 'private ptr' and 'map to' flag. Return the right flags if the captured
// declaration is known as first-private in this handler.
- if (FirstPrivateDecls.count(Cap.getCapturedVar()))
+ if (FirstPrivateDecls.count(Cap.getCapturedVar())) {
+ if (Cap.getCapturedVar()->getType().isConstant(CGF.getContext()) &&
+ Cap.getCaptureKind() == CapturedStmt::VCK_ByRef)
+ return MappableExprsHandler::OMP_MAP_ALWAYS |
+ MappableExprsHandler::OMP_MAP_TO;
+ if (Cap.getCapturedVar()->getType()->isAnyPointerType())
+ return MappableExprsHandler::OMP_MAP_TO |
+ MappableExprsHandler::OMP_MAP_PTR_AND_OBJ;
return MappableExprsHandler::OMP_MAP_PRIVATE |
MappableExprsHandler::OMP_MAP_TO;
+ }
return MappableExprsHandler::OMP_MAP_TO |
MappableExprsHandler::OMP_MAP_FROM;
}
@@ -7332,7 +7816,7 @@ private:
for (const auto *Field : RD->fields()) {
// Fill in non-bitfields. (Bitfields always use a zero pattern, which we
// will fill in later.)
- if (!Field->isBitField()) {
+ if (!Field->isBitField() && !Field->isZeroSize(CGF.getContext())) {
unsigned FieldIndex = RL.getLLVMFieldNo(Field);
RecordLayout[FieldIndex] = Field;
}
@@ -7354,8 +7838,8 @@ public:
// Extract firstprivate clause information.
for (const auto *C : Dir.getClausesOfKind<OMPFirstprivateClause>())
for (const auto *D : C->varlists())
- FirstPrivateDecls.insert(
- cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
+ FirstPrivateDecls.try_emplace(
+ cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl()), C->isImplicit());
// Extract device pointer clause information.
for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
for (auto L : C->component_lists())
@@ -7380,8 +7864,8 @@ public:
llvm::Value *CLAddr = CGF.Builder.CreatePointerCast(LB, CGF.VoidPtrTy);
llvm::Value *CHAddr = CGF.Builder.CreatePointerCast(HAddr, CGF.VoidPtrTy);
llvm::Value *Diff = CGF.Builder.CreatePtrDiff(CHAddr, CLAddr);
- llvm::Value *Size = CGF.Builder.CreateIntCast(Diff, CGF.SizeTy,
- /*isSinged=*/false);
+ llvm::Value *Size = CGF.Builder.CreateIntCast(Diff, CGF.Int64Ty,
+ /*isSigned=*/false);
Sizes.push_back(Size);
// Map type is always TARGET_PARAM
Types.push_back(OMP_MAP_TARGET_PARAM);
@@ -7497,7 +7981,7 @@ public:
this->CGF.EmitLValue(IE), IE->getExprLoc());
BasePointers.emplace_back(Ptr, VD);
Pointers.push_back(Ptr);
- Sizes.push_back(llvm::Constant::getNullValue(this->CGF.SizeTy));
+ Sizes.push_back(llvm::Constant::getNullValue(this->CGF.Int64Ty));
Types.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_TARGET_PARAM);
}
}
@@ -7554,7 +8038,7 @@ public:
this->CGF.EmitLValue(L.IE), L.IE->getExprLoc());
CurBasePointers.emplace_back(BasePtr, L.VD);
CurPointers.push_back(Ptr);
- CurSizes.push_back(llvm::Constant::getNullValue(this->CGF.SizeTy));
+ CurSizes.push_back(llvm::Constant::getNullValue(this->CGF.Int64Ty));
// Entry is PTR_AND_OBJ and RETURN_PARAM. Also, set the placeholder
// value MEMBER_OF=FFFF so that the entry is later updated with the
// correct value of MEMBER_OF.
@@ -7602,23 +8086,37 @@ public:
LambdaPointers.try_emplace(ThisLVal.getPointer(), VDLVal.getPointer());
BasePointers.push_back(ThisLVal.getPointer());
Pointers.push_back(ThisLValVal.getPointer());
- Sizes.push_back(CGF.getTypeSize(CGF.getContext().VoidPtrTy));
+ Sizes.push_back(
+ CGF.Builder.CreateIntCast(CGF.getTypeSize(CGF.getContext().VoidPtrTy),
+ CGF.Int64Ty, /*isSigned=*/true));
Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
}
for (const LambdaCapture &LC : RD->captures()) {
- if (LC.getCaptureKind() != LCK_ByRef)
+ if (!LC.capturesVariable())
continue;
const VarDecl *VD = LC.getCapturedVar();
+ if (LC.getCaptureKind() != LCK_ByRef && !VD->getType()->isPointerType())
+ continue;
auto It = Captures.find(VD);
assert(It != Captures.end() && "Found lambda capture without field.");
LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
- LValue VarLValVal = CGF.EmitLValueForField(VDLVal, It->second);
- LambdaPointers.try_emplace(VarLVal.getPointer(), VDLVal.getPointer());
- BasePointers.push_back(VarLVal.getPointer());
- Pointers.push_back(VarLValVal.getPointer());
- Sizes.push_back(CGF.getTypeSize(
- VD->getType().getCanonicalType().getNonReferenceType()));
+ if (LC.getCaptureKind() == LCK_ByRef) {
+ LValue VarLValVal = CGF.EmitLValueForField(VDLVal, It->second);
+ LambdaPointers.try_emplace(VarLVal.getPointer(), VDLVal.getPointer());
+ BasePointers.push_back(VarLVal.getPointer());
+ Pointers.push_back(VarLValVal.getPointer());
+ Sizes.push_back(CGF.Builder.CreateIntCast(
+ CGF.getTypeSize(
+ VD->getType().getCanonicalType().getNonReferenceType()),
+ CGF.Int64Ty, /*isSigned=*/true));
+ } else {
+ RValue VarRVal = CGF.EmitLoadOfLValue(VarLVal, RD->getLocation());
+ LambdaPointers.try_emplace(VarLVal.getPointer(), VDLVal.getPointer());
+ BasePointers.push_back(VarLVal.getPointer());
+ Pointers.push_back(VarRVal.getScalarVal());
+ Sizes.push_back(llvm::ConstantInt::get(CGF.Int64Ty, 0));
+ }
Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
}
@@ -7675,7 +8173,9 @@ public:
if (DevPointersMap.count(VD)) {
BasePointers.emplace_back(Arg, VD);
Pointers.push_back(Arg);
- Sizes.push_back(CGF.getTypeSize(CGF.getContext().VoidPtrTy));
+ Sizes.push_back(
+ CGF.Builder.CreateIntCast(CGF.getTypeSize(CGF.getContext().VoidPtrTy),
+ CGF.Int64Ty, /*isSigned=*/true));
Types.push_back(OMP_MAP_LITERAL | OMP_MAP_TARGET_PARAM);
return;
}
@@ -7834,7 +8334,7 @@ public:
MapValuesArrayTy &Sizes,
MapFlagsArrayTy &Types) const {
// Map other list items in the map clause which are not captured variables
- // but "declare target link" global variables.,
+ // but "declare target link" global variables.
for (const auto *C : this->CurDir.getClausesOfKind<OMPMapClause>()) {
for (const auto &L : C->component_lists()) {
if (!L.first)
@@ -7844,7 +8344,8 @@ public:
continue;
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
- if (!Res || *Res != OMPDeclareTargetDeclAttr::MT_Link)
+ if (CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory() ||
+ !Res || *Res != OMPDeclareTargetDeclAttr::MT_Link)
continue;
StructRangeInfoTy PartialStruct;
generateInfoForComponentList(
@@ -7865,12 +8366,15 @@ public:
MapValuesArrayTy &CurPointers,
MapValuesArrayTy &CurSizes,
MapFlagsArrayTy &CurMapTypes) const {
+ bool IsImplicit = true;
// Do the default mapping.
if (CI.capturesThis()) {
CurBasePointers.push_back(CV);
CurPointers.push_back(CV);
const auto *PtrTy = cast<PointerType>(RI.getType().getTypePtr());
- CurSizes.push_back(CGF.getTypeSize(PtrTy->getPointeeType()));
+ CurSizes.push_back(
+ CGF.Builder.CreateIntCast(CGF.getTypeSize(PtrTy->getPointeeType()),
+ CGF.Int64Ty, /*isSigned=*/true));
// Default map type.
CurMapTypes.push_back(OMP_MAP_TO | OMP_MAP_FROM);
} else if (CI.capturesVariableByCopy()) {
@@ -7880,39 +8384,64 @@ public:
// We have to signal to the runtime captures passed by value that are
// not pointers.
CurMapTypes.push_back(OMP_MAP_LITERAL);
- CurSizes.push_back(CGF.getTypeSize(RI.getType()));
+ CurSizes.push_back(CGF.Builder.CreateIntCast(
+ CGF.getTypeSize(RI.getType()), CGF.Int64Ty, /*isSigned=*/true));
} else {
// Pointers are implicitly mapped with a zero size and no flags
// (other than first map that is added for all implicit maps).
CurMapTypes.push_back(OMP_MAP_NONE);
- CurSizes.push_back(llvm::Constant::getNullValue(CGF.SizeTy));
+ CurSizes.push_back(llvm::Constant::getNullValue(CGF.Int64Ty));
}
+ const VarDecl *VD = CI.getCapturedVar();
+ auto I = FirstPrivateDecls.find(VD);
+ if (I != FirstPrivateDecls.end())
+ IsImplicit = I->getSecond();
} else {
assert(CI.capturesVariable() && "Expected captured reference.");
- CurBasePointers.push_back(CV);
- CurPointers.push_back(CV);
-
const auto *PtrTy = cast<ReferenceType>(RI.getType().getTypePtr());
QualType ElementType = PtrTy->getPointeeType();
- CurSizes.push_back(CGF.getTypeSize(ElementType));
+ CurSizes.push_back(CGF.Builder.CreateIntCast(
+ CGF.getTypeSize(ElementType), CGF.Int64Ty, /*isSigned=*/true));
// The default map type for a scalar/complex type is 'to' because by
// default the value doesn't have to be retrieved. For an aggregate
// type, the default is 'tofrom'.
CurMapTypes.push_back(getMapModifiersForPrivateClauses(CI));
+ const VarDecl *VD = CI.getCapturedVar();
+ auto I = FirstPrivateDecls.find(VD);
+ if (I != FirstPrivateDecls.end() &&
+ VD->getType().isConstant(CGF.getContext())) {
+ llvm::Constant *Addr =
+ CGF.CGM.getOpenMPRuntime().registerTargetFirstprivateCopy(CGF, VD);
+ // Copy the value of the original variable to the new global copy.
+ CGF.Builder.CreateMemCpy(
+ CGF.MakeNaturalAlignAddrLValue(Addr, ElementType).getAddress(),
+ Address(CV, CGF.getContext().getTypeAlignInChars(ElementType)),
+ CurSizes.back(), /*IsVolatile=*/false);
+ // Use new global variable as the base pointers.
+ CurBasePointers.push_back(Addr);
+ CurPointers.push_back(Addr);
+ } else {
+ CurBasePointers.push_back(CV);
+ if (I != FirstPrivateDecls.end() && ElementType->isAnyPointerType()) {
+ Address PtrAddr = CGF.EmitLoadOfReference(CGF.MakeAddrLValue(
+ CV, ElementType, CGF.getContext().getDeclAlign(VD),
+ AlignmentSource::Decl));
+ CurPointers.push_back(PtrAddr.getPointer());
+ } else {
+ CurPointers.push_back(CV);
+ }
+ }
+ if (I != FirstPrivateDecls.end())
+ IsImplicit = I->getSecond();
}
// Every default map produces a single argument which is a target parameter.
CurMapTypes.back() |= OMP_MAP_TARGET_PARAM;
// Add flag stating this is an implicit map.
- CurMapTypes.back() |= OMP_MAP_IMPLICIT;
+ if (IsImplicit)
+ CurMapTypes.back() |= OMP_MAP_IMPLICIT;
}
};
-
-enum OpenMPOffloadingReservedDeviceIDs {
- /// Device ID if the device was not defined, runtime should get it
- /// from environment variables in the spec.
- OMP_DEVICEID_UNDEF = -1,
-};
} // anonymous namespace
/// Emit the arrays used to pass the captures and map information to the
@@ -7955,10 +8484,12 @@ emitOffloadingArrays(CodeGenFunction &CGF,
// If we don't have any VLA types or other types that require runtime
// evaluation, we can use a constant array for the map sizes, otherwise we
// need to fill up the arrays as we do for the pointers.
+ QualType Int64Ty =
+ Ctx.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
if (hasRuntimeEvaluationCaptureSize) {
- QualType SizeArrayType = Ctx.getConstantArrayType(
- Ctx.getSizeType(), PointerNumAP, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
+ QualType SizeArrayType =
+ Ctx.getConstantArrayType(Int64Ty, PointerNumAP, ArrayType::Normal,
+ /*IndexTypeQuals=*/0);
Info.SizesArray =
CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
} else {
@@ -7969,7 +8500,7 @@ emitOffloadingArrays(CodeGenFunction &CGF,
ConstSizes.push_back(cast<llvm::Constant>(S));
auto *SizesArrayInit = llvm::ConstantArray::get(
- llvm::ArrayType::get(CGM.SizeTy, ConstSizes.size()), ConstSizes);
+ llvm::ArrayType::get(CGM.Int64Ty, ConstSizes.size()), ConstSizes);
std::string Name = CGM.getOpenMPRuntime().getName({"offload_sizes"});
auto *SizesArrayGbl = new llvm::GlobalVariable(
CGM.getModule(), SizesArrayInit->getType(),
@@ -8019,13 +8550,13 @@ emitOffloadingArrays(CodeGenFunction &CGF,
if (hasRuntimeEvaluationCaptureSize) {
llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.SizeTy, Info.NumberOfPtrs),
+ llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
Info.SizesArray,
/*Idx0=*/0,
/*Idx1=*/I);
- Address SAddr(S, Ctx.getTypeAlignInChars(Ctx.getSizeType()));
+ Address SAddr(S, Ctx.getTypeAlignInChars(Int64Ty));
CGF.Builder.CreateStore(
- CGF.Builder.CreateIntCast(Sizes[I], CGM.SizeTy, /*isSigned=*/true),
+ CGF.Builder.CreateIntCast(Sizes[I], CGM.Int64Ty, /*isSigned=*/true),
SAddr);
}
}
@@ -8049,7 +8580,7 @@ static void emitOffloadingArraysArgument(
/*Idx0=*/0,
/*Idx1=*/0);
SizesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.SizeTy, Info.NumberOfPtrs), Info.SizesArray,
+ llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs), Info.SizesArray,
/*Idx0=*/0, /*Idx1=*/0);
MapTypesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
@@ -8059,76 +8590,23 @@ static void emitOffloadingArraysArgument(
} else {
BasePointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
PointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
- SizesArrayArg = llvm::ConstantPointerNull::get(CGM.SizeTy->getPointerTo());
+ SizesArrayArg = llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
MapTypesArrayArg =
llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
}
}
-/// Checks if the expression is constant or does not have non-trivial function
-/// calls.
-static bool isTrivial(ASTContext &Ctx, const Expr * E) {
- // We can skip constant expressions.
- // We can skip expressions with trivial calls or simple expressions.
- return (E->isEvaluatable(Ctx, Expr::SE_AllowUndefinedBehavior) ||
- !E->hasNonTrivialCall(Ctx)) &&
- !E->HasSideEffects(Ctx, /*IncludePossibleEffects=*/true);
-}
-
-/// Checks if the \p Body is the \a CompoundStmt and returns its child statement
-/// iff there is only one that is not evaluatable at the compile time.
-static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body) {
- if (const auto *C = dyn_cast<CompoundStmt>(Body)) {
- const Stmt *Child = nullptr;
- for (const Stmt *S : C->body()) {
- if (const auto *E = dyn_cast<Expr>(S)) {
- if (isTrivial(Ctx, E))
- continue;
- }
- // Some of the statements can be ignored.
- if (isa<AsmStmt>(S) || isa<NullStmt>(S) || isa<OMPFlushDirective>(S) ||
- isa<OMPBarrierDirective>(S) || isa<OMPTaskyieldDirective>(S))
- continue;
- // Analyze declarations.
- if (const auto *DS = dyn_cast<DeclStmt>(S)) {
- if (llvm::all_of(DS->decls(), [&Ctx](const Decl *D) {
- if (isa<EmptyDecl>(D) || isa<DeclContext>(D) ||
- isa<TypeDecl>(D) || isa<PragmaCommentDecl>(D) ||
- isa<PragmaDetectMismatchDecl>(D) || isa<UsingDecl>(D) ||
- isa<UsingDirectiveDecl>(D) ||
- isa<OMPDeclareReductionDecl>(D) ||
- isa<OMPThreadPrivateDecl>(D))
- return true;
- const auto *VD = dyn_cast<VarDecl>(D);
- if (!VD)
- return false;
- return VD->isConstexpr() ||
- ((VD->getType().isTrivialType(Ctx) ||
- VD->getType()->isReferenceType()) &&
- (!VD->hasInit() || isTrivial(Ctx, VD->getInit())));
- }))
- continue;
- }
- // Found multiple children - cannot get the one child only.
- if (Child)
- return Body;
- Child = S;
- }
- if (Child)
- return Child;
- }
- return Body;
-}
-
/// Check for inner distribute directive.
static const OMPExecutableDirective *
getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
const auto *CS = D.getInnermostCapturedStmt();
const auto *Body =
CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
- const Stmt *ChildStmt = getSingleCompoundChild(Ctx, Body);
+ const Stmt *ChildStmt =
+ CGOpenMPSIMDRuntime::getSingleCompoundChild(Ctx, Body);
- if (const auto *NestedDir = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
+ if (const auto *NestedDir =
+ dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
switch (D.getDirectiveKind()) {
case OMPD_target:
@@ -8139,8 +8617,9 @@ getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
/*IgnoreCaptured=*/true);
if (!Body)
return nullptr;
- ChildStmt = getSingleCompoundChild(Ctx, Body);
- if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
+ ChildStmt = CGOpenMPSIMDRuntime::getSingleCompoundChild(Ctx, Body);
+ if (const auto *NND =
+ dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
DKind = NND->getDirectiveKind();
if (isOpenMPDistributeDirective(DKind))
return NND;
@@ -8170,6 +8649,7 @@ getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
case OMPD_cancellation_point:
case OMPD_ordered:
case OMPD_threadprivate:
+ case OMPD_allocate:
case OMPD_task:
case OMPD_simd:
case OMPD_sections:
@@ -8200,6 +8680,7 @@ getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_requires:
@@ -8244,7 +8725,7 @@ void CGOpenMPRuntime::emitTargetNumIterationsCall(
void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
- llvm::Value *OutlinedFn,
+ llvm::Function *OutlinedFn,
llvm::Value *OutlinedFnID,
const Expr *IfCond, const Expr *Device) {
if (!CGF.HaveInsertPoint())
@@ -8295,8 +8776,8 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
// Return value of the runtime offloading call.
llvm::Value *Return;
- llvm::Value *NumTeams = emitNumTeamsForTargetDirective(*this, CGF, D);
- llvm::Value *NumThreads = emitNumThreadsForTargetDirective(*this, CGF, D);
+ llvm::Value *NumTeams = emitNumTeamsForTargetDirective(CGF, D);
+ llvm::Value *NumThreads = emitNumThreadsForTargetDirective(CGF, D);
bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
// The target region is an outlined function launched by the runtime
@@ -8420,10 +8901,12 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
if (CI->capturesVariableArrayType()) {
CurBasePointers.push_back(*CV);
CurPointers.push_back(*CV);
- CurSizes.push_back(CGF.getTypeSize(RI->getType()));
+ CurSizes.push_back(CGF.Builder.CreateIntCast(
+ CGF.getTypeSize(RI->getType()), CGF.Int64Ty, /*isSigned=*/true));
// Copy to the device as an argument. No need to retrieve it.
CurMapTypes.push_back(MappableExprsHandler::OMP_MAP_LITERAL |
- MappableExprsHandler::OMP_MAP_TARGET_PARAM);
+ MappableExprsHandler::OMP_MAP_TARGET_PARAM |
+ MappableExprsHandler::OMP_MAP_IMPLICIT);
} else {
// If we have any information in the map clause, we use it, otherwise we
// just do a default mapping.
@@ -8592,6 +9075,7 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
case OMPD_cancellation_point:
case OMPD_ordered:
case OMPD_threadprivate:
+ case OMPD_allocate:
case OMPD_task:
case OMPD_simd:
case OMPD_sections:
@@ -8622,6 +9106,7 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_requires:
@@ -8691,13 +9176,49 @@ bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
cast<VarDecl>(GD.getDecl()));
- if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link) {
+ if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link ||
+ (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ HasRequiresUnifiedSharedMemory)) {
DeferredGlobalVariables.insert(cast<VarDecl>(GD.getDecl()));
return true;
}
return false;
}
+llvm::Constant *
+CGOpenMPRuntime::registerTargetFirstprivateCopy(CodeGenFunction &CGF,
+ const VarDecl *VD) {
+ assert(VD->getType().isConstant(CGM.getContext()) &&
+ "Expected constant variable.");
+ StringRef VarName;
+ llvm::Constant *Addr;
+ llvm::GlobalValue::LinkageTypes Linkage;
+ QualType Ty = VD->getType();
+ SmallString<128> Buffer;
+ {
+ unsigned DeviceID;
+ unsigned FileID;
+ unsigned Line;
+ getTargetEntryUniqueInfo(CGM.getContext(), VD->getLocation(), DeviceID,
+ FileID, Line);
+ llvm::raw_svector_ostream OS(Buffer);
+ OS << "__omp_offloading_firstprivate_" << llvm::format("_%x", DeviceID)
+ << llvm::format("_%x_", FileID) << VD->getName() << "_l" << Line;
+ VarName = OS.str();
+ }
+ Linkage = llvm::GlobalValue::InternalLinkage;
+ Addr =
+ getOrCreateInternalVariable(CGM.getTypes().ConvertTypeForMem(Ty), VarName,
+ getDefaultFirstprivateAddressSpace());
+ cast<llvm::GlobalValue>(Addr)->setLinkage(Linkage);
+ CharUnits VarSize = CGM.getContext().getTypeSizeInChars(Ty);
+ CGM.addCompilerUsedGlobal(cast<llvm::GlobalValue>(Addr));
+ OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
+ VarName, Addr, VarSize,
+ OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo, Linkage);
+ return Addr;
+}
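
Given the format string above, a constant firstprivate named Tol captured on (hypothetical) device ID 0x0, file ID 0xabcd12 and line 12 would get an internal global copy named along the lines of:

    __omp_offloading_firstprivate__0_abcd12_Tol_l12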
+
void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
llvm::Constant *Addr) {
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
@@ -8716,8 +9237,9 @@ void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
StringRef VarName;
CharUnits VarSize;
llvm::GlobalValue::LinkageTypes Linkage;
- switch (*Res) {
- case OMPDeclareTargetDeclAttr::MT_To:
+
+ if (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ !HasRequiresUnifiedSharedMemory) {
Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
VarName = CGM.getMangledName(VD);
if (VD->hasDefinition(CGM.getContext()) != VarDecl::DeclarationOnly) {
@@ -8740,20 +9262,27 @@ void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
CGM.addCompilerUsedGlobal(GVAddrRef);
}
}
- break;
- case OMPDeclareTargetDeclAttr::MT_Link:
- Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink;
+ } else {
+ assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
+ (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ HasRequiresUnifiedSharedMemory)) &&
+ "Declare target attribute must link or to with unified memory.");
+ if (*Res == OMPDeclareTargetDeclAttr::MT_Link)
+ Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink;
+ else
+ Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
+
if (CGM.getLangOpts().OpenMPIsDevice) {
VarName = Addr->getName();
Addr = nullptr;
} else {
- VarName = getAddrOfDeclareTargetLink(VD).getName();
- Addr = cast<llvm::Constant>(getAddrOfDeclareTargetLink(VD).getPointer());
+ VarName = getAddrOfDeclareTargetVar(VD).getName();
+ Addr = cast<llvm::Constant>(getAddrOfDeclareTargetVar(VD).getPointer());
}
VarSize = CGM.getPointerSize();
Linkage = llvm::GlobalValue::WeakAnyLinkage;
- break;
}
+
OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
VarName, Addr, VarSize, Flags, Linkage);
}
@@ -8772,12 +9301,15 @@ void CGOpenMPRuntime::emitDeferredTargetDecls() const {
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (!Res)
continue;
- if (*Res == OMPDeclareTargetDeclAttr::MT_To) {
+ if (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ !HasRequiresUnifiedSharedMemory) {
CGM.EmitGlobal(VD);
} else {
- assert(*Res == OMPDeclareTargetDeclAttr::MT_Link &&
- "Expected to or link clauses.");
- (void)CGM.getOpenMPRuntime().getAddrOfDeclareTargetLink(VD);
+ assert((*Res == OMPDeclareTargetDeclAttr::MT_Link ||
+ (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ HasRequiresUnifiedSharedMemory)) &&
+ "Expected link clause or to clause with unified memory.");
+ (void)CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
}
}
}
@@ -8788,6 +9320,44 @@ void CGOpenMPRuntime::adjustTargetSpecificDataForLambdas(
" Expected target-based directive.");
}
+void CGOpenMPRuntime::checkArchForUnifiedAddressing(
+ const OMPRequiresDecl *D) {
+ for (const OMPClause *Clause : D->clauselists()) {
+ if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
+ HasRequiresUnifiedSharedMemory = true;
+ break;
+ }
+ }
+}
+
+bool CGOpenMPRuntime::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
+ LangAS &AS) {
+ if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
+ return false;
+ const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
+ switch(A->getAllocatorType()) {
+ case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
+ // Not supported, fallback to the default mem space.
+ case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
+ case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
+ case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
+ case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
+ case OMPAllocateDeclAttr::OMPThreadMemAlloc:
+ case OMPAllocateDeclAttr::OMPConstMemAlloc:
+ case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
+ AS = LangAS::Default;
+ return true;
+ case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
+ llvm_unreachable("Expected predefined allocator for the variables with the "
+ "static storage.");
+ }
+ return false;
+}
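
A hypothetical use of one of the predefined allocators handled above; with this hook, every predefined allocator currently falls back to the default address space for the global.

    #include <omp.h>

    int counter;                        // global with static storage duration
    #pragma omp allocate(counter) allocator(omp_const_mem_alloc)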
+
+bool CGOpenMPRuntime::hasRequiresUnifiedSharedMemory() const {
+ return HasRequiresUnifiedSharedMemory;
+}
+
CGOpenMPRuntime::DisableAutoDeclareTargetRAII::DisableAutoDeclareTargetRAII(
CodeGenModule &CGM)
: CGM(CGM) {
@@ -8822,6 +9392,47 @@ bool CGOpenMPRuntime::markAsGlobalTarget(GlobalDecl GD) {
return !AlreadyEmittedTargetFunctions.insert(Name).second;
}
+llvm::Function *CGOpenMPRuntime::emitRequiresDirectiveRegFun() {
+ // If we don't have entries or if we are emitting code for the device, we
+ // don't need to do anything.
+ if (CGM.getLangOpts().OMPTargetTriples.empty() ||
+ CGM.getLangOpts().OpenMPSimd || CGM.getLangOpts().OpenMPIsDevice ||
+ (OffloadEntriesInfoManager.empty() &&
+ !HasEmittedDeclareTargetRegion &&
+ !HasEmittedTargetRegion))
+ return nullptr;
+
+ // Create and register the function that handles the requires directives.
+ ASTContext &C = CGM.getContext();
+
+ llvm::Function *RequiresRegFn;
+ {
+ CodeGenFunction CGF(CGM);
+ const auto &FI = CGM.getTypes().arrangeNullaryFunction();
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
+ std::string ReqName = getName({"omp_offloading", "requires_reg"});
+ RequiresRegFn = CGM.CreateGlobalInitOrDestructFunction(FTy, ReqName, FI);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, RequiresRegFn, FI, {});
+ OpenMPOffloadingRequiresDirFlags Flags = OMP_REQ_NONE;
+ // TODO: check for other requires clauses.
+ // The requires directive takes effect only when a target region is
+ // present in the compilation unit. Otherwise it is ignored and not
+ // passed to the runtime. This keeps the runtime from reporting an error
+ // for mismatched requires clauses across compilation units that do not
+ // contain at least one target region.
+ assert((HasEmittedTargetRegion ||
+ HasEmittedDeclareTargetRegion ||
+ !OffloadEntriesInfoManager.empty()) &&
+ "Target or declare target region expected.");
+ if (HasRequiresUnifiedSharedMemory)
+ Flags = OMP_REQ_UNIFIED_SHARED_MEMORY;
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_register_requires),
+ llvm::ConstantInt::get(CGM.Int64Ty, Flags));
+ CGF.FinishFunction();
+ }
+ return RequiresRegFn;
+}
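
In source terms (a hypothetical translation unit): because a target region is present alongside the requires directive, the registration function emitted here calls __tgt_register_requires with the unified-shared-memory flag; the same directive in a TU with no target regions or offload entries produces nothing.

    #pragma omp requires unified_shared_memory

    void touch(int *p) {
      #pragma omp target                // at least one target region: flags get registered
      { *p += 1; }
    }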
+
llvm::Function *CGOpenMPRuntime::emitRegistrationFunction() {
// If we have offloading in the current module, we need to emit the entries
// now and register the offloading descriptor.
@@ -8836,7 +9447,7 @@ llvm::Function *CGOpenMPRuntime::emitRegistrationFunction() {
void CGOpenMPRuntime::emitTeamsCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
SourceLocation Loc,
- llvm::Value *OutlinedFn,
+ llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars) {
if (!CGF.HaveInsertPoint())
return;
@@ -8853,7 +9464,7 @@ void CGOpenMPRuntime::emitTeamsCall(CodeGenFunction &CGF,
RealArgs.append(std::begin(Args), std::end(Args));
RealArgs.append(CapturedVars.begin(), CapturedVars.end());
- llvm::Value *RTLFn = createRuntimeFunction(OMPRTL__kmpc_fork_teams);
+ llvm::FunctionCallee RTLFn = createRuntimeFunction(OMPRTL__kmpc_fork_teams);
CGF.EmitRuntimeCall(RTLFn, RealArgs);
}
@@ -9075,6 +9686,7 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
case OMPD_cancellation_point:
case OMPD_ordered:
case OMPD_threadprivate:
+ case OMPD_allocate:
case OMPD_task:
case OMPD_simd:
case OMPD_sections:
@@ -9102,6 +9714,7 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_target:
@@ -9268,8 +9881,9 @@ emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
llvm::raw_svector_ostream Out(Buffer);
Out << "_ZGV" << Data.ISA << Mask;
if (!VLENVal) {
- Out << llvm::APSInt::getUnsigned(Data.VecRegSize /
- evaluateCDTSize(FD, ParamAttrs));
+ unsigned NumElts = evaluateCDTSize(FD, ParamAttrs);
+ assert(NumElts && "Non-zero simdlen/cdtsize expected");
+ Out << llvm::APSInt::getUnsigned(Data.VecRegSize / NumElts);
} else {
Out << VLENVal;
}
@@ -9299,6 +9913,307 @@ emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
}
}
+// These are the functions that are needed to mangle the names of the
+// vector functions generated by the compiler, according to the rules
+// defined in the "Vector Function ABI specifications for AArch64",
+// available at
+// https://developer.arm.com/products/software-development-tools/hpc/arm-compiler-for-hpc/vector-function-abi.
+
+/// Maps To Vector (MTV), as defined in 3.1.1 of the AAVFABI.
+///
+/// TODO: Need to implement the behavior for reference marked with a
+/// var or no linear modifiers (1.b in the section). For this, we
+/// need to extend ParamKindTy to support the linear modifiers.
+static bool getAArch64MTV(QualType QT, ParamKindTy Kind) {
+ QT = QT.getCanonicalType();
+
+ if (QT->isVoidType())
+ return false;
+
+ if (Kind == ParamKindTy::Uniform)
+ return false;
+
+ if (Kind == ParamKindTy::Linear)
+ return false;
+
+ // TODO: Handle linear references with modifiers
+
+ if (Kind == ParamKindTy::LinearWithVarStride)
+ return false;
+
+ return true;
+}
+
+/// Pass By Value (PBV), as defined in 3.1.2 of the AAVFABI.
+static bool getAArch64PBV(QualType QT, ASTContext &C) {
+ QT = QT.getCanonicalType();
+ unsigned Size = C.getTypeSize(QT);
+
+ // Only scalars and complex types no wider than 16 bytes set PBV to true.
+ if (Size != 8 && Size != 16 && Size != 32 && Size != 64 && Size != 128)
+ return false;
+
+ if (QT->isFloatingType())
+ return true;
+
+ if (QT->isIntegerType())
+ return true;
+
+ if (QT->isPointerType())
+ return true;
+
+ // TODO: Add support for complex types (section 3.1.2, item 2).
+
+ return false;
+}
+
+/// Computes the lane size (LS) of a return type or of an input parameter,
+/// as defined by `LS(P)` in 3.2.1 of the AAVFABI.
+/// TODO: Add support for references, section 3.2.1, item 1.
+static unsigned getAArch64LS(QualType QT, ParamKindTy Kind, ASTContext &C) {
+ if (getAArch64MTV(QT, Kind) && QT.getCanonicalType()->isPointerType()) {
+ QualType PTy = QT.getCanonicalType()->getPointeeType();
+ if (getAArch64PBV(PTy, C))
+ return C.getTypeSize(PTy);
+ }
+ if (getAArch64PBV(QT, C))
+ return C.getTypeSize(QT);
+
+ return C.getTypeSize(C.getUIntPtrType());
+}
+
+// Get Narrowest Data Size (NDS) and Widest Data Size (WDS) from the
+// signature of the scalar function, as defined in 3.2.2 of the
+// AAVFABI.
+static std::tuple<unsigned, unsigned, bool>
+getNDSWDS(const FunctionDecl *FD, ArrayRef<ParamAttrTy> ParamAttrs) {
+ QualType RetType = FD->getReturnType().getCanonicalType();
+
+ ASTContext &C = FD->getASTContext();
+
+ bool OutputBecomesInput = false;
+
+ llvm::SmallVector<unsigned, 8> Sizes;
+ if (!RetType->isVoidType()) {
+ Sizes.push_back(getAArch64LS(RetType, ParamKindTy::Vector, C));
+ if (!getAArch64PBV(RetType, C) && getAArch64MTV(RetType, {}))
+ OutputBecomesInput = true;
+ }
+ for (unsigned I = 0, E = FD->getNumParams(); I < E; ++I) {
+ QualType QT = FD->getParamDecl(I)->getType().getCanonicalType();
+ Sizes.push_back(getAArch64LS(QT, ParamAttrs[I].Kind, C));
+ }
+
+ assert(!Sizes.empty() && "Unable to determine NDS and WDS.");
+ // The LS of a function parameter / return value can only be a power
+ // of 2, starting from 8 bits, up to 128.
+ assert(std::all_of(Sizes.begin(), Sizes.end(),
+ [](unsigned Size) {
+ return Size == 8 || Size == 16 || Size == 32 ||
+ Size == 64 || Size == 128;
+ }) &&
+ "Invalid size");
+
+ return std::make_tuple(*std::min_element(std::begin(Sizes), std::end(Sizes)),
+ *std::max_element(std::begin(Sizes), std::end(Sizes)),
+ OutputBecomesInput);
+}
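
A small worked example of the computation above (hypothetical signature): for double foo(float x, double y) with both parameters classified as vector, the lane sizes are 64, 32 and 64 bits, so NDS = 32 and WDS = 64, and the return value is pass-by-value, so it does not become an extra input.

    // Hypothetical scalar function under #pragma omp declare simd:
    double foo(float x, double y);
    // Lane sizes: return 64, x 32, y 64  =>  NDS = 32, WDS = 64, OutputBecomesInput = false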
+
+/// Mangle the parameter part of the vector function name according to
+/// their OpenMP classification. The mangling function is defined in
+/// section 3.5 of the AAVFABI.
+static std::string mangleVectorParameters(ArrayRef<ParamAttrTy> ParamAttrs) {
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ for (const auto &ParamAttr : ParamAttrs) {
+ switch (ParamAttr.Kind) {
+ case LinearWithVarStride:
+ Out << "ls" << ParamAttr.StrideOrArg;
+ break;
+ case Linear:
+ Out << 'l';
+ // Don't print the step value if it is not present or if it is
+ // equal to 1.
+ if (!!ParamAttr.StrideOrArg && ParamAttr.StrideOrArg != 1)
+ Out << ParamAttr.StrideOrArg;
+ break;
+ case Uniform:
+ Out << 'u';
+ break;
+ case Vector:
+ Out << 'v';
+ break;
+ }
+
+ if (!!ParamAttr.Alignment)
+ Out << 'a' << ParamAttr.Alignment;
+ }
+
+ return Out.str();
+}
+
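For instance, parameters classified as (Linear, stride 1), (Uniform, alignment 32) and (Vector) are mangled to "l", "ua32" and "v", so the parameter part of the name is "lua32v"; a linear parameter with a variable stride in argument position 2 would contribute "ls2".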
+// Function used to add the attribute. The parameter `VLEN` is
+// templated to allow the use of "x" when targeting scalable functions
+// for SVE.
+template <typename T>
+static void addAArch64VectorName(T VLEN, StringRef LMask, StringRef Prefix,
+ char ISA, StringRef ParSeq,
+ StringRef MangledName, bool OutputBecomesInput,
+ llvm::Function *Fn) {
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ Out << Prefix << ISA << LMask << VLEN;
+ if (OutputBecomesInput)
+ Out << "v";
+ Out << ParSeq << "_" << MangledName;
+ Fn->addFnAttr(Out.str());
+}
+
+// Helper function to generate the Advanced SIMD names depending on
+// the value of the NDS when simdlen is not present.
+static void addAArch64AdvSIMDNDSNames(unsigned NDS, StringRef Mask,
+ StringRef Prefix, char ISA,
+ StringRef ParSeq, StringRef MangledName,
+ bool OutputBecomesInput,
+ llvm::Function *Fn) {
+ switch (NDS) {
+ case 8:
+ addAArch64VectorName(8, Mask, Prefix, ISA, ParSeq, MangledName,
+ OutputBecomesInput, Fn);
+ addAArch64VectorName(16, Mask, Prefix, ISA, ParSeq, MangledName,
+ OutputBecomesInput, Fn);
+ break;
+ case 16:
+ addAArch64VectorName(4, Mask, Prefix, ISA, ParSeq, MangledName,
+ OutputBecomesInput, Fn);
+ addAArch64VectorName(8, Mask, Prefix, ISA, ParSeq, MangledName,
+ OutputBecomesInput, Fn);
+ break;
+ case 32:
+ addAArch64VectorName(2, Mask, Prefix, ISA, ParSeq, MangledName,
+ OutputBecomesInput, Fn);
+ addAArch64VectorName(4, Mask, Prefix, ISA, ParSeq, MangledName,
+ OutputBecomesInput, Fn);
+ break;
+ case 64:
+ case 128:
+ addAArch64VectorName(2, Mask, Prefix, ISA, ParSeq, MangledName,
+ OutputBecomesInput, Fn);
+ break;
+ default:
+ llvm_unreachable("Scalar type is too wide.");
+ }
+}
+
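The paired lengths per NDS correspond to filling a 64-bit and a 128-bit Advanced SIMD register (AAVFABI section 3.3.1): NDS = 8 yields 8- and 16-lane variants, NDS = 16 yields 4 and 8, NDS = 32 yields 2 and 4, while 64- and 128-bit elements get a single 2-lane variant.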
+/// Emit vector function attributes for AArch64, as defined in the AAVFABI.
+static void emitAArch64DeclareSimdFunction(
+ CodeGenModule &CGM, const FunctionDecl *FD, unsigned UserVLEN,
+ ArrayRef<ParamAttrTy> ParamAttrs,
+ OMPDeclareSimdDeclAttr::BranchStateTy State, StringRef MangledName,
+ char ISA, unsigned VecRegSize, llvm::Function *Fn, SourceLocation SLoc) {
+
+ // Get basic data for building the vector signature.
+ const auto Data = getNDSWDS(FD, ParamAttrs);
+ const unsigned NDS = std::get<0>(Data);
+ const unsigned WDS = std::get<1>(Data);
+ const bool OutputBecomesInput = std::get<2>(Data);
+
+ // Check the values provided via `simdlen` by the user.
+ // 1. A `simdlen(1)` doesn't produce vector signatures.
+ if (UserVLEN == 1) {
+ unsigned DiagID = CGM.getDiags().getCustomDiagID(
+ DiagnosticsEngine::Warning,
+ "The clause simdlen(1) has no effect when targeting aarch64.");
+ CGM.getDiags().Report(SLoc, DiagID);
+ return;
+ }
+
+ // 2. Section 3.3.1, item 1: user input must be a power of 2 for
+ // Advanced SIMD output.
+ if (ISA == 'n' && UserVLEN && !llvm::isPowerOf2_32(UserVLEN)) {
+ unsigned DiagID = CGM.getDiags().getCustomDiagID(
+ DiagnosticsEngine::Warning, "The value specified in simdlen must be a "
+ "power of 2 when targeting Advanced SIMD.");
+ CGM.getDiags().Report(SLoc, DiagID);
+ return;
+ }
+
+ // 3. Section 3.4.1. SVE fixed length must obey the architectural
+ // limits.
+ if (ISA == 's' && UserVLEN != 0) {
+ if ((UserVLEN * WDS > 2048) || (UserVLEN * WDS % 128 != 0)) {
+ unsigned DiagID = CGM.getDiags().getCustomDiagID(
+ DiagnosticsEngine::Warning, "The clause simdlen must fit the %0-bit "
+ "lanes in the architectural constraints "
+ "for SVE (min is 128-bit, max is "
+ "2048-bit, by steps of 128-bit)");
+ CGM.getDiags().Report(SLoc, DiagID) << WDS;
+ return;
+ }
+ }
+
+ // Sort out parameter sequence.
+ const std::string ParSeq = mangleVectorParameters(ParamAttrs);
+ StringRef Prefix = "_ZGV";
+ // Generate simdlen from user input (if any).
+ if (UserVLEN) {
+ if (ISA == 's') {
+ // SVE generates only a masked function.
+ addAArch64VectorName(UserVLEN, "M", Prefix, ISA, ParSeq, MangledName,
+ OutputBecomesInput, Fn);
+ } else {
+ assert(ISA == 'n' && "Expected ISA either 's' or 'n'.");
+ // Advanced SIMD generates one or two functions, depending on
+ // the `[not]inbranch` clause.
+ switch (State) {
+ case OMPDeclareSimdDeclAttr::BS_Undefined:
+ addAArch64VectorName(UserVLEN, "N", Prefix, ISA, ParSeq, MangledName,
+ OutputBecomesInput, Fn);
+ addAArch64VectorName(UserVLEN, "M", Prefix, ISA, ParSeq, MangledName,
+ OutputBecomesInput, Fn);
+ break;
+ case OMPDeclareSimdDeclAttr::BS_Notinbranch:
+ addAArch64VectorName(UserVLEN, "N", Prefix, ISA, ParSeq, MangledName,
+ OutputBecomesInput, Fn);
+ break;
+ case OMPDeclareSimdDeclAttr::BS_Inbranch:
+ addAArch64VectorName(UserVLEN, "M", Prefix, ISA, ParSeq, MangledName,
+ OutputBecomesInput, Fn);
+ break;
+ }
+ }
+ } else {
+ // If no user simdlen is provided, follow the AAVFABI rules for
+ // generating the vector length.
+ if (ISA == 's') {
+ // SVE, section 3.4.1, item 1.
+ addAArch64VectorName("x", "M", Prefix, ISA, ParSeq, MangledName,
+ OutputBecomesInput, Fn);
+ } else {
+ assert(ISA == 'n' && "Expected ISA either 's' or 'n'.");
+ // Advanced SIMD, Section 3.3.1 of the AAVFABI, generates one or
+ // two vector names depending on the use of the clause
+ // `[not]inbranch`.
+ switch (State) {
+ case OMPDeclareSimdDeclAttr::BS_Undefined:
+ addAArch64AdvSIMDNDSNames(NDS, "N", Prefix, ISA, ParSeq, MangledName,
+ OutputBecomesInput, Fn);
+ addAArch64AdvSIMDNDSNames(NDS, "M", Prefix, ISA, ParSeq, MangledName,
+ OutputBecomesInput, Fn);
+ break;
+ case OMPDeclareSimdDeclAttr::BS_Notinbranch:
+ addAArch64AdvSIMDNDSNames(NDS, "N", Prefix, ISA, ParSeq, MangledName,
+ OutputBecomesInput, Fn);
+ break;
+ case OMPDeclareSimdDeclAttr::BS_Inbranch:
+ addAArch64AdvSIMDNDSNames(NDS, "M", Prefix, ISA, ParSeq, MangledName,
+ OutputBecomesInput, Fn);
+ break;
+ }
+ }
+ }
+}
+
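Putting the pieces together, here is a minimal sketch of the expected output for a hypothetical C declaration (names derived by hand from the rules above, not taken from a test):

    #pragma omp declare simd uniform(p) linear(i)
    double foo(double *p, int i);

ParSeq is "ul" and NDS = 32 (from the int parameter); with no simdlen clause, a target with the neon feature gets the unmasked and masked Advanced SIMD variants "_ZGVnN2ul_foo", "_ZGVnN4ul_foo", "_ZGVnM2ul_foo" and "_ZGVnM4ul_foo" attached as function attributes, and a target with the sve feature additionally gets the scalable variant "_ZGVsMxul_foo".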
void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
llvm::Function *Fn) {
ASTContext &C = CGM.getContext();
@@ -9385,12 +10300,26 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
++MI;
}
llvm::APSInt VLENVal;
- if (const Expr *VLEN = Attr->getSimdlen())
- VLENVal = VLEN->EvaluateKnownConstInt(C);
+ SourceLocation ExprLoc;
+ const Expr *VLENExpr = Attr->getSimdlen();
+ if (VLENExpr) {
+ VLENVal = VLENExpr->EvaluateKnownConstInt(C);
+ ExprLoc = VLENExpr->getExprLoc();
+ }
OMPDeclareSimdDeclAttr::BranchStateTy State = Attr->getBranchState();
if (CGM.getTriple().getArch() == llvm::Triple::x86 ||
- CGM.getTriple().getArch() == llvm::Triple::x86_64)
+ CGM.getTriple().getArch() == llvm::Triple::x86_64) {
emitX86DeclareSimdFunction(FD, Fn, VLENVal, ParamAttrs, State);
+ } else if (CGM.getTriple().getArch() == llvm::Triple::aarch64) {
+ unsigned VLEN = VLENVal.getExtValue();
+ StringRef MangledName = Fn->getName();
+ if (CGM.getTarget().hasFeature("sve"))
+ emitAArch64DeclareSimdFunction(CGM, FD, VLEN, ParamAttrs, State,
+ MangledName, 's', 128, Fn, ExprLoc);
+ if (CGM.getTarget().hasFeature("neon"))
+ emitAArch64DeclareSimdFunction(CGM, FD, VLEN, ParamAttrs, State,
+ MangledName, 'n', 128, Fn, ExprLoc);
+ }
}
FD = FD->getPreviousDecl();
}
@@ -9403,11 +10332,12 @@ public:
static const int DoacrossFinArgs = 2;
private:
- llvm::Value *RTLFn;
+ llvm::FunctionCallee RTLFn;
llvm::Value *Args[DoacrossFinArgs];
public:
- DoacrossCleanupTy(llvm::Value *RTLFn, ArrayRef<llvm::Value *> CallArgs)
+ DoacrossCleanupTy(llvm::FunctionCallee RTLFn,
+ ArrayRef<llvm::Value *> CallArgs)
: RTLFn(RTLFn) {
assert(CallArgs.size() == DoacrossFinArgs);
std::copy(CallArgs.begin(), CallArgs.end(), std::begin(Args));
@@ -9454,10 +10384,8 @@ void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
enum { LowerFD = 0, UpperFD, StrideFD };
// Fill dims with data.
for (unsigned I = 0, E = NumIterations.size(); I < E; ++I) {
- LValue DimsLVal =
- CGF.MakeAddrLValue(CGF.Builder.CreateConstArrayGEP(
- DimsAddr, I, C.getTypeSizeInChars(KmpDimTy)),
- KmpDimTy);
+ LValue DimsLVal = CGF.MakeAddrLValue(
+ CGF.Builder.CreateConstArrayGEP(DimsAddr, I), KmpDimTy);
// dims.upper = num_iterations;
LValue UpperLVal = CGF.EmitLValueForField(
DimsLVal, *std::next(RD->field_begin(), UpperFD));
@@ -9480,16 +10408,16 @@ void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
getThreadID(CGF, D.getBeginLoc()),
llvm::ConstantInt::getSigned(CGM.Int32Ty, NumIterations.size()),
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder
- .CreateConstArrayGEP(DimsAddr, 0, C.getTypeSizeInChars(KmpDimTy))
- .getPointer(),
+ CGF.Builder.CreateConstArrayGEP(DimsAddr, 0).getPointer(),
CGM.VoidPtrTy)};
- llvm::Value *RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_init);
+ llvm::FunctionCallee RTLFn =
+ createRuntimeFunction(OMPRTL__kmpc_doacross_init);
CGF.EmitRuntimeCall(RTLFn, Args);
llvm::Value *FiniArgs[DoacrossCleanupTy::DoacrossFinArgs] = {
emitUpdateLocation(CGF, D.getEndLoc()), getThreadID(CGF, D.getEndLoc())};
- llvm::Value *FiniRTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_fini);
+ llvm::FunctionCallee FiniRTLFn =
+ createRuntimeFunction(OMPRTL__kmpc_doacross_fini);
CGF.EHStack.pushCleanup<DoacrossCleanupTy>(NormalAndEHCleanup, FiniRTLFn,
llvm::makeArrayRef(FiniArgs));
}
@@ -9508,20 +10436,14 @@ void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
llvm::Value *CntVal = CGF.EmitScalarConversion(
CGF.EmitScalarExpr(CounterVal), CounterVal->getType(), Int64Ty,
CounterVal->getExprLoc());
- CGF.EmitStoreOfScalar(
- CntVal,
- CGF.Builder.CreateConstArrayGEP(
- CntAddr, I, CGM.getContext().getTypeSizeInChars(Int64Ty)),
- /*Volatile=*/false, Int64Ty);
+ CGF.EmitStoreOfScalar(CntVal, CGF.Builder.CreateConstArrayGEP(CntAddr, I),
+ /*Volatile=*/false, Int64Ty);
}
llvm::Value *Args[] = {
emitUpdateLocation(CGF, C->getBeginLoc()),
getThreadID(CGF, C->getBeginLoc()),
- CGF.Builder
- .CreateConstArrayGEP(CntAddr, 0,
- CGM.getContext().getTypeSizeInChars(Int64Ty))
- .getPointer()};
- llvm::Value *RTLFn;
+ CGF.Builder.CreateConstArrayGEP(CntAddr, 0).getPointer()};
+ llvm::FunctionCallee RTLFn;
if (C->getDependencyKind() == OMPC_DEPEND_source) {
RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_post);
} else {
@@ -9532,12 +10454,12 @@ void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
}
void CGOpenMPRuntime::emitCall(CodeGenFunction &CGF, SourceLocation Loc,
- llvm::Value *Callee,
+ llvm::FunctionCallee Callee,
ArrayRef<llvm::Value *> Args) const {
assert(Loc.isValid() && "Outlined function call location must be valid.");
auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
- if (auto *Fn = dyn_cast<llvm::Function>(Callee)) {
+ if (auto *Fn = dyn_cast<llvm::Function>(Callee.getCallee())) {
if (Fn->doesNotThrow()) {
CGF.EmitNounwindRuntimeCall(Fn, Args);
return;
@@ -9547,35 +10469,116 @@ void CGOpenMPRuntime::emitCall(CodeGenFunction &CGF, SourceLocation Loc,
}
void CGOpenMPRuntime::emitOutlinedFunctionCall(
- CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
+ CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
ArrayRef<llvm::Value *> Args) const {
emitCall(CGF, Loc, OutlinedFn, Args);
}
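The recurring change from llvm::Value * (or llvm::Constant *) callees to llvm::FunctionCallee in this patch follows the LLVM 9 callee API, which carries the function type together with the callee pointer. A minimal sketch of the pattern, using a made-up runtime function name:

    // Sketch only: "__kmpc_example" is a hypothetical name for illustration.
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, {CGM.Int32Ty}, /*isVarArg=*/false);
    llvm::FunctionCallee Callee =
        CGM.getModule().getOrInsertFunction("__kmpc_example", FnTy);
    // The type travels with the callee, so the call site does not need to
    // recover it from the pointer's pointee type.
    CGF.Builder.CreateCall(Callee, {CGF.Builder.getInt32(0)});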
+void CGOpenMPRuntime::emitFunctionProlog(CodeGenFunction &CGF, const Decl *D) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D))
+ if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(FD))
+ HasEmittedDeclareTargetRegion = true;
+}
+
Address CGOpenMPRuntime::getParameterAddress(CodeGenFunction &CGF,
const VarDecl *NativeParam,
const VarDecl *TargetParam) const {
return CGF.GetAddrOfLocalVar(NativeParam);
}
+namespace {
+/// Cleanup action for allocate support.
+class OMPAllocateCleanupTy final : public EHScopeStack::Cleanup {
+public:
+ static const int CleanupArgs = 3;
+
+private:
+ llvm::FunctionCallee RTLFn;
+ llvm::Value *Args[CleanupArgs];
+
+public:
+ OMPAllocateCleanupTy(llvm::FunctionCallee RTLFn,
+ ArrayRef<llvm::Value *> CallArgs)
+ : RTLFn(RTLFn) {
+ assert(CallArgs.size() == CleanupArgs &&
+ "Size of arguments does not match.");
+ std::copy(CallArgs.begin(), CallArgs.end(), std::begin(Args));
+ }
+ void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
+ if (!CGF.HaveInsertPoint())
+ return;
+ CGF.EmitRuntimeCall(RTLFn, Args);
+ }
+};
+} // namespace
+
Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD) {
- return Address::invalid();
-}
-
-llvm::Value *CGOpenMPSIMDRuntime::emitParallelOutlinedFunction(
+ if (!VD)
+ return Address::invalid();
+ const VarDecl *CVD = VD->getCanonicalDecl();
+ if (!CVD->hasAttr<OMPAllocateDeclAttr>())
+ return Address::invalid();
+ const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
+ // Use the default allocation.
+ if (AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc &&
+ !AA->getAllocator())
+ return Address::invalid();
+ llvm::Value *Size;
+ CharUnits Align = CGM.getContext().getDeclAlign(CVD);
+ if (CVD->getType()->isVariablyModifiedType()) {
+ Size = CGF.getTypeSize(CVD->getType());
+ // Align the size: ((size + align - 1) / align) * align
+ Size = CGF.Builder.CreateNUWAdd(
+ Size, CGM.getSize(Align - CharUnits::fromQuantity(1)));
+ Size = CGF.Builder.CreateUDiv(Size, CGM.getSize(Align));
+ Size = CGF.Builder.CreateNUWMul(Size, CGM.getSize(Align));
+ } else {
+ CharUnits Sz = CGM.getContext().getTypeSizeInChars(CVD->getType());
+ Size = CGM.getSize(Sz.alignTo(Align));
+ }
+ llvm::Value *ThreadID = getThreadID(CGF, CVD->getBeginLoc());
+ assert(AA->getAllocator() &&
+ "Expected allocator expression for non-default allocator.");
+ llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator());
+ // According to the standard, the original allocator type is an enum (integer).
+ // Convert to pointer type, if required.
+ if (Allocator->getType()->isIntegerTy())
+ Allocator = CGF.Builder.CreateIntToPtr(Allocator, CGM.VoidPtrTy);
+ else if (Allocator->getType()->isPointerTy())
+ Allocator = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Allocator,
+ CGM.VoidPtrTy);
+ llvm::Value *Args[] = {ThreadID, Size, Allocator};
+
+ llvm::Value *Addr =
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_alloc), Args,
+ CVD->getName() + ".void.addr");
+ llvm::Value *FiniArgs[OMPAllocateCleanupTy::CleanupArgs] = {ThreadID, Addr,
+ Allocator};
+ llvm::FunctionCallee FiniRTLFn = createRuntimeFunction(OMPRTL__kmpc_free);
+
+ CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(NormalAndEHCleanup, FiniRTLFn,
+ llvm::makeArrayRef(FiniArgs));
+ Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Addr,
+ CGF.ConvertTypeForMem(CGM.getContext().getPointerType(CVD->getType())),
+ CVD->getName() + ".addr");
+ return Address(Addr, Align);
+}
+
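The variably-modified branch above rounds the runtime size up to the declared alignment with ((size + align - 1) / align) * align; for example, a 10-byte VLA with 8-byte alignment requests (10 + 7) / 8 * 8 = 16 bytes from __kmpc_alloc. Fixed-size types get the same rounding at compile time via CharUnits::alignTo, and the matching __kmpc_free is pushed as a normal-and-EH cleanup so the allocation is released on both exits from the scope.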
+llvm::Function *CGOpenMPSIMDRuntime::emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
llvm_unreachable("Not supported in SIMD-only mode");
}
-llvm::Value *CGOpenMPSIMDRuntime::emitTeamsOutlinedFunction(
+llvm::Function *CGOpenMPSIMDRuntime::emitTeamsOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
llvm_unreachable("Not supported in SIMD-only mode");
}
-llvm::Value *CGOpenMPSIMDRuntime::emitTaskOutlinedFunction(
+llvm::Function *CGOpenMPSIMDRuntime::emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
@@ -9585,7 +10588,7 @@ llvm::Value *CGOpenMPSIMDRuntime::emitTaskOutlinedFunction(
void CGOpenMPSIMDRuntime::emitParallelCall(CodeGenFunction &CGF,
SourceLocation Loc,
- llvm::Value *OutlinedFn,
+ llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond) {
llvm_unreachable("Not supported in SIMD-only mode");
@@ -9716,7 +10719,7 @@ void CGOpenMPSIMDRuntime::emitFlush(CodeGenFunction &CGF,
void CGOpenMPSIMDRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
- llvm::Value *TaskFunction,
+ llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds,
const Expr *IfCond,
const OMPTaskDataTy &Data) {
@@ -9725,7 +10728,7 @@ void CGOpenMPSIMDRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
void CGOpenMPSIMDRuntime::emitTaskLoopCall(
CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D,
- llvm::Value *TaskFunction, QualType SharedsTy, Address Shareds,
+ llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds,
const Expr *IfCond, const OMPTaskDataTy &Data) {
llvm_unreachable("Not supported in SIMD-only mode");
}
@@ -9785,9 +10788,10 @@ void CGOpenMPSIMDRuntime::emitTargetOutlinedFunction(
void CGOpenMPSIMDRuntime::emitTargetCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
- llvm::Value *OutlinedFn,
+ llvm::Function *OutlinedFn,
llvm::Value *OutlinedFnID,
- const Expr *IfCond, const Expr *Device) {
+ const Expr *IfCond,
+ const Expr *Device) {
llvm_unreachable("Not supported in SIMD-only mode");
}
@@ -9810,7 +10814,7 @@ llvm::Function *CGOpenMPSIMDRuntime::emitRegistrationFunction() {
void CGOpenMPSIMDRuntime::emitTeamsCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
SourceLocation Loc,
- llvm::Value *OutlinedFn,
+ llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars) {
llvm_unreachable("Not supported in SIMD-only mode");
}
@@ -9857,4 +10861,3 @@ CGOpenMPSIMDRuntime::getParameterAddress(CodeGenFunction &CGF,
const VarDecl *TargetParam) const {
llvm_unreachable("Not supported in SIMD-only mode");
}
-
diff --git a/lib/CodeGen/CGOpenMPRuntime.h b/lib/CodeGen/CGOpenMPRuntime.h
index 1822a6fd1974..3f842ce96407 100644
--- a/lib/CodeGen/CGOpenMPRuntime.h
+++ b/lib/CodeGen/CGOpenMPRuntime.h
@@ -1,9 +1,8 @@
//===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -272,7 +271,8 @@ protected:
virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; }
/// Emits \p Callee function call with arguments \p Args with location \p Loc.
- void emitCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *Callee,
+ void emitCall(CodeGenFunction &CGF, SourceLocation Loc,
+ llvm::FunctionCallee Callee,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits address of the word in a memory where current thread id is
@@ -636,6 +636,17 @@ private:
/// must be emitted.
llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables;
+ /// Flag for keeping track of whether a requires unified_shared_memory
+ /// directive is present.
+ bool HasRequiresUnifiedSharedMemory = false;
+
+ /// Flag for keeping track of whether a target region has been emitted.
+ bool HasEmittedTargetRegion = false;
+
+ /// Flag for keeping track of whether a device routine has been emitted.
+ /// Device routines are specific to the target device.
+ bool HasEmittedDeclareTargetRegion = false;
+
/// Creates and registers offloading binary descriptor for the current
/// compilation unit. The function that does the registration is returned.
llvm::Function *createOffloadingBinaryDescriptorRegistration();
@@ -672,23 +683,27 @@ private:
/// Returns specified OpenMP runtime function.
/// \param Function OpenMP runtime function.
/// \return Specified function.
- llvm::Constant *createRuntimeFunction(unsigned Function);
+ llvm::FunctionCallee createRuntimeFunction(unsigned Function);
/// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
- llvm::Constant *createForStaticInitFunction(unsigned IVSize, bool IVSigned);
+ llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
+ bool IVSigned);
/// Returns __kmpc_dispatch_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
- llvm::Constant *createDispatchInitFunction(unsigned IVSize, bool IVSigned);
+ llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize,
+ bool IVSigned);
/// Returns __kmpc_dispatch_next_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
- llvm::Constant *createDispatchNextFunction(unsigned IVSize, bool IVSigned);
+ llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize,
+ bool IVSigned);
/// Returns __kmpc_dispatch_fini_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
- llvm::Constant *createDispatchFiniFunction(unsigned IVSize, bool IVSigned);
+ llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize,
+ bool IVSigned);
/// If the specified mangled name is not in the module, create and
/// return threadprivate cache object. This object is a pointer's worth of
@@ -704,7 +719,8 @@ private:
/// must be the same.
/// \param Name Name of the variable.
llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
- const llvm::Twine &Name);
+ const llvm::Twine &Name,
+ unsigned AddressSpace = 0);
/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;
@@ -724,7 +740,7 @@ private:
struct TaskResultTy {
llvm::Value *NewTask = nullptr;
- llvm::Value *TaskEntry = nullptr;
+ llvm::Function *TaskEntry = nullptr;
llvm::Value *NewTaskNewTaskTTy = nullptr;
LValue TDBase;
const RecordDecl *KmpTaskTQTyRD = nullptr;
@@ -754,15 +770,24 @@ private:
/// state, list of privates etc.
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
- llvm::Value *TaskFunction, QualType SharedsTy,
+ llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const OMPTaskDataTy &Data);
+ /// Returns default address space for the constant firstprivates, 0 by
+ /// default.
+ virtual unsigned getDefaultFirstprivateAddressSpace() const { return 0; }
+
public:
explicit CGOpenMPRuntime(CodeGenModule &CGM)
: CGOpenMPRuntime(CGM, ".", ".") {}
virtual ~CGOpenMPRuntime() {}
virtual void clear();
+ /// Checks if the \p Body is the \a CompoundStmt and returns its child
+ /// statement iff there is only one that is not evaluatable at compile
+ /// time.
+ static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body);
+
/// Get the platform-specific name separator.
std::string getName(ArrayRef<StringRef> Parts) const;
@@ -781,7 +806,7 @@ public:
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
- virtual llvm::Value *emitParallelOutlinedFunction(
+ virtual llvm::Function *emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
@@ -793,7 +818,7 @@ public:
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
- virtual llvm::Value *emitTeamsOutlinedFunction(
+ virtual llvm::Function *emitTeamsOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
@@ -812,7 +837,7 @@ public:
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
- virtual llvm::Value *emitTaskOutlinedFunction(
+ virtual llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
@@ -833,7 +858,7 @@ public:
/// specified, nullptr otherwise.
///
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
- llvm::Value *OutlinedFn,
+ llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond);
@@ -1096,8 +1121,8 @@ public:
SourceLocation Loc);
/// Returns the address of the variable marked as declare target with link
- /// clause.
- virtual Address getAddrOfDeclareTargetLink(const VarDecl *VD);
+ /// clause OR as declare target with to clause and unified memory.
+ virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD);
/// Emit a code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
@@ -1162,7 +1187,7 @@ public:
/// state, list of privates etc.
virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
- llvm::Value *TaskFunction, QualType SharedsTy,
+ llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data);
@@ -1195,10 +1220,11 @@ public:
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
- virtual void emitTaskLoopCall(
- CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D,
- llvm::Value *TaskFunction, QualType SharedsTy, Address Shareds,
- const Expr *IfCond, const OMPTaskDataTy &Data);
+ virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
+ const OMPLoopDirective &D,
+ llvm::Function *TaskFunction,
+ QualType SharedsTy, Address Shareds,
+ const Expr *IfCond, const OMPTaskDataTy &Data);
/// Emit code for the directive that does not require outlining.
///
@@ -1219,12 +1245,12 @@ public:
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
- llvm::Value *emitReductionFunction(CodeGenModule &CGM, SourceLocation Loc,
- llvm::Type *ArgsType,
- ArrayRef<const Expr *> Privates,
- ArrayRef<const Expr *> LHSExprs,
- ArrayRef<const Expr *> RHSExprs,
- ArrayRef<const Expr *> ReductionOps);
+ llvm::Function *emitReductionFunction(SourceLocation Loc,
+ llvm::Type *ArgsType,
+ ArrayRef<const Expr *> Privates,
+ ArrayRef<const Expr *> LHSExprs,
+ ArrayRef<const Expr *> RHSExprs,
+ ArrayRef<const Expr *> ReductionOps);
/// Emits single reduction combiner
void emitSingleReductionCombiner(CodeGenFunction &CGF,
@@ -1389,7 +1415,7 @@ public:
/// target directive, or null if no device clause is used.
virtual void emitTargetCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
- llvm::Value *OutlinedFn,
+ llvm::Function *OutlinedFn,
llvm::Value *OutlinedFnID, const Expr *IfCond,
const Expr *Device);
@@ -1409,11 +1435,20 @@ public:
virtual void registerTargetGlobalVariable(const VarDecl *VD,
llvm::Constant *Addr);
+ /// Registers provided target firstprivate variable as global on the
+ /// target.
+ llvm::Constant *registerTargetFirstprivateCopy(CodeGenFunction &CGF,
+ const VarDecl *VD);
+
/// Emit the global \a GD if it is meaningful for the target. Returns
/// if it was emitted successfully.
/// \param GD Global to scan.
virtual bool emitTargetGlobal(GlobalDecl GD);
+ /// Creates and returns a registration function for when at least one
+ /// requires directive was used in the current module.
+ llvm::Function *emitRequiresDirectiveRegFun();
+
/// Creates the offloading descriptor in the event any target region
/// was emitted in the current module and return the function that registers
/// it.
@@ -1429,7 +1464,7 @@ public:
///
virtual void emitTeamsCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
- SourceLocation Loc, llvm::Value *OutlinedFn,
+ SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars);
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
@@ -1550,18 +1585,18 @@ public:
/// schedule clause.
virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind,
- const Expr *&ChunkExpr) const {}
+ const Expr *&ChunkExpr) const;
/// Emits call of the outlined function with the provided arguments,
/// translating these arguments to correct target-specific arguments.
virtual void
emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc,
- llvm::Value *OutlinedFn,
+ llvm::FunctionCallee OutlinedFn,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits OpenMP-specific function prolog.
/// Required for device constructs.
- virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D) {}
+ virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D);
/// Gets the OpenMP-specific address of the local variable.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF,
@@ -1582,8 +1617,15 @@ public:
/// Perform check on requires decl to ensure that target architecture
/// supports unified addressing
- virtual void checkArchForUnifiedAddressing(CodeGenModule &CGM,
- const OMPRequiresDecl *D) const {}
+ virtual void checkArchForUnifiedAddressing(const OMPRequiresDecl *D);
+
+ /// Checks if the variable has associated OMPAllocateDeclAttr attribute with
+ /// the predefined allocator and translates it into the corresponding address
+ /// space.
+ virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS);
+
+ /// Return whether the unified_shared_memory has been specified.
+ bool hasRequiresUnifiedSharedMemory() const;
};
/// Class supports emission of SIMD-only code.
@@ -1600,7 +1642,7 @@ public:
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
- llvm::Value *
+ llvm::Function *
emitParallelOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
@@ -1614,7 +1656,7 @@ public:
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
- llvm::Value *
+ llvm::Function *
emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
@@ -1635,7 +1677,7 @@ public:
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
- llvm::Value *emitTaskOutlinedFunction(
+ llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
@@ -1652,7 +1694,7 @@ public:
/// specified, nullptr otherwise.
///
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
- llvm::Value *OutlinedFn,
+ llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond) override;
@@ -1878,8 +1920,9 @@ public:
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
- const OMPExecutableDirective &D, llvm::Value *TaskFunction,
- QualType SharedsTy, Address Shareds, const Expr *IfCond,
+ const OMPExecutableDirective &D,
+ llvm::Function *TaskFunction, QualType SharedsTy,
+ Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
/// Emit task region for the taskloop directive. The taskloop region is
@@ -1912,7 +1955,7 @@ public:
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
- const OMPLoopDirective &D, llvm::Value *TaskFunction,
+ const OMPLoopDirective &D, llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
@@ -2055,7 +2098,7 @@ public:
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
void emitTargetCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
- llvm::Value *OutlinedFn, llvm::Value *OutlinedFnID,
+ llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID,
const Expr *IfCond, const Expr *Device) override;
/// Emit the target regions enclosed in \a GD function definition or
@@ -2088,7 +2131,7 @@ public:
/// variables used in \a OutlinedFn function.
///
void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
- SourceLocation Loc, llvm::Value *OutlinedFn,
+ SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars) override;
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
@@ -2147,6 +2190,12 @@ public:
/// \param TargetParam Corresponding target-specific parameter.
Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam,
const VarDecl *TargetParam) const override;
+
+ /// Gets the OpenMP-specific address of the local variable.
+ Address getAddressOfLocalVariable(CodeGenFunction &CGF,
+ const VarDecl *VD) override {
+ return Address::invalid();
+ }
};
} // namespace CodeGen
diff --git a/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp b/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
index 7046ab3aa35c..48dcbbf3cabd 100644
--- a/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
+++ b/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
@@ -1,9 +1,8 @@
//===---- CGOpenMPRuntimeNVPTX.cpp - Interface to OpenMP NVPTX Runtimes ---===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -61,13 +60,19 @@ enum OpenMPRTLFunctionNVPTX {
/// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
/// lane_offset, int16_t shortCircuit),
/// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
- OMPRTL_NVPTX__kmpc_parallel_reduce_nowait_v2,
- /// Call to __kmpc_nvptx_teams_reduce_nowait_simple(ident_t *loc, kmp_int32
- /// global_tid, kmp_critical_name *lck)
- OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_simple,
- /// Call to __kmpc_nvptx_teams_end_reduce_nowait_simple(ident_t *loc,
- /// kmp_int32 global_tid, kmp_critical_name *lck)
- OMPRTL_NVPTX__kmpc_nvptx_teams_end_reduce_nowait_simple,
+ OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2,
+ /// Call to __kmpc_nvptx_teams_reduce_nowait_v2(ident_t *loc, kmp_int32
+ /// global_tid, void *global_buffer, int32_t num_of_records, void*
+ /// reduce_data,
+ /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
+ /// lane_offset, int16_t shortCircuit),
+ /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num), void
+ /// (*kmp_ListToGlobalCpyFctPtr)(void *buffer, int idx, void *reduce_data),
+ /// void (*kmp_GlobalToListCpyFctPtr)(void *buffer, int idx,
+ /// void *reduce_data), void (*kmp_GlobalToListCpyPtrsFctPtr)(void *buffer,
+ /// int idx, void *reduce_data), void (*kmp_GlobalToListRedFctPtr)(void
+ /// *buffer, int idx, void *reduce_data));
+ OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2,
/// Call to __kmpc_nvptx_end_reduce_nowait(int32_t global_tid);
OMPRTL_NVPTX__kmpc_end_reduce_nowait,
/// Call to void __kmpc_data_sharing_init_stack();
@@ -106,17 +111,18 @@ enum OpenMPRTLFunctionNVPTX {
/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
class NVPTXActionTy final : public PrePostActionTy {
- llvm::Value *EnterCallee = nullptr;
+ llvm::FunctionCallee EnterCallee = nullptr;
ArrayRef<llvm::Value *> EnterArgs;
- llvm::Value *ExitCallee = nullptr;
+ llvm::FunctionCallee ExitCallee = nullptr;
ArrayRef<llvm::Value *> ExitArgs;
bool Conditional = false;
llvm::BasicBlock *ContBlock = nullptr;
public:
- NVPTXActionTy(llvm::Value *EnterCallee, ArrayRef<llvm::Value *> EnterArgs,
- llvm::Value *ExitCallee, ArrayRef<llvm::Value *> ExitArgs,
- bool Conditional = false)
+ NVPTXActionTy(llvm::FunctionCallee EnterCallee,
+ ArrayRef<llvm::Value *> EnterArgs,
+ llvm::FunctionCallee ExitCallee,
+ ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
: EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
ExitArgs(ExitArgs), Conditional(Conditional) {}
void Enter(CodeGenFunction &CGF) override {
@@ -215,16 +221,13 @@ static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
}
-typedef std::pair<CharUnits /*Align*/, const ValueDecl *> VarsDataTy;
-static bool stable_sort_comparator(const VarsDataTy P1, const VarsDataTy P2) {
- return P1.first > P2.first;
-}
static RecordDecl *buildRecordForGlobalizedVars(
ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
- &MappedDeclsFields) {
+ &MappedDeclsFields, int BufSize) {
+ using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>;
if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
return nullptr;
SmallVector<VarsDataTy, 4> GlobalizedVars;
@@ -236,8 +239,10 @@ static RecordDecl *buildRecordForGlobalizedVars(
D);
for (const ValueDecl *D : EscapedDeclsForTeams)
GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
- std::stable_sort(GlobalizedVars.begin(), GlobalizedVars.end(),
- stable_sort_comparator);
+ llvm::stable_sort(GlobalizedVars, [](VarsDataTy L, VarsDataTy R) {
+ return L.first > R.first;
+ });
+
// Build struct _globalized_locals_ty {
// /* globalized vars */[WarpSize] align (max(decl_align,
// GlobalMemoryAlignment))
@@ -270,7 +275,7 @@ static RecordDecl *buildRecordForGlobalizedVars(
Field->addAttr(*I);
}
} else {
- llvm::APInt ArraySize(32, WarpSize);
+ llvm::APInt ArraySize(32, BufSize);
Type = C.getConstantArrayType(Type, ArraySize, ArrayType::Normal, 0);
Field = FieldDecl::Create(
C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
@@ -312,6 +317,9 @@ class CheckVarsEscapingDeclContext final
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
return;
VD = cast<ValueDecl>(VD->getCanonicalDecl());
+ // Use user-specified allocation.
+ if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>())
+ return;
// Variables captured by value must be globalized.
if (auto *CSI = CGF.CapturedStmtInfo) {
if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
@@ -419,7 +427,7 @@ class CheckVarsEscapingDeclContext final
EscapedDeclsForParallel = EscapedDecls.getArrayRef();
GlobalizedRD = ::buildRecordForGlobalizedVars(
CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
- MappedDeclsFields);
+ MappedDeclsFields, WarpSize);
}
public:
@@ -705,112 +713,37 @@ getDataSharingMode(CodeGenModule &CGM) {
: CGOpenMPRuntimeNVPTX::Generic;
}
-/// Checks if the expression is constant or does not have non-trivial function
-/// calls.
-static bool isTrivial(ASTContext &Ctx, const Expr * E) {
- // We can skip constant expressions.
- // We can skip expressions with trivial calls or simple expressions.
- return (E->isEvaluatable(Ctx, Expr::SE_AllowUndefinedBehavior) ||
- !E->hasNonTrivialCall(Ctx)) &&
- !E->HasSideEffects(Ctx, /*IncludePossibleEffects=*/true);
-}
-
-/// Checks if the \p Body is the \a CompoundStmt and returns its child statement
-/// iff there is only one that is not evaluatable at the compile time.
-static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body) {
- if (const auto *C = dyn_cast<CompoundStmt>(Body)) {
- const Stmt *Child = nullptr;
- for (const Stmt *S : C->body()) {
- if (const auto *E = dyn_cast<Expr>(S)) {
- if (isTrivial(Ctx, E))
- continue;
- }
- // Some of the statements can be ignored.
- if (isa<AsmStmt>(S) || isa<NullStmt>(S) || isa<OMPFlushDirective>(S) ||
- isa<OMPBarrierDirective>(S) || isa<OMPTaskyieldDirective>(S))
- continue;
- // Analyze declarations.
- if (const auto *DS = dyn_cast<DeclStmt>(S)) {
- if (llvm::all_of(DS->decls(), [&Ctx](const Decl *D) {
- if (isa<EmptyDecl>(D) || isa<DeclContext>(D) ||
- isa<TypeDecl>(D) || isa<PragmaCommentDecl>(D) ||
- isa<PragmaDetectMismatchDecl>(D) || isa<UsingDecl>(D) ||
- isa<UsingDirectiveDecl>(D) ||
- isa<OMPDeclareReductionDecl>(D) ||
- isa<OMPThreadPrivateDecl>(D))
- return true;
- const auto *VD = dyn_cast<VarDecl>(D);
- if (!VD)
- return false;
- return VD->isConstexpr() ||
- ((VD->getType().isTrivialType(Ctx) ||
- VD->getType()->isReferenceType()) &&
- (!VD->hasInit() || isTrivial(Ctx, VD->getInit())));
- }))
- continue;
- }
- // Found multiple children - cannot get the one child only.
- if (Child)
- return Body;
- Child = S;
- }
- if (Child)
- return Child;
- }
- return Body;
-}
-
-/// Check if the parallel directive has an 'if' clause with non-constant or
-/// false condition. Also, check if the number of threads is strictly specified
-/// and run those directives in non-SPMD mode.
-static bool hasParallelIfNumThreadsClause(ASTContext &Ctx,
- const OMPExecutableDirective &D) {
- if (D.hasClausesOfKind<OMPNumThreadsClause>())
- return true;
- for (const auto *C : D.getClausesOfKind<OMPIfClause>()) {
- OpenMPDirectiveKind NameModifier = C->getNameModifier();
- if (NameModifier != OMPD_parallel && NameModifier != OMPD_unknown)
- continue;
- const Expr *Cond = C->getCondition();
- bool Result;
- if (!Cond->EvaluateAsBooleanCondition(Result, Ctx) || !Result)
- return true;
- }
- return false;
-}
-
/// Check for inner (nested) SPMD construct, if any
static bool hasNestedSPMDDirective(ASTContext &Ctx,
const OMPExecutableDirective &D) {
const auto *CS = D.getInnermostCapturedStmt();
const auto *Body =
CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
- const Stmt *ChildStmt = getSingleCompoundChild(Ctx, Body);
+ const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
- if (const auto *NestedDir = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
+ if (const auto *NestedDir =
+ dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
switch (D.getDirectiveKind()) {
case OMPD_target:
- if (isOpenMPParallelDirective(DKind) &&
- !hasParallelIfNumThreadsClause(Ctx, *NestedDir))
+ if (isOpenMPParallelDirective(DKind))
return true;
if (DKind == OMPD_teams) {
Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
/*IgnoreCaptured=*/true);
if (!Body)
return false;
- ChildStmt = getSingleCompoundChild(Ctx, Body);
- if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
+ ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
+ if (const auto *NND =
+ dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
DKind = NND->getDirectiveKind();
- if (isOpenMPParallelDirective(DKind) &&
- !hasParallelIfNumThreadsClause(Ctx, *NND))
+ if (isOpenMPParallelDirective(DKind))
return true;
}
}
return false;
case OMPD_target_teams:
- return isOpenMPParallelDirective(DKind) &&
- !hasParallelIfNumThreadsClause(Ctx, *NestedDir);
+ return isOpenMPParallelDirective(DKind);
case OMPD_target_simd:
case OMPD_target_parallel:
case OMPD_target_parallel_for:
@@ -829,6 +762,7 @@ static bool hasNestedSPMDDirective(ASTContext &Ctx,
case OMPD_cancellation_point:
case OMPD_ordered:
case OMPD_threadprivate:
+ case OMPD_allocate:
case OMPD_task:
case OMPD_simd:
case OMPD_sections:
@@ -859,6 +793,7 @@ static bool hasNestedSPMDDirective(ASTContext &Ctx,
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_requires:
@@ -882,10 +817,10 @@ static bool supportsSPMDExecutionMode(ASTContext &Ctx,
case OMPD_target_parallel_for_simd:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd:
- return !hasParallelIfNumThreadsClause(Ctx, D);
case OMPD_target_simd:
- case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_simd:
+ return true;
+ case OMPD_target_teams_distribute:
return false;
case OMPD_parallel:
case OMPD_for:
@@ -897,6 +832,7 @@ static bool supportsSPMDExecutionMode(ASTContext &Ctx,
case OMPD_cancellation_point:
case OMPD_ordered:
case OMPD_threadprivate:
+ case OMPD_allocate:
case OMPD_task:
case OMPD_simd:
case OMPD_sections:
@@ -927,6 +863,7 @@ static bool supportsSPMDExecutionMode(ASTContext &Ctx,
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_requires:
@@ -958,9 +895,10 @@ static bool hasNestedLightweightDirective(ASTContext &Ctx,
const auto *CS = D.getInnermostCapturedStmt();
const auto *Body =
CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
- const Stmt *ChildStmt = getSingleCompoundChild(Ctx, Body);
+ const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
- if (const auto *NestedDir = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
+ if (const auto *NestedDir =
+ dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
switch (D.getDirectiveKind()) {
case OMPD_target:
@@ -968,13 +906,16 @@ static bool hasNestedLightweightDirective(ASTContext &Ctx,
isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
hasStaticScheduling(*NestedDir))
return true;
+ if (DKind == OMPD_teams_distribute_simd || DKind == OMPD_simd)
+ return true;
if (DKind == OMPD_parallel) {
Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
/*IgnoreCaptured=*/true);
if (!Body)
return false;
- ChildStmt = getSingleCompoundChild(Ctx, Body);
- if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
+ ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
+ if (const auto *NND =
+ dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
DKind = NND->getDirectiveKind();
if (isOpenMPWorksharingDirective(DKind) &&
isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
@@ -985,8 +926,9 @@ static bool hasNestedLightweightDirective(ASTContext &Ctx,
/*IgnoreCaptured=*/true);
if (!Body)
return false;
- ChildStmt = getSingleCompoundChild(Ctx, Body);
- if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
+ ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
+ if (const auto *NND =
+ dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
DKind = NND->getDirectiveKind();
if (isOpenMPParallelDirective(DKind) &&
isOpenMPWorksharingDirective(DKind) &&
@@ -997,8 +939,9 @@ static bool hasNestedLightweightDirective(ASTContext &Ctx,
/*IgnoreCaptured=*/true);
if (!Body)
return false;
- ChildStmt = getSingleCompoundChild(Ctx, Body);
- if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
+ ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
+ if (const auto *NND =
+ dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
DKind = NND->getDirectiveKind();
if (isOpenMPWorksharingDirective(DKind) &&
isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
@@ -1013,13 +956,16 @@ static bool hasNestedLightweightDirective(ASTContext &Ctx,
isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
hasStaticScheduling(*NestedDir))
return true;
+ if (DKind == OMPD_distribute_simd || DKind == OMPD_simd)
+ return true;
if (DKind == OMPD_parallel) {
Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
/*IgnoreCaptured=*/true);
if (!Body)
return false;
- ChildStmt = getSingleCompoundChild(Ctx, Body);
- if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
+ ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
+ if (const auto *NND =
+ dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
DKind = NND->getDirectiveKind();
if (isOpenMPWorksharingDirective(DKind) &&
isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
@@ -1028,6 +974,8 @@ static bool hasNestedLightweightDirective(ASTContext &Ctx,
}
return false;
case OMPD_target_parallel:
+ if (DKind == OMPD_simd)
+ return true;
return isOpenMPWorksharingDirective(DKind) &&
isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir);
case OMPD_target_teams_distribute:
@@ -1047,6 +995,7 @@ static bool hasNestedLightweightDirective(ASTContext &Ctx,
case OMPD_cancellation_point:
case OMPD_ordered:
case OMPD_threadprivate:
+ case OMPD_allocate:
case OMPD_task:
case OMPD_simd:
case OMPD_sections:
@@ -1077,6 +1026,7 @@ static bool hasNestedLightweightDirective(ASTContext &Ctx,
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_requires:
@@ -1107,8 +1057,9 @@ static bool supportsLightweightRuntime(ASTContext &Ctx,
// (Last|First)-privates must be shared in parallel region.
return hasStaticScheduling(D);
case OMPD_target_simd:
- case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_simd:
+ return true;
+ case OMPD_target_teams_distribute:
return false;
case OMPD_parallel:
case OMPD_for:
@@ -1120,6 +1071,7 @@ static bool supportsLightweightRuntime(ASTContext &Ctx,
case OMPD_cancellation_point:
case OMPD_ordered:
case OMPD_threadprivate:
+ case OMPD_allocate:
case OMPD_task:
case OMPD_simd:
case OMPD_sections:
@@ -1150,6 +1102,7 @@ static bool supportsLightweightRuntime(ASTContext &Ctx,
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_requires:
@@ -1512,14 +1465,14 @@ void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
// directive.
auto *ParallelFnTy =
llvm::FunctionType::get(CGM.VoidTy, {CGM.Int16Ty, CGM.Int32Ty},
- /*isVarArg=*/false)
- ->getPointerTo();
- llvm::Value *WorkFnCast = Bld.CreateBitCast(WorkID, ParallelFnTy);
+ /*isVarArg=*/false);
+ llvm::Value *WorkFnCast =
+ Bld.CreateBitCast(WorkID, ParallelFnTy->getPointerTo());
// Insert call to work function via shared wrapper. The shared
// wrapper takes two arguments:
// - the parallelism level;
// - the thread ID;
- emitCall(CGF, WST.Loc, WorkFnCast,
+ emitCall(CGF, WST.Loc, {ParallelFnTy, WorkFnCast},
{Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
// Go to end of parallel region.
CGF.EmitBranch(TerminateBB);
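Since emitCall now takes an llvm::FunctionCallee, the callee here is written as the brace-initialized pair {ParallelFnTy, WorkFnCast}: when only a bit-cast function pointer is available, the function type is supplied explicitly alongside the pointer value.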
@@ -1547,9 +1500,9 @@ void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
/// implementation. Specialized for the NVPTX device.
/// \param Function OpenMP runtime function.
/// \return Specified function.
-llvm::Constant *
+llvm::FunctionCallee
CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
- llvm::Constant *RTLFn = nullptr;
+ llvm::FunctionCallee RTLFn = nullptr;
switch (static_cast<OpenMPRTLFunctionNVPTX>(Function)) {
case OMPRTL_NVPTX__kmpc_kernel_init: {
// Build void __kmpc_kernel_init(kmp_int32 thread_limit, int16_t
@@ -1647,7 +1600,7 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int64");
break;
}
- case OMPRTL_NVPTX__kmpc_parallel_reduce_nowait_v2: {
+ case OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2: {
// Build int32_t kmpc_nvptx_parallel_reduce_nowait_v2(ident_t *loc,
// kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size, void*
// reduce_data, void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t
@@ -1684,28 +1637,47 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
FnTy, /*Name=*/"__kmpc_nvptx_end_reduce_nowait");
break;
}
- case OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_simple: {
- // Build __kmpc_nvptx_teams_reduce_nowait_simple(ident_t *loc, kmp_int32
- // global_tid, kmp_critical_name *lck)
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), CGM.Int32Ty,
- llvm::PointerType::getUnqual(getKmpCriticalNameTy())};
+ case OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2: {
+ // Build int32_t __kmpc_nvptx_teams_reduce_nowait_v2(ident_t *loc, kmp_int32
+ // global_tid, void *global_buffer, int32_t num_of_records, void*
+ // reduce_data,
+ // void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
+ // lane_offset, int16_t shortCircuit),
+ // void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num), void
+ // (*kmp_ListToGlobalCpyFctPtr)(void *buffer, int idx, void *reduce_data),
+ // void (*kmp_GlobalToListCpyFctPtr)(void *buffer, int idx,
+ // void *reduce_data), void (*kmp_GlobalToListCpyPtrsFctPtr)(void *buffer,
+ // int idx, void *reduce_data), void (*kmp_GlobalToListRedFctPtr)(void
+ // *buffer, int idx, void *reduce_data));
+ llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
+ CGM.Int16Ty, CGM.Int16Ty};
+ auto *ShuffleReduceFnTy =
+ llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
+ /*isVarArg=*/false);
+ llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
+ auto *InterWarpCopyFnTy =
+ llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
+ /*isVarArg=*/false);
+ llvm::Type *GlobalListTypeParams[] = {CGM.VoidPtrTy, CGM.IntTy,
+ CGM.VoidPtrTy};
+ auto *GlobalListFnTy =
+ llvm::FunctionType::get(CGM.VoidTy, GlobalListTypeParams,
+ /*isVarArg=*/false);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
+ CGM.Int32Ty,
+ CGM.VoidPtrTy,
+ CGM.Int32Ty,
+ CGM.VoidPtrTy,
+ ShuffleReduceFnTy->getPointerTo(),
+ InterWarpCopyFnTy->getPointerTo(),
+ GlobalListFnTy->getPointerTo(),
+ GlobalListFnTy->getPointerTo(),
+ GlobalListFnTy->getPointerTo(),
+ GlobalListFnTy->getPointerTo()};
auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(
- FnTy, /*Name=*/"__kmpc_nvptx_teams_reduce_nowait_simple");
- break;
- }
- case OMPRTL_NVPTX__kmpc_nvptx_teams_end_reduce_nowait_simple: {
- // Build __kmpc_nvptx_teams_end_reduce_nowait_simple(ident_t *loc, kmp_int32
- // global_tid, kmp_critical_name *lck)
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), CGM.Int32Ty,
- llvm::PointerType::getUnqual(getKmpCriticalNameTy())};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(
- FnTy, /*Name=*/"__kmpc_nvptx_teams_end_reduce_nowait_simple");
+ FnTy, /*Name=*/"__kmpc_nvptx_teams_reduce_nowait_v2");
break;
}
case OMPRTL_NVPTX__kmpc_data_sharing_init_stack: {
@@ -1806,7 +1778,8 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
- cast<llvm::Function>(RTLFn)->addFnAttr(llvm::Attribute::Convergent);
+ cast<llvm::Function>(RTLFn.getCallee())
+ ->addFnAttr(llvm::Attribute::Convergent);
break;
}
case OMPRTL__kmpc_barrier_simple_spmd: {
@@ -1817,7 +1790,8 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn =
CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier_simple_spmd");
- cast<llvm::Function>(RTLFn)->addFnAttr(llvm::Attribute::Convergent);
+ cast<llvm::Function>(RTLFn.getCallee())
+ ->addFnAttr(llvm::Attribute::Convergent);
break;
}
}
@@ -1928,7 +1902,7 @@ void CGOpenMPRuntimeNVPTX::emitNumTeamsClause(CodeGenFunction &CGF,
const Expr *ThreadLimit,
SourceLocation Loc) {}
-llvm::Value *CGOpenMPRuntimeNVPTX::emitParallelOutlinedFunction(
+llvm::Function *CGOpenMPRuntimeNVPTX::emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
// Emit target region as a standalone region.
@@ -1955,6 +1929,11 @@ llvm::Value *CGOpenMPRuntimeNVPTX::emitParallelOutlinedFunction(
auto *OutlinedFun =
cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
D, ThreadIDVar, InnermostKind, CodeGen));
+ if (CGM.getLangOpts().Optimize) {
+ OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
+ OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
+ OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
+ }
IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
IsInTTDRegion = PrevIsInTTDRegion;
if (getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD &&
@@ -1976,11 +1955,11 @@ getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
"expected teams directive.");
const OMPExecutableDirective *Dir = &D;
if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
- if (const Stmt *S = getSingleCompoundChild(
+ if (const Stmt *S = CGOpenMPRuntime::getSingleCompoundChild(
Ctx,
D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
/*IgnoreCaptured=*/true))) {
- Dir = dyn_cast<OMPExecutableDirective>(S);
+ Dir = dyn_cast_or_null<OMPExecutableDirective>(S);
if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind()))
Dir = nullptr;
}
@@ -2005,7 +1984,7 @@ getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
}
}
-llvm::Value *CGOpenMPRuntimeNVPTX::emitTeamsOutlinedFunction(
+llvm::Function *CGOpenMPRuntimeNVPTX::emitTeamsOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
SourceLocation Loc = D.getBeginLoc();
@@ -2014,13 +1993,14 @@ llvm::Value *CGOpenMPRuntimeNVPTX::emitTeamsOutlinedFunction(
llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
// Globalize team reductions variable unconditionally in all modes.
- getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
+ if (getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
+ getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD) {
getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
if (!LastPrivatesReductions.empty()) {
GlobalizedRD = ::buildRecordForGlobalizedVars(
CGM.getContext(), llvm::None, LastPrivatesReductions,
- MappedDeclsFields);
+ MappedDeclsFields, WarpSize);
}
} else if (!LastPrivatesReductions.empty()) {
assert(!TeamAndReductions.first &&
@@ -2068,12 +2048,13 @@ llvm::Value *CGOpenMPRuntimeNVPTX::emitTeamsOutlinedFunction(
}
} Action(Loc, GlobalizedRD, MappedDeclsFields);
CodeGen.setAction(Action);
- llvm::Value *OutlinedFunVal = CGOpenMPRuntime::emitTeamsOutlinedFunction(
+ llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
D, ThreadIDVar, InnermostKind, CodeGen);
- llvm::Function *OutlinedFun = cast<llvm::Function>(OutlinedFunVal);
- OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
- OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
- OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
+ if (CGM.getLangOpts().Optimize) {
+ OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
+ OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
+ OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
+ }
return OutlinedFun;
}
@@ -2235,8 +2216,7 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
.getPointerType(CGM.getContext().VoidPtrTy)
.castAs<PointerType>());
llvm::Value *GlobalRecValue =
- Bld.CreateConstInBoundsGEP(FrameAddr, Offset, CharUnits::One())
- .getPointer();
+ Bld.CreateConstInBoundsGEP(FrameAddr, Offset).getPointer();
I->getSecond().GlobalRecordAddr = GlobalRecValue;
I->getSecond().IsInSPMDModeFlag = nullptr;
GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
@@ -2429,7 +2409,7 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsEpilog(CodeGenFunction &CGF,
void CGOpenMPRuntimeNVPTX::emitTeamsCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
SourceLocation Loc,
- llvm::Value *OutlinedFn,
+ llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars) {
if (!CGF.HaveInsertPoint())
return;
@@ -2446,7 +2426,7 @@ void CGOpenMPRuntimeNVPTX::emitTeamsCall(CodeGenFunction &CGF,
}
void CGOpenMPRuntimeNVPTX::emitParallelCall(
- CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
+ CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
if (!CGF.HaveInsertPoint())
return;
@@ -2536,8 +2516,7 @@ void CGOpenMPRuntimeNVPTX::emitNonSPMDParallelCall(
SharedArgs, Ctx.getPointerType(Ctx.getPointerType(Ctx.VoidPtrTy))
.castAs<PointerType>());
for (llvm::Value *V : CapturedVars) {
- Address Dst = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx,
- CGF.getPointerSize());
+ Address Dst = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
llvm::Value *PtrV;
if (V->getType()->isIntegerTy())
PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
@@ -2625,7 +2604,7 @@ void CGOpenMPRuntimeNVPTX::emitNonSPMDParallelCall(
}
void CGOpenMPRuntimeNVPTX::emitSPMDParallelCall(
- CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
+ CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
// Just call the outlined function to execute the parallel region.
// OutlinedFn(&GTid, &zero, CapturedStruct);
@@ -2846,7 +2825,7 @@ static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
Address ElemPtr = DestAddr;
Address Ptr = SrcAddr;
Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
- Bld.CreateConstGEP(SrcAddr, 1, Size), CGF.VoidPtrTy);
+ Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy);
for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
if (Size < CharUnits::fromQuantity(IntSize))
continue;
@@ -2881,10 +2860,8 @@ static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
CGF, CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc),
IntType, Offset, Loc);
CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType);
- Address LocalPtr =
- Bld.CreateConstGEP(Ptr, 1, CharUnits::fromQuantity(IntSize));
- Address LocalElemPtr =
- Bld.CreateConstGEP(ElemPtr, 1, CharUnits::fromQuantity(IntSize));
+ Address LocalPtr = Bld.CreateConstGEP(Ptr, 1);
+ Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
CGF.EmitBranch(PreCondBB);
@@ -2894,9 +2871,8 @@ static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
CGF, CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc),
IntType, Offset, Loc);
CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType);
- Ptr = Bld.CreateConstGEP(Ptr, 1, CharUnits::fromQuantity(IntSize));
- ElemPtr =
- Bld.CreateConstGEP(ElemPtr, 1, CharUnits::fromQuantity(IntSize));
+ Ptr = Bld.CreateConstGEP(Ptr, 1);
+ ElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
}
Size = Size % IntSize;
}
@@ -2959,16 +2935,14 @@ static void emitReductionListCopy(
switch (Action) {
case RemoteLaneToThread: {
// Step 1.1: Get the address for the src element in the Reduce list.
- Address SrcElementPtrAddr =
- Bld.CreateConstArrayGEP(SrcBase, Idx, CGF.getPointerSize());
+ Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
SrcElementAddr = CGF.EmitLoadOfPointer(
SrcElementPtrAddr,
C.getPointerType(Private->getType())->castAs<PointerType>());
// Step 1.2: Create a temporary to store the element in the destination
// Reduce list.
- DestElementPtrAddr =
- Bld.CreateConstArrayGEP(DestBase, Idx, CGF.getPointerSize());
+ DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
DestElementAddr =
CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
ShuffleInElement = true;
@@ -2977,16 +2951,14 @@ static void emitReductionListCopy(
}
case ThreadCopy: {
// Step 1.1: Get the address for the src element in the Reduce list.
- Address SrcElementPtrAddr =
- Bld.CreateConstArrayGEP(SrcBase, Idx, CGF.getPointerSize());
+ Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
SrcElementAddr = CGF.EmitLoadOfPointer(
SrcElementPtrAddr,
C.getPointerType(Private->getType())->castAs<PointerType>());
// Step 1.2: Get the address for dest element. The destination
// element has already been created on the thread's stack.
- DestElementPtrAddr =
- Bld.CreateConstArrayGEP(DestBase, Idx, CGF.getPointerSize());
+ DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
DestElementAddr = CGF.EmitLoadOfPointer(
DestElementPtrAddr,
C.getPointerType(Private->getType())->castAs<PointerType>());
@@ -2994,8 +2966,7 @@ static void emitReductionListCopy(
}
case ThreadToScratchpad: {
// Step 1.1: Get the address for the src element in the Reduce list.
- Address SrcElementPtrAddr =
- Bld.CreateConstArrayGEP(SrcBase, Idx, CGF.getPointerSize());
+ Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
SrcElementAddr = CGF.EmitLoadOfPointer(
SrcElementPtrAddr,
C.getPointerType(Private->getType())->castAs<PointerType>());
@@ -3030,8 +3001,7 @@ static void emitReductionListCopy(
// Step 1.2: Create a temporary to store the element in the destination
// Reduce list.
- DestElementPtrAddr =
- Bld.CreateConstArrayGEP(DestBase, Idx, CGF.getPointerSize());
+ DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
DestElementAddr =
CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
UpdateDestListPtr = true;
@@ -3052,18 +3022,31 @@ static void emitReductionListCopy(
shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(),
RemoteLaneOffset, Private->getExprLoc());
} else {
- if (Private->getType()->isScalarType()) {
+ switch (CGF.getEvaluationKind(Private->getType())) {
+ case TEK_Scalar: {
llvm::Value *Elem =
CGF.EmitLoadOfScalar(SrcElementAddr, /*Volatile=*/false,
Private->getType(), Private->getExprLoc());
// Store the source element value to the dest element address.
CGF.EmitStoreOfScalar(Elem, DestElementAddr, /*Volatile=*/false,
Private->getType());
- } else {
+ break;
+ }
+ case TEK_Complex: {
+ CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex(
+ CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
+ Private->getExprLoc());
+ CGF.EmitStoreOfComplex(
+ Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
+ /*isInit=*/false);
+ break;
+ }
+ case TEK_Aggregate:
CGF.EmitAggregateCopy(
CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
Private->getType(), AggValueSlot::DoesNotOverlap);
+ break;
}
}
@@ -3147,9 +3130,9 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
const CGFunctionInfo &CGFI =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *Fn = llvm::Function::Create(
- CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
- "_omp_reduction_inter_warp_copy_func", &CGM.getModule());
+ auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
+ llvm::GlobalValue::InternalLinkage,
+ "_omp_reduction_inter_warp_copy_func", &M);
CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
Fn->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
@@ -3246,8 +3229,7 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
CGF.EmitBlock(ThenBB);
// Reduce element = LocalReduceList[i]
- Address ElemPtrPtrAddr =
- Bld.CreateConstArrayGEP(LocalReduceList, Idx, CGF.getPointerSize());
+ Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
// elemptr = ((CopyType*)(elemptrptr)) + I
@@ -3313,8 +3295,7 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType);
// TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
- Address TargetElemPtrPtr =
- Bld.CreateConstArrayGEP(LocalReduceList, Idx, CGF.getPointerSize());
+ Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
Address TargetElemPtr = Address(TargetElemPtrVal, Align);
@@ -3418,9 +3399,9 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
/// (2k+1)th thread is ignored in the value aggregation. Therefore
/// we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
/// that the contiguity assumption still holds.
-static llvm::Value *emitShuffleAndReduceFunction(
+static llvm::Function *emitShuffleAndReduceFunction(
CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
- QualType ReductionArrayTy, llvm::Value *ReduceFn, SourceLocation Loc) {
+ QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) {
ASTContext &C = CGM.getContext();
// Thread local Reduce list used to host the values of data to be reduced.
@@ -3448,6 +3429,12 @@ static llvm::Value *emitShuffleAndReduceFunction(
"_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
Fn->setDoesNotRecurse();
+ if (CGM.getLangOpts().Optimize) {
+ Fn->removeFnAttr(llvm::Attribute::NoInline);
+ Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
+ Fn->addFnAttr(llvm::Attribute::AlwaysInline);
+ }
+
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
@@ -3568,6 +3555,406 @@ static llvm::Value *emitShuffleAndReduceFunction(
return Fn;
}
+/// This function emits a helper that copies all the reduction variables from
+/// the team into the provided global buffer for the reduction variables.
+///
+/// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
+/// For all data entries D in reduce_data:
+/// Copy local D to buffer.D[Idx]
+static llvm::Value *emitListToGlobalCopyFunction(
+ CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
+ QualType ReductionArrayTy, SourceLocation Loc,
+ const RecordDecl *TeamReductionRec,
+ const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
+ &VarFieldMap) {
+ ASTContext &C = CGM.getContext();
+
+ // Buffer: global reduction buffer.
+ ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
+ // Idx: index of the buffer.
+ ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
+ ImplicitParamDecl::Other);
+ // ReduceList: thread local Reduce list.
+ ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
+ FunctionArgList Args;
+ Args.push_back(&BufferArg);
+ Args.push_back(&IdxArg);
+ Args.push_back(&ReduceListArg);
+
+ const CGFunctionInfo &CGFI =
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
+ auto *Fn = llvm::Function::Create(
+ CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
+ "_omp_reduction_list_to_global_copy_func", &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
+ Fn->setDoesNotRecurse();
+ CodeGenFunction CGF(CGM);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
+
+ CGBuilderTy &Bld = CGF.Builder;
+
+ Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
+ Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
+ Address LocalReduceList(
+ Bld.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
+ C.VoidPtrTy, Loc),
+ CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
+ CGF.getPointerAlign());
+ QualType StaticTy = C.getRecordType(TeamReductionRec);
+ llvm::Type *LLVMReductionsBufferTy =
+ CGM.getTypes().ConvertTypeForMem(StaticTy);
+ llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
+ LLVMReductionsBufferTy->getPointerTo());
+ llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
+ CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
+ /*Volatile=*/false, C.IntTy,
+ Loc)};
+ unsigned Idx = 0;
+ for (const Expr *Private : Privates) {
+ // Reduce element = LocalReduceList[i]
+ Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
+ llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
+ ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
+ // elemptr = ((CopyType*)(elemptrptr)) + I
+ ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
+ Address ElemPtr =
+ Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
+ const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
+ // Global = Buffer.VD[Idx];
+ const FieldDecl *FD = VarFieldMap.lookup(VD);
+ LValue GlobLVal = CGF.EmitLValueForField(
+ CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
+ llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(GlobLVal.getPointer(), Idxs);
+ GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
+ switch (CGF.getEvaluationKind(Private->getType())) {
+ case TEK_Scalar: {
+ llvm::Value *V = CGF.EmitLoadOfScalar(ElemPtr, /*Volatile=*/false,
+ Private->getType(), Loc);
+ CGF.EmitStoreOfScalar(V, GlobLVal);
+ break;
+ }
+ case TEK_Complex: {
+ CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(
+ CGF.MakeAddrLValue(ElemPtr, Private->getType()), Loc);
+ CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false);
+ break;
+ }
+ case TEK_Aggregate:
+ CGF.EmitAggregateCopy(GlobLVal,
+ CGF.MakeAddrLValue(ElemPtr, Private->getType()),
+ Private->getType(), AggValueSlot::DoesNotOverlap);
+ break;
+ }
+ ++Idx;
+ }
+
+ CGF.FinishFunction();
+ return Fn;
+}
+
+/// This function emits a helper that reduces all the reduction variables from
+/// the team into the provided global buffer for the reduction variables.
+///
+/// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data)
+/// void *GlobPtrs[];
+/// GlobPtrs[0] = (void*)&buffer.D0[Idx];
+/// ...
+/// GlobPtrs[N] = (void*)&buffer.DN[Idx];
+/// reduce_function(GlobPtrs, reduce_data);
+static llvm::Value *emitListToGlobalReduceFunction(
+ CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
+ QualType ReductionArrayTy, SourceLocation Loc,
+ const RecordDecl *TeamReductionRec,
+ const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
+ &VarFieldMap,
+ llvm::Function *ReduceFn) {
+ ASTContext &C = CGM.getContext();
+
+ // Buffer: global reduction buffer.
+ ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
+ // Idx: index of the buffer.
+ ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
+ ImplicitParamDecl::Other);
+ // ReduceList: thread local Reduce list.
+ ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
+ FunctionArgList Args;
+ Args.push_back(&BufferArg);
+ Args.push_back(&IdxArg);
+ Args.push_back(&ReduceListArg);
+
+ const CGFunctionInfo &CGFI =
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
+ auto *Fn = llvm::Function::Create(
+ CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
+ "_omp_reduction_list_to_global_reduce_func", &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
+ Fn->setDoesNotRecurse();
+ CodeGenFunction CGF(CGM);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
+
+ CGBuilderTy &Bld = CGF.Builder;
+
+ Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
+ QualType StaticTy = C.getRecordType(TeamReductionRec);
+ llvm::Type *LLVMReductionsBufferTy =
+ CGM.getTypes().ConvertTypeForMem(StaticTy);
+ llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
+ LLVMReductionsBufferTy->getPointerTo());
+
+ // 1. Build a list of reduction variables.
+ // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
+ Address ReductionList =
+ CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
+ auto IPriv = Privates.begin();
+ llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
+ CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
+ /*Volatile=*/false, C.IntTy,
+ Loc)};
+ unsigned Idx = 0;
+ for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
+ Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
+ // Global = Buffer.VD[Idx];
+ const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
+ const FieldDecl *FD = VarFieldMap.lookup(VD);
+ LValue GlobLVal = CGF.EmitLValueForField(
+ CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
+ llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(GlobLVal.getPointer(), Idxs);
+ llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
+ CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
+ if ((*IPriv)->getType()->isVariablyModifiedType()) {
+ // Store array size.
+ ++Idx;
+ Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
+ llvm::Value *Size = CGF.Builder.CreateIntCast(
+ CGF.getVLASize(
+ CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
+ .NumElts,
+ CGF.SizeTy, /*isSigned=*/false);
+ CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
+ Elem);
+ }
+ }
+
+ // Call reduce_function(GlobalReduceList, ReduceList)
+ llvm::Value *GlobalReduceList =
+ CGF.EmitCastToVoidPtr(ReductionList.getPointer());
+ Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
+ llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
+ AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
+ CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
+ CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr});
+ CGF.FinishFunction();
+ return Fn;
+}
+
+/// This function emits a helper that copies all the reduction variables from
+/// the provided global buffer back into the team's thread-local reduce list.
+///
+/// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data)
+/// For all data entries D in reduce_data:
+/// Copy buffer.D[Idx] to local D;
+static llvm::Value *emitGlobalToListCopyFunction(
+ CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
+ QualType ReductionArrayTy, SourceLocation Loc,
+ const RecordDecl *TeamReductionRec,
+ const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
+ &VarFieldMap) {
+ ASTContext &C = CGM.getContext();
+
+ // Buffer: global reduction buffer.
+ ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
+ // Idx: index of the buffer.
+ ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
+ ImplicitParamDecl::Other);
+ // ReduceList: thread local Reduce list.
+ ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
+ FunctionArgList Args;
+ Args.push_back(&BufferArg);
+ Args.push_back(&IdxArg);
+ Args.push_back(&ReduceListArg);
+
+ const CGFunctionInfo &CGFI =
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
+ auto *Fn = llvm::Function::Create(
+ CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
+ "_omp_reduction_global_to_list_copy_func", &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
+ Fn->setDoesNotRecurse();
+ CodeGenFunction CGF(CGM);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
+
+ CGBuilderTy &Bld = CGF.Builder;
+
+ Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
+ Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
+ Address LocalReduceList(
+ Bld.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
+ C.VoidPtrTy, Loc),
+ CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
+ CGF.getPointerAlign());
+ QualType StaticTy = C.getRecordType(TeamReductionRec);
+ llvm::Type *LLVMReductionsBufferTy =
+ CGM.getTypes().ConvertTypeForMem(StaticTy);
+ llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
+ LLVMReductionsBufferTy->getPointerTo());
+
+ llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
+ CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
+ /*Volatile=*/false, C.IntTy,
+ Loc)};
+ unsigned Idx = 0;
+ for (const Expr *Private : Privates) {
+ // Reduce element = LocalReduceList[i]
+ Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
+ llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
+ ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
+ // elemptr = ((CopyType*)(elemptrptr)) + I
+ ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
+ Address ElemPtr =
+ Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
+ const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
+ // Global = Buffer.VD[Idx];
+ const FieldDecl *FD = VarFieldMap.lookup(VD);
+ LValue GlobLVal = CGF.EmitLValueForField(
+ CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
+ llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(GlobLVal.getPointer(), Idxs);
+ GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
+ switch (CGF.getEvaluationKind(Private->getType())) {
+ case TEK_Scalar: {
+ llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc);
+ CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType());
+ break;
+ }
+ case TEK_Complex: {
+ CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc);
+ CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()),
+ /*isInit=*/false);
+ break;
+ }
+ case TEK_Aggregate:
+ CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()),
+ GlobLVal, Private->getType(),
+ AggValueSlot::DoesNotOverlap);
+ break;
+ }
+ ++Idx;
+ }
+
+ CGF.FinishFunction();
+ return Fn;
+}
+
+/// This function emits a helper that reduces all the reduction variables in
+/// the provided global buffer into the team's thread-local reduce list.
+///
+/// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
+/// void *GlobPtrs[];
+/// GlobPtrs[0] = (void*)&buffer.D0[Idx];
+/// ...
+/// GlobPtrs[N] = (void*)&buffer.DN[Idx];
+/// reduce_function(reduce_data, GlobPtrs);
+static llvm::Value *emitGlobalToListReduceFunction(
+ CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
+ QualType ReductionArrayTy, SourceLocation Loc,
+ const RecordDecl *TeamReductionRec,
+ const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
+ &VarFieldMap,
+ llvm::Function *ReduceFn) {
+ ASTContext &C = CGM.getContext();
+
+ // Buffer: global reduction buffer.
+ ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
+ // Idx: index of the buffer.
+ ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
+ ImplicitParamDecl::Other);
+ // ReduceList: thread local Reduce list.
+ ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
+ FunctionArgList Args;
+ Args.push_back(&BufferArg);
+ Args.push_back(&IdxArg);
+ Args.push_back(&ReduceListArg);
+
+ const CGFunctionInfo &CGFI =
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
+ auto *Fn = llvm::Function::Create(
+ CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
+ "_omp_reduction_global_to_list_reduce_func", &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
+ Fn->setDoesNotRecurse();
+ CodeGenFunction CGF(CGM);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
+
+ CGBuilderTy &Bld = CGF.Builder;
+
+ Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
+ QualType StaticTy = C.getRecordType(TeamReductionRec);
+ llvm::Type *LLVMReductionsBufferTy =
+ CGM.getTypes().ConvertTypeForMem(StaticTy);
+ llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
+ LLVMReductionsBufferTy->getPointerTo());
+
+ // 1. Build a list of reduction variables.
+ // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
+ Address ReductionList =
+ CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
+ auto IPriv = Privates.begin();
+ llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
+ CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
+ /*Volatile=*/false, C.IntTy,
+ Loc)};
+ unsigned Idx = 0;
+ for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
+ Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
+ // Global = Buffer.VD[Idx];
+ const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
+ const FieldDecl *FD = VarFieldMap.lookup(VD);
+ LValue GlobLVal = CGF.EmitLValueForField(
+ CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
+ llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(GlobLVal.getPointer(), Idxs);
+ llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
+ CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
+ if ((*IPriv)->getType()->isVariablyModifiedType()) {
+ // Store array size.
+ ++Idx;
+ Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
+ llvm::Value *Size = CGF.Builder.CreateIntCast(
+ CGF.getVLASize(
+ CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
+ .NumElts,
+ CGF.SizeTy, /*isSigned=*/false);
+ CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
+ Elem);
+ }
+ }
+
+ // Call reduce_function(ReduceList, GlobalReduceList)
+ llvm::Value *GlobalReduceList =
+ CGF.EmitCastToVoidPtr(ReductionList.getPointer());
+ Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
+ llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
+ AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
+ CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
+ CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList});
+ CGF.FinishFunction();
+ return Fn;
+}
+
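For a single scalar reduction variable, the four helpers emitted above behave roughly like the hand-written sketch below. This is illustrative only: NumRecords stands in for OpenMPCUDAReductionBufNum, reduce_function stands in for the function produced by emitReductionFunction, and the buffer layout mimics the record produced by buildRecordForGlobalizedVars; none of these names come from the diff.

    // Minimal sketch, assuming one reduction variable of type int.
    constexpr int NumRecords = 1024;                       // buffer size stand-in
    struct TeamReductionRec { int sum[NumRecords]; };      // one array field per variable
    void reduce_function(void *lhs_list, void *rhs_list);  // generated elsewhere

    void list_to_global_copy(void *buffer, int idx, void *reduce_data) {
      auto *b = static_cast<TeamReductionRec *>(buffer);
      void **list = static_cast<void **>(reduce_data);
      b->sum[idx] = *static_cast<int *>(list[0]);          // local D -> buffer.D[idx]
    }
    void list_to_global_reduce(void *buffer, int idx, void *reduce_data) {
      auto *b = static_cast<TeamReductionRec *>(buffer);
      void *glob_ptrs[] = {&b->sum[idx]};
      reduce_function(glob_ptrs, reduce_data);             // buffer.D[idx] reduced with local D
    }
    void global_to_list_copy(void *buffer, int idx, void *reduce_data) {
      auto *b = static_cast<TeamReductionRec *>(buffer);
      void **list = static_cast<void **>(reduce_data);
      *static_cast<int *>(list[0]) = b->sum[idx];          // buffer.D[idx] -> local D
    }
    void global_to_list_reduce(void *buffer, int idx, void *reduce_data) {
      auto *b = static_cast<TeamReductionRec *>(buffer);
      void *glob_ptrs[] = {&b->sum[idx]};
      reduce_function(reduce_data, glob_ptrs);             // local D reduced with buffer.D[idx]
    }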
///
/// Design of OpenMP reductions on the GPU
///
@@ -3841,57 +4228,55 @@ void CGOpenMPRuntimeNVPTX::emitReduction(
llvm::Value *ThreadId = getThreadID(CGF, Loc);
llvm::Value *Res;
- if (ParallelReduction) {
- ASTContext &C = CGM.getContext();
- // 1. Build a list of reduction variables.
- // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
- auto Size = RHSExprs.size();
- for (const Expr *E : Privates) {
- if (E->getType()->isVariablyModifiedType())
- // Reserve place for array size.
- ++Size;
- }
- llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
- QualType ReductionArrayTy =
- C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
- Address ReductionList =
- CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
- auto IPriv = Privates.begin();
- unsigned Idx = 0;
- for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
- Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
- CGF.getPointerSize());
- CGF.Builder.CreateStore(
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
- Elem);
- if ((*IPriv)->getType()->isVariablyModifiedType()) {
- // Store array size.
- ++Idx;
- Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
- CGF.getPointerSize());
- llvm::Value *Size = CGF.Builder.CreateIntCast(
- CGF.getVLASize(
- CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
- .NumElts,
- CGF.SizeTy, /*isSigned=*/false);
- CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
- Elem);
- }
+ ASTContext &C = CGM.getContext();
+ // 1. Build a list of reduction variables.
+ // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
+ auto Size = RHSExprs.size();
+ for (const Expr *E : Privates) {
+ if (E->getType()->isVariablyModifiedType())
+ // Reserve place for array size.
+ ++Size;
+ }
+ llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
+ QualType ReductionArrayTy =
+ C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
+ /*IndexTypeQuals=*/0);
+ Address ReductionList =
+ CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
+ auto IPriv = Privates.begin();
+ unsigned Idx = 0;
+ for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
+ Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
+ CGF.Builder.CreateStore(
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
+ Elem);
+ if ((*IPriv)->getType()->isVariablyModifiedType()) {
+ // Store array size.
+ ++Idx;
+ Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
+ llvm::Value *Size = CGF.Builder.CreateIntCast(
+ CGF.getVLASize(
+ CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
+ .NumElts,
+ CGF.SizeTy, /*isSigned=*/false);
+ CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
+ Elem);
}
+ }
- llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
- llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- ReductionList.getPointer(), CGF.VoidPtrTy);
- llvm::Value *ReductionFn = emitReductionFunction(
- CGM, Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(),
- Privates, LHSExprs, RHSExprs, ReductionOps);
- llvm::Value *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
- CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
- llvm::Value *InterWarpCopyFn =
- emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
+ llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ ReductionList.getPointer(), CGF.VoidPtrTy);
+ llvm::Function *ReductionFn = emitReductionFunction(
+ Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
+ LHSExprs, RHSExprs, ReductionOps);
+ llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
+ llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
+ CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
+ llvm::Value *InterWarpCopyFn =
+ emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
+ if (ParallelReduction) {
llvm::Value *Args[] = {RTLoc,
ThreadId,
CGF.Builder.getInt32(RHSExprs.size()),
@@ -3900,17 +4285,59 @@ void CGOpenMPRuntimeNVPTX::emitReduction(
ShuffleAndReduceFn,
InterWarpCopyFn};
- Res = CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
- OMPRTL_NVPTX__kmpc_parallel_reduce_nowait_v2),
- Args);
+ Res = CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2),
+ Args);
} else {
assert(TeamsReduction && "expected teams reduction.");
- std::string Name = getName({"reduction"});
- llvm::Value *Lock = getCriticalRegionLock(Name);
- llvm::Value *Args[] = {RTLoc, ThreadId, Lock};
+ llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap;
+ llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size());
+ int Cnt = 0;
+ for (const Expr *DRE : Privates) {
+ PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl();
+ ++Cnt;
+ }
+ const RecordDecl *TeamReductionRec = ::buildRecordForGlobalizedVars(
+ CGM.getContext(), PrivatesReductions, llvm::None, VarFieldMap,
+ C.getLangOpts().OpenMPCUDAReductionBufNum);
+ TeamsReductions.push_back(TeamReductionRec);
+ if (!KernelTeamsReductionPtr) {
+ KernelTeamsReductionPtr = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/true,
+ llvm::GlobalValue::InternalLinkage, nullptr,
+ "_openmp_teams_reductions_buffer_$_$ptr");
+ }
+ llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar(
+ Address(KernelTeamsReductionPtr, CGM.getPointerAlign()),
+ /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
+ llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction(
+ CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
+ llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction(
+ CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
+ ReductionFn);
+ llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction(
+ CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
+ llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction(
+ CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
+ ReductionFn);
+
+ llvm::Value *Args[] = {
+ RTLoc,
+ ThreadId,
+ GlobalBufferPtr,
+ CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum),
+ RL,
+ ShuffleAndReduceFn,
+ InterWarpCopyFn,
+ GlobalToBufferCpyFn,
+ GlobalToBufferRedFn,
+ BufferToGlobalCpyFn,
+ BufferToGlobalRedFn};
+
Res = CGF.EmitRuntimeCall(
createNVPTXRuntimeFunction(
- OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_simple),
+ OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2),
Args);
}
@@ -3941,30 +4368,14 @@ void CGOpenMPRuntimeNVPTX::emitReduction(
++IRHS;
}
};
- if (ParallelReduction) {
- llvm::Value *EndArgs[] = {ThreadId};
- RegionCodeGenTy RCG(CodeGen);
- NVPTXActionTy Action(
- nullptr, llvm::None,
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_reduce_nowait),
- EndArgs);
- RCG.setAction(Action);
- RCG(CGF);
- } else {
- assert(TeamsReduction && "expected teams reduction.");
- llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
- std::string Name = getName({"reduction"});
- llvm::Value *Lock = getCriticalRegionLock(Name);
- llvm::Value *EndArgs[] = {RTLoc, ThreadId, Lock};
- RegionCodeGenTy RCG(CodeGen);
- NVPTXActionTy Action(
- nullptr, llvm::None,
- createNVPTXRuntimeFunction(
- OMPRTL_NVPTX__kmpc_nvptx_teams_end_reduce_nowait_simple),
- EndArgs);
- RCG.setAction(Action);
- RCG(CGF);
- }
+ llvm::Value *EndArgs[] = {ThreadId};
+ RegionCodeGenTy RCG(CodeGen);
+ NVPTXActionTy Action(
+ nullptr, llvm::None,
+ createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_reduce_nowait),
+ EndArgs);
+ RCG.setAction(Action);
+ RCG(CGF);
// There is no need to emit line number for unconditional branch.
(void)ApplyDebugLocation::CreateEmpty(CGF);
CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
@@ -3983,6 +4394,10 @@ CGOpenMPRuntimeNVPTX::translateParameter(const FieldDecl *FD,
if (Attr->getCaptureKind() == OMPC_map) {
PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
LangAS::opencl_global);
+ } else if (Attr->getCaptureKind() == OMPC_firstprivate &&
+ PointeeTy.isConstant(CGM.getContext())) {
+ PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
+ LangAS::opencl_generic);
}
}
ArgType = CGM.getContext().getPointerType(PointeeTy);
@@ -4034,12 +4449,11 @@ CGOpenMPRuntimeNVPTX::getParameterAddress(CodeGenFunction &CGF,
}
void CGOpenMPRuntimeNVPTX::emitOutlinedFunctionCall(
- CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
+ CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
ArrayRef<llvm::Value *> Args) const {
SmallVector<llvm::Value *, 4> TargetArgs;
TargetArgs.reserve(Args.size());
- auto *FnType =
- cast<llvm::FunctionType>(OutlinedFn->getType()->getPointerElementType());
+ auto *FnType = OutlinedFn.getFunctionType();
for (unsigned I = 0, E = Args.size(); I < E; ++I) {
if (FnType->isVarArg() && FnType->getNumParams() <= I) {
TargetArgs.append(std::next(Args.begin(), I), Args.end());
@@ -4137,8 +4551,7 @@ llvm::Function *CGOpenMPRuntimeNVPTX::createParallelDataSharingWrapper(
}
unsigned Idx = 0;
if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
- Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx,
- CGF.getPointerSize());
+ Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
Src, CGF.SizeTy->getPointerTo());
llvm::Value *LB = CGF.EmitLoadOfScalar(
@@ -4148,8 +4561,7 @@ llvm::Function *CGOpenMPRuntimeNVPTX::createParallelDataSharingWrapper(
cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
Args.emplace_back(LB);
++Idx;
- Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx,
- CGF.getPointerSize());
+ Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
Src, CGF.SizeTy->getPointerTo());
llvm::Value *UB = CGF.EmitLoadOfScalar(
@@ -4164,8 +4576,7 @@ llvm::Function *CGOpenMPRuntimeNVPTX::createParallelDataSharingWrapper(
ASTContext &CGFContext = CGF.getContext();
for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
QualType ElemTy = CurField->getType();
- Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx,
- CGF.getPointerSize());
+ Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)));
llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
@@ -4266,6 +4677,58 @@ void CGOpenMPRuntimeNVPTX::emitFunctionProlog(CodeGenFunction &CGF,
Address CGOpenMPRuntimeNVPTX::getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD) {
+ if (VD && VD->hasAttr<OMPAllocateDeclAttr>()) {
+ const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
+ switch (A->getAllocatorType()) {
+ // Use the default allocator here as by default local vars are
+ // threadlocal.
+ case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
+ case OMPAllocateDeclAttr::OMPThreadMemAlloc:
+ case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
+ case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
+ // Follow the user decision - use default allocation.
+ return Address::invalid();
+ case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
+    // TODO: implement support for user-defined allocators.
+ return Address::invalid();
+ case OMPAllocateDeclAttr::OMPConstMemAlloc: {
+ llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
+ auto *GV = new llvm::GlobalVariable(
+ CGM.getModule(), VarTy, /*isConstant=*/false,
+ llvm::GlobalValue::InternalLinkage,
+ llvm::Constant::getNullValue(VarTy), VD->getName(),
+ /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
+ CGM.getContext().getTargetAddressSpace(LangAS::cuda_constant));
+ CharUnits Align = CGM.getContext().getDeclAlign(VD);
+ GV->setAlignment(Align.getQuantity());
+ return Address(GV, Align);
+ }
+ case OMPAllocateDeclAttr::OMPPTeamMemAlloc: {
+ llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
+ auto *GV = new llvm::GlobalVariable(
+ CGM.getModule(), VarTy, /*isConstant=*/false,
+ llvm::GlobalValue::InternalLinkage,
+ llvm::Constant::getNullValue(VarTy), VD->getName(),
+ /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
+ CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
+ CharUnits Align = CGM.getContext().getDeclAlign(VD);
+ GV->setAlignment(Align.getQuantity());
+ return Address(GV, Align);
+ }
+ case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
+ case OMPAllocateDeclAttr::OMPCGroupMemAlloc: {
+ llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
+ auto *GV = new llvm::GlobalVariable(
+ CGM.getModule(), VarTy, /*isConstant=*/false,
+ llvm::GlobalValue::InternalLinkage,
+ llvm::Constant::getNullValue(VarTy), VD->getName());
+ CharUnits Align = CGM.getContext().getDeclAlign(VD);
+ GV->setAlignment(Align.getQuantity());
+ return Address(GV, Align);
+ }
+ }
+ }
+
if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
return Address::invalid();
@@ -4287,6 +4750,7 @@ Address CGOpenMPRuntimeNVPTX::getAddressOfLocalVariable(CodeGenFunction &CGF,
return VDI->second.PrivateAddr;
}
}
+
return Address::invalid();
}
@@ -4374,6 +4838,38 @@ void CGOpenMPRuntimeNVPTX::adjustTargetSpecificDataForLambdas(
}
}
+unsigned CGOpenMPRuntimeNVPTX::getDefaultFirstprivateAddressSpace() const {
+ return CGM.getContext().getTargetAddressSpace(LangAS::cuda_constant);
+}
+
+bool CGOpenMPRuntimeNVPTX::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
+ LangAS &AS) {
+ if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
+ return false;
+ const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
+  switch (A->getAllocatorType()) {
+ case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
+ // Not supported, fallback to the default mem space.
+ case OMPAllocateDeclAttr::OMPThreadMemAlloc:
+ case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
+ case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
+ case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
+ case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
+ AS = LangAS::Default;
+ return true;
+ case OMPAllocateDeclAttr::OMPConstMemAlloc:
+ AS = LangAS::cuda_constant;
+ return true;
+ case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
+ AS = LangAS::cuda_shared;
+ return true;
+ case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
+ llvm_unreachable("Expected predefined allocator for the variables with the "
+ "static storage.");
+ }
+ return false;
+}
+
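getAddressOfLocalVariable and hasAllocateAttributeForGlobalVar above translate the OpenMP 5.0 predefined allocators into CUDA address spaces. A hedged source-level illustration follows; the placement comments reflect the switches above, while the declarations themselves are invented for the example.

    #pragma omp declare target
    int a;                                                     // default placement: global memory
    int b;
    #pragma omp allocate(b) allocator(omp_const_mem_alloc)     // __constant__ memory
    int c;
    #pragma omp allocate(c) allocator(omp_pteam_mem_alloc)     // __shared__ memory
    #pragma omp end declare target

    void work() {
      int t;
      #pragma omp allocate(t) allocator(omp_cgroup_mem_alloc)  // lowered to a module-scope
                                                                // global in default memory
      t = 0;
    }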
// Get current CudaArch and ignore any unknown values
static CudaArch getCudaArch(CodeGenModule &CGM) {
if (!CGM.getTarget().hasFeature("ptx"))
@@ -4395,7 +4891,7 @@ static CudaArch getCudaArch(CodeGenModule &CGM) {
/// Check to see if target architecture supports unified addressing which is
/// a restriction for OpenMP requires clause "unified_shared_memory".
void CGOpenMPRuntimeNVPTX::checkArchForUnifiedAddressing(
- CodeGenModule &CGM, const OMPRequiresDecl *D) const {
+ const OMPRequiresDecl *D) {
for (const OMPClause *Clause : D->clauselists()) {
if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
switch (getCudaArch(CGM)) {
@@ -4432,7 +4928,11 @@ void CGOpenMPRuntimeNVPTX::checkArchForUnifiedAddressing(
case CudaArch::GFX902:
case CudaArch::GFX904:
case CudaArch::GFX906:
+ case CudaArch::GFX908:
case CudaArch::GFX909:
+ case CudaArch::GFX1010:
+ case CudaArch::GFX1011:
+ case CudaArch::GFX1012:
case CudaArch::UNKNOWN:
break;
case CudaArch::LAST:
@@ -4440,6 +4940,7 @@ void CGOpenMPRuntimeNVPTX::checkArchForUnifiedAddressing(
}
}
}
+ CGOpenMPRuntime::checkArchForUnifiedAddressing(D);
}
/// Get number of SMs and number of blocks per SM.
@@ -4485,7 +4986,11 @@ static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
case CudaArch::GFX902:
case CudaArch::GFX904:
case CudaArch::GFX906:
+ case CudaArch::GFX908:
case CudaArch::GFX909:
+ case CudaArch::GFX1010:
+ case CudaArch::GFX1011:
+ case CudaArch::GFX1012:
case CudaArch::UNKNOWN:
break;
case CudaArch::LAST:
@@ -4587,9 +5092,12 @@ void CGOpenMPRuntimeNVPTX::clear() {
QualType Arr2Ty = C.getConstantArrayType(Arr1Ty, Size2, ArrayType::Normal,
/*IndexTypeQuals=*/0);
llvm::Type *LLVMArr2Ty = CGM.getTypes().ConvertTypeForMem(Arr2Ty);
+  // FIXME: nvlink does not handle weak linkage correctly (objects with
+  // different sizes are reported as erroneous).
+ // Restore CommonLinkage as soon as nvlink is fixed.
auto *GV = new llvm::GlobalVariable(
CGM.getModule(), LLVMArr2Ty,
- /*isConstant=*/false, llvm::GlobalValue::CommonLinkage,
+ /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
llvm::Constant::getNullValue(LLVMArr2Ty),
"_openmp_static_glob_rd_$_");
auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
@@ -4600,5 +5108,36 @@ void CGOpenMPRuntimeNVPTX::clear() {
}
}
}
+ if (!TeamsReductions.empty()) {
+ ASTContext &C = CGM.getContext();
+ RecordDecl *StaticRD = C.buildImplicitRecord(
+ "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::TTK_Union);
+ StaticRD->startDefinition();
+ for (const RecordDecl *TeamReductionRec : TeamsReductions) {
+ QualType RecTy = C.getRecordType(TeamReductionRec);
+ auto *Field = FieldDecl::Create(
+ C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
+ C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
+ /*BW=*/nullptr, /*Mutable=*/false,
+ /*InitStyle=*/ICIS_NoInit);
+ Field->setAccess(AS_public);
+ StaticRD->addDecl(Field);
+ }
+ StaticRD->completeDefinition();
+ QualType StaticTy = C.getRecordType(StaticRD);
+ llvm::Type *LLVMReductionsBufferTy =
+ CGM.getTypes().ConvertTypeForMem(StaticTy);
+    // FIXME: nvlink does not handle weak linkage correctly (objects with
+    // different sizes are reported as erroneous).
+ // Restore CommonLinkage as soon as nvlink is fixed.
+ auto *GV = new llvm::GlobalVariable(
+ CGM.getModule(), LLVMReductionsBufferTy,
+ /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
+ llvm::Constant::getNullValue(LLVMReductionsBufferTy),
+ "_openmp_teams_reductions_buffer_$_");
+ KernelTeamsReductionPtr->setInitializer(
+ llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
+ CGM.VoidPtrTy));
+ }
CGOpenMPRuntime::clear();
}
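The TeamsReductions bookkeeping materialized in clear() above amounts to one module-level union that is large enough for any kernel's team-reduction record, plus the constant pointer the copy/reduce helpers load. Roughly, in C++ terms (the $-suffixed IR names are not valid C++ identifiers, and the kernel and field names are invented for illustration):

    constexpr int NumRecords = 1024;          // OpenMPCUDAReductionBufNum stand-in
    union TeamsReductionType {                // "_openmp_teams_reduction_type_$_"
      struct { int    sum[NumRecords]; } kernel1_rec;
      struct { double max[NumRecords]; } kernel2_rec;
    };
    static TeamsReductionType TeamsReductionsBuffer;      // "_openmp_teams_reductions_buffer_$_"
    static void *const TeamsReductionsBufferPtr =         // "_openmp_teams_reductions_buffer_$_$ptr"
        &TeamsReductionsBuffer;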
diff --git a/lib/CodeGen/CGOpenMPRuntimeNVPTX.h b/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
index 6091610c37e3..e7fd458e7271 100644
--- a/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
+++ b/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
@@ -1,9 +1,8 @@
//===----- CGOpenMPRuntimeNVPTX.h - Interface to OpenMP NVPTX Runtimes ----===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -18,7 +17,6 @@
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "clang/AST/StmtOpenMP.h"
-#include "llvm/IR/CallSite.h"
namespace clang {
namespace CodeGen {
@@ -173,7 +171,7 @@ private:
/// specified, nullptr otherwise.
///
void emitSPMDParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
- llvm::Value *OutlinedFn,
+ llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond);
@@ -230,7 +228,7 @@ public:
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
- llvm::Value *
+ llvm::Function *
emitParallelOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
@@ -245,7 +243,7 @@ public:
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
- llvm::Value *
+ llvm::Function *
emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
@@ -260,7 +258,7 @@ public:
/// variables used in \a OutlinedFn function.
///
void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
- SourceLocation Loc, llvm::Value *OutlinedFn,
+ SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars) override;
/// Emits code for parallel or serial call of the \a OutlinedFn with
@@ -273,7 +271,7 @@ public:
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
- llvm::Value *OutlinedFn,
+ llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond) override;
@@ -323,7 +321,7 @@ public:
/// implementation. Specialized for the NVPTX device.
/// \param Function OpenMP runtime function.
/// \return Specified function.
- llvm::Constant *createNVPTXRuntimeFunction(unsigned Function);
+ llvm::FunctionCallee createNVPTXRuntimeFunction(unsigned Function);
/// Translates the native parameter of outlined function if this is required
/// for target.
@@ -342,7 +340,7 @@ public:
/// Emits call of the outlined function with the provided arguments,
/// translating these arguments to correct target-specific arguments.
void emitOutlinedFunctionCall(
- CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
+ CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
ArrayRef<llvm::Value *> Args = llvm::None) const override;
/// Emits OpenMP-specific function prolog.
@@ -385,8 +383,16 @@ public:
/// Perform check on requires decl to ensure that target architecture
/// supports unified addressing
- void checkArchForUnifiedAddressing(CodeGenModule &CGM,
- const OMPRequiresDecl *D) const override;
+ void checkArchForUnifiedAddressing(const OMPRequiresDecl *D) override;
+
+  /// Returns the default address space for constant firstprivates, which is
+  /// the __constant__ address space on this target.
+ unsigned getDefaultFirstprivateAddressSpace() const override;
+
+  /// Checks if the variable has an associated OMPAllocateDeclAttr attribute
+  /// with a predefined allocator and translates it into the corresponding
+  /// address space.
+ bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS) override;
private:
/// Track the execution mode when codegening directives within a target
@@ -463,6 +469,12 @@ private:
unsigned RegionCounter = 0;
};
llvm::SmallVector<GlobalPtrSizeRecsTy, 8> GlobalizedRecords;
+ llvm::GlobalVariable *KernelTeamsReductionPtr = nullptr;
+  /// List of the records with the fields of the reduction variables used in
+  /// the cross-team reductions. Used to build the intermediate buffer for the
+  /// fast teams reductions.
+  /// All the records are gathered into a single union, `union.type`.
+ llvm::SmallVector<const RecordDecl *, 4> TeamsReductions;
/// Shared pointer for the global memory in the global memory buffer used for
/// the given kernel.
llvm::GlobalVariable *KernelStaticGlobalized = nullptr;
diff --git a/lib/CodeGen/CGRecordLayout.h b/lib/CodeGen/CGRecordLayout.h
index 41084294ab9a..730ee4c438e7 100644
--- a/lib/CodeGen/CGRecordLayout.h
+++ b/lib/CodeGen/CGRecordLayout.h
@@ -1,9 +1,8 @@
//===--- CGRecordLayout.h - LLVM Record Layout Information ------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index c754541ac121..4de64a32f2ac 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -1,9 +1,8 @@
//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -274,7 +273,7 @@ void CGRecordLowering::lower(bool NVBaseType) {
if (!NVBaseType)
accumulateVBases();
}
- std::stable_sort(Members.begin(), Members.end());
+ llvm::stable_sort(Members);
Members.push_back(StorageInfo(Size, getIntNType(8)));
clipTailPadding();
determinePacked(NVBaseType);
@@ -348,18 +347,21 @@ void CGRecordLowering::lowerUnion() {
void CGRecordLowering::accumulateFields() {
for (RecordDecl::field_iterator Field = D->field_begin(),
FieldEnd = D->field_end();
- Field != FieldEnd;)
+ Field != FieldEnd;) {
if (Field->isBitField()) {
RecordDecl::field_iterator Start = Field;
// Iterate to gather the list of bitfields.
for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
accumulateBitFields(Start, Field);
- } else {
+ } else if (!Field->isZeroSize(Context)) {
Members.push_back(MemberInfo(
bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
getStorageType(*Field), *Field));
++Field;
+ } else {
+ ++Field;
}
+ }
}
void
@@ -591,10 +593,17 @@ void CGRecordLowering::clipTailPadding() {
if (!Member->Data && Member->Kind != MemberInfo::Scissor)
continue;
if (Member->Offset < Tail) {
- assert(Prior->Kind == MemberInfo::Field && !Prior->FD &&
+ assert(Prior->Kind == MemberInfo::Field &&
"Only storage fields have tail padding!");
- Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
- cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
+ if (!Prior->FD || Prior->FD->isBitField())
+ Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
+ cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
+ else {
+ assert(Prior->FD->hasAttr<NoUniqueAddressAttr>() &&
+ "should not have reused this field's tail padding");
+ Prior->Data = getByteArrayType(
+ Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).first);
+ }
}
if (Member->Data)
Prior = Member;
@@ -659,7 +668,7 @@ void CGRecordLowering::insertPadding() {
Pad = Padding.begin(), PadEnd = Padding.end();
Pad != PadEnd; ++Pad)
Members.push_back(StorageInfo(Pad->first, getByteArrayType(Pad->second)));
- std::stable_sort(Members.begin(), Members.end());
+ llvm::stable_sort(Members);
}
void CGRecordLowering::fillOutputFields() {
@@ -798,6 +807,10 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
const FieldDecl *FD = *it;
+ // Ignore zero-sized fields.
+ if (FD->isZeroSize(getContext()))
+ continue;
+
// For non-bit-fields, just check that the LLVM struct offset matches the
// AST offset.
if (!FD->isBitField()) {
@@ -811,10 +824,6 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
if (!FD->getDeclName())
continue;
- // Don't inspect zero-length bitfields.
- if (FD->isZeroLengthBitField(getContext()))
- continue;
-
const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));
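
The zero-size-field handling above (skipping such fields when building storage and when verifying the layout) exists because a C++20 [[no_unique_address]] member of empty type occupies no storage and may share an address with other members, so its "slot" must not be turned into bit-field storage. A standalone sketch of the source-level situation, not part of this patch; exact sizes are ABI-dependent (the values in the comment assume a typical x86-64 / Itanium layout) and it needs -std=c++2a or newer:

#include <cstdio>

struct Empty {};

struct Padded {
  int i;   // 4 bytes
  char c;  // 1 byte, followed by 3 bytes of tail padding
};

struct WithEmpty {
  Padded p;
  [[no_unique_address]] Empty e;  // zero-size: may overlap storage of p
};

int main() {
  // Typically prints 8 and 8: the empty member does not grow the struct.
  std::printf("sizeof(Padded)=%zu sizeof(WithEmpty)=%zu\n",
              sizeof(Padded), sizeof(WithEmpty));
  return 0;
}
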
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index 0242b48659d1..dd0dea5b94a0 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -1,9 +1,8 @@
//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -20,7 +19,6 @@
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
@@ -383,36 +381,49 @@ CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
bool GetLast,
AggValueSlot AggSlot) {
- for (CompoundStmt::const_body_iterator I = S.body_begin(),
- E = S.body_end()-GetLast; I != E; ++I)
- EmitStmt(*I);
+ const Stmt *ExprResult = S.getStmtExprResult();
+ assert((!GetLast || (GetLast && ExprResult)) &&
+ "If GetLast is true then the CompoundStmt must have a StmtExprResult");
Address RetAlloca = Address::invalid();
- if (GetLast) {
- // We have to special case labels here. They are statements, but when put
- // at the end of a statement expression, they yield the value of their
- // subexpression. Handle this by walking through all labels we encounter,
- // emitting them before we evaluate the subexpr.
- const Stmt *LastStmt = S.body_back();
- while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
- EmitLabel(LS->getDecl());
- LastStmt = LS->getSubStmt();
- }
- EnsureInsertPoint();
+ for (auto *CurStmt : S.body()) {
+ if (GetLast && ExprResult == CurStmt) {
+ // We have to special case labels here. They are statements, but when put
+ // at the end of a statement expression, they yield the value of their
+ // subexpression. Handle this by walking through all labels we encounter,
+ // emitting them before we evaluate the subexpr.
+ // Similar issues arise for attributed statements.
+ while (!isa<Expr>(ExprResult)) {
+ if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
+ EmitLabel(LS->getDecl());
+ ExprResult = LS->getSubStmt();
+ } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
+ // FIXME: Update this if we ever have attributes that affect the
+ // semantics of an expression.
+ ExprResult = AS->getSubStmt();
+ } else {
+ llvm_unreachable("unknown value statement");
+ }
+ }
- QualType ExprTy = cast<Expr>(LastStmt)->getType();
- if (hasAggregateEvaluationKind(ExprTy)) {
- EmitAggExpr(cast<Expr>(LastStmt), AggSlot);
+ EnsureInsertPoint();
+
+ const Expr *E = cast<Expr>(ExprResult);
+ QualType ExprTy = E->getType();
+ if (hasAggregateEvaluationKind(ExprTy)) {
+ EmitAggExpr(E, AggSlot);
+ } else {
+ // We can't return an RValue here because there might be cleanups at
+ // the end of the StmtExpr. Because of that, we have to emit the result
+ // here into a temporary alloca.
+ RetAlloca = CreateMemTemp(ExprTy);
+ EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
+ /*IsInit*/ false);
+ }
} else {
- // We can't return an RValue here because there might be cleanups at
- // the end of the StmtExpr. Because of that, we have to emit the result
- // here into a temporary alloca.
- RetAlloca = CreateMemTemp(ExprTy);
- EmitAnyExprToMem(cast<Expr>(LastStmt), RetAlloca, Qualifiers(),
- /*IsInit*/false);
+ EmitStmt(CurStmt);
}
-
}
return RetAlloca;
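
The rewritten loop above finds the statement expression's result via getStmtExprResult() and then unwraps LabelStmt (and AttributedStmt) nodes before evaluating the final expression. A hypothetical input that exercises the label case, using the GNU statement-expression extension (the function and variable names are made up for the sketch):

int positive_or_zero(int x) {
  return ({
    if (x > 0)
      goto done;
    x = 0;
  done:
    x;  // a LabelStmt wrapping the result expression of the ({ ... })
  });
}
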
@@ -529,6 +540,16 @@ void CodeGenFunction::EmitLabel(const LabelDecl *D) {
}
EmitBlock(Dest.getBlock());
+
+ // Emit debug info for labels.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ if (CGM.getCodeGenOpts().getDebugInfo() >=
+ codegenoptions::LimitedDebugInfo) {
+ DI->setLocation(D->getLocation());
+ DI->EmitLabel(D, Builder);
+ }
+ }
+
incrementProfileCounter(D->getStmt());
}
@@ -1007,7 +1028,7 @@ void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
} else if (RV.isAggregate()) {
LValue Dest = MakeAddrLValue(ReturnValue, Ty);
LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
- EmitAggregateCopy(Dest, Src, Ty, overlapForReturnValue());
+ EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
} else {
EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
/*init*/ true);
@@ -1089,7 +1110,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased,
- overlapForReturnValue()));
+ getOverlapForReturnValue()));
break;
}
}
@@ -1821,8 +1842,15 @@ llvm::Value* CodeGenFunction::EmitAsmInput(
// (immediate or symbolic), try to emit it as such.
if (!Info.allowsRegister() && !Info.allowsMemory()) {
if (Info.requiresImmediateConstant()) {
- llvm::APSInt AsmConst = InputExpr->EvaluateKnownConstInt(getContext());
- return llvm::ConstantInt::get(getLLVMContext(), AsmConst);
+ Expr::EvalResult EVResult;
+ InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
+
+ llvm::APSInt IntResult;
+ if (!EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
+ getContext()))
+ llvm_unreachable("Invalid immediate constant!");
+
+ return llvm::ConstantInt::get(getLLVMContext(), IntResult);
}
Expr::EvalResult Result;
@@ -1872,6 +1900,55 @@ static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
}
+static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
+ bool ReadOnly, bool ReadNone, const AsmStmt &S,
+ const std::vector<llvm::Type *> &ResultRegTypes,
+ CodeGenFunction &CGF,
+ std::vector<llvm::Value *> &RegResults) {
+ Result.addAttribute(llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::NoUnwind);
+ // Attach readnone and readonly attributes.
+ if (!HasSideEffect) {
+ if (ReadNone)
+ Result.addAttribute(llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::ReadNone);
+ else if (ReadOnly)
+ Result.addAttribute(llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::ReadOnly);
+ }
+
+ // Slap the source location of the inline asm into a !srcloc metadata on the
+ // call.
+ if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
+ Result.setMetadata("srcloc",
+ getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
+ else {
+ // At least put the line number on MS inline asm blobs.
+ llvm::Constant *Loc = llvm::ConstantInt::get(CGF.Int32Ty,
+ S.getAsmLoc().getRawEncoding());
+ Result.setMetadata("srcloc",
+ llvm::MDNode::get(CGF.getLLVMContext(),
+ llvm::ConstantAsMetadata::get(Loc)));
+ }
+
+ if (CGF.getLangOpts().assumeFunctionsAreConvergent())
+ // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
+ // convergent (meaning, they may call an intrinsically convergent op, such
+ // as bar.sync, and so can't have certain optimizations applied around
+ // them).
+ Result.addAttribute(llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::Convergent);
+ // Extract all of the register value results from the asm.
+ if (ResultRegTypes.size() == 1) {
+ RegResults.push_back(&Result);
+ } else {
+ for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
+ llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
+ RegResults.push_back(Tmp);
+ }
+ }
+}
+
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Assemble the final asm string.
std::string AsmString = S.generateAsmString(getContext());
@@ -1915,6 +1992,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
std::vector<llvm::Value*> InOutArgs;
std::vector<llvm::Type*> InOutArgTypes;
+ // Keep track of out constraints for tied input operand.
+ std::vector<std::string> OutputConstraints;
+
// An inline asm can be marked readonly if it meets the following conditions:
// - it doesn't have any sideeffects
// - it doesn't clobber memory
@@ -1937,7 +2017,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
getTarget(), CGM, S,
Info.earlyClobber());
-
+ OutputConstraints.push_back(OutputConstraint);
LValue Dest = EmitLValue(OutExpr);
if (!Constraints.empty())
Constraints += ',';
@@ -2055,6 +2135,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
getTarget(), CGM, S, false /* No EarlyClobber */);
+ std::string ReplaceConstraint (InputConstraint);
llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
// If this input argument is tied to a larger output result, extend the
@@ -2082,9 +2163,11 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
Arg = Builder.CreateFPExt(Arg, OutputTy);
}
}
+ // Deal with the tied operands' constraint code in adjustInlineAsmType.
+ ReplaceConstraint = OutputConstraints[Output];
}
if (llvm::Type* AdjTy =
- getTargetHooks().adjustInlineAsmType(*this, InputConstraint,
+ getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
Arg->getType()))
Arg = Builder.CreateBitCast(Arg, AdjTy);
else
@@ -2108,6 +2191,29 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
}
Constraints += InOutConstraints;
+ // Labels
+ SmallVector<llvm::BasicBlock *, 16> Transfer;
+ llvm::BasicBlock *Fallthrough = nullptr;
+ bool IsGCCAsmGoto = false;
+ if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
+ IsGCCAsmGoto = GS->isAsmGoto();
+ if (IsGCCAsmGoto) {
+ for (auto *E : GS->labels()) {
+ JumpDest Dest = getJumpDestForLabel(E->getLabel());
+ Transfer.push_back(Dest.getBlock());
+ llvm::BlockAddress *BA =
+ llvm::BlockAddress::get(CurFn, Dest.getBlock());
+ Args.push_back(BA);
+ ArgTypes.push_back(BA->getType());
+ if (!Constraints.empty())
+ Constraints += ',';
+ Constraints += 'X';
+ }
+ StringRef Name = "asm.fallthrough";
+ Fallthrough = createBasicBlock(Name);
+ }
+ }
+
// Clobbers
for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
StringRef Clobber = S.getClobber(i);
@@ -2150,52 +2256,18 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
/* IsAlignStack */ false, AsmDialect);
- llvm::CallInst *Result =
- Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
- Result->addAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::NoUnwind);
-
- // Attach readnone and readonly attributes.
- if (!HasSideEffect) {
- if (ReadNone)
- Result->addAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::ReadNone);
- else if (ReadOnly)
- Result->addAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::ReadOnly);
- }
-
- // Slap the source location of the inline asm into a !srcloc metadata on the
- // call.
- if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S)) {
- Result->setMetadata("srcloc", getAsmSrcLocInfo(gccAsmStmt->getAsmString(),
- *this));
- } else {
- // At least put the line number on MS inline asm blobs.
- auto Loc = llvm::ConstantInt::get(Int32Ty, S.getAsmLoc().getRawEncoding());
- Result->setMetadata("srcloc",
- llvm::MDNode::get(getLLVMContext(),
- llvm::ConstantAsMetadata::get(Loc)));
- }
-
- if (getLangOpts().assumeFunctionsAreConvergent()) {
- // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
- // convergent (meaning, they may call an intrinsically convergent op, such
- // as bar.sync, and so can't have certain optimizations applied around
- // them).
- Result->addAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::Convergent);
- }
-
- // Extract all of the register value results from the asm.
std::vector<llvm::Value*> RegResults;
- if (ResultRegTypes.size() == 1) {
- RegResults.push_back(Result);
+ if (IsGCCAsmGoto) {
+ llvm::CallBrInst *Result =
+ Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
+ UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
+ ReadNone, S, ResultRegTypes, *this, RegResults);
+ EmitBlock(Fallthrough);
} else {
- for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
- llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult");
- RegResults.push_back(Tmp);
- }
+ llvm::CallInst *Result =
+ Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
+ UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
+ ReadNone, S, ResultRegTypes, *this, RegResults);
}
assert(RegResults.size() == ResultRegTypes.size());
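
The new IsGCCAsmGoto path lowers "asm goto" to a callbr instruction: the normal destination is the asm.fallthrough block created above, and each label becomes a blockaddress argument tied to an extra 'X' constraint. A minimal user of the extension, offered only as a sketch (x86 assembly is assumed, and it needs a clang new enough to accept asm goto):

extern "C" int is_nonzero(int x) {
  asm goto("testl %0, %0\n\t"
           "jnz %l[nonzero]"
           : /* asm goto permits no outputs */
           : "r"(x)
           : "cc"
           : nonzero);
  return 0;  // fallthrough block of the callbr
nonzero:
  return 1;  // indirect destination, passed as a blockaddress
}
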
diff --git a/lib/CodeGen/CGStmtOpenMP.cpp b/lib/CodeGen/CGStmtOpenMP.cpp
index eb1304d89345..e8fbca5108ad 100644
--- a/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/lib/CodeGen/CGStmtOpenMP.cpp
@@ -1,9 +1,8 @@
//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -19,7 +18,6 @@
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/DeclOpenMP.h"
-#include "llvm/IR/CallSite.h"
using namespace clang;
using namespace CodeGen;
@@ -298,8 +296,7 @@ void CodeGenFunction::GenerateOpenMPCapturedVars(
static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
QualType DstType, StringRef Name,
- LValue AddrLV,
- bool isReferenceType = false) {
+ LValue AddrLV) {
ASTContext &Ctx = CGF.getContext();
llvm::Value *CastedPtr = CGF.EmitScalarConversion(
@@ -308,17 +305,6 @@ static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
Address TmpAddr =
CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
.getAddress();
-
- // If we are dealing with references we need to return the address of the
- // reference instead of the reference of the value.
- if (isReferenceType) {
- QualType RefType = Ctx.getLValueReferenceType(DstType);
- llvm::Value *RefVal = TmpAddr.getPointer();
- TmpAddr = CGF.CreateMemTemp(RefType, Twine(Name, ".ref"));
- LValue TmpLVal = CGF.MakeAddrLValue(TmpAddr, RefType);
- CGF.EmitStoreThroughLValue(RValue::get(RefVal), TmpLVal, /*isInit=*/true);
- }
-
return TmpAddr;
}
@@ -475,14 +461,6 @@ static llvm::Function *emitOutlinedFunctionPrologue(
// use the value that we get from the arguments.
if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
const VarDecl *CurVD = I->getCapturedVar();
- // If the variable is a reference we need to materialize it here.
- if (CurVD->getType()->isReferenceType()) {
- Address RefAddr = CGF.CreateMemTemp(
- CurVD->getType(), CGM.getPointerAlign(), ".materialized_ref");
- CGF.EmitStoreOfScalar(LocalAddr.getPointer(), RefAddr,
- /*Volatile=*/false, CurVD->getType());
- LocalAddr = RefAddr;
- }
if (!FO.RegisterCastedArgsOnly)
LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}});
++Cnt;
@@ -506,15 +484,12 @@ static llvm::Function *emitOutlinedFunctionPrologue(
const VarDecl *Var = I->getCapturedVar();
QualType VarTy = Var->getType();
Address ArgAddr = ArgLVal.getAddress();
- if (!VarTy->isReferenceType()) {
- if (ArgLVal.getType()->isLValueReferenceType()) {
- ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
- } else if (!VarTy->isVariablyModifiedType() ||
- !VarTy->isPointerType()) {
- assert(ArgLVal.getType()->isPointerType());
- ArgAddr = CGF.EmitLoadOfPointer(
- ArgAddr, ArgLVal.getType()->castAs<PointerType>());
- }
+ if (ArgLVal.getType()->isLValueReferenceType()) {
+ ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
+ } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
+ assert(ArgLVal.getType()->isPointerType());
+ ArgAddr = CGF.EmitLoadOfPointer(
+ ArgAddr, ArgLVal.getType()->castAs<PointerType>());
}
if (!FO.RegisterCastedArgsOnly) {
LocalAddrs.insert(
@@ -525,14 +500,12 @@ static llvm::Function *emitOutlinedFunctionPrologue(
assert(!FD->getType()->isAnyPointerType() &&
"Not expecting a captured pointer.");
const VarDecl *Var = I->getCapturedVar();
- QualType VarTy = Var->getType();
- LocalAddrs.insert(
- {Args[Cnt],
- {Var, FO.UIntPtrCastRequired
- ? castValueFromUintptr(CGF, I->getLocation(),
- FD->getType(), Args[Cnt]->getName(),
- ArgLVal, VarTy->isReferenceType())
- : ArgLVal.getAddress()}});
+ LocalAddrs.insert({Args[Cnt],
+ {Var, FO.UIntPtrCastRequired
+ ? castValueFromUintptr(
+ CGF, I->getLocation(), FD->getType(),
+ Args[Cnt]->getName(), ArgLVal)
+ : ArgLVal.getAddress()}});
} else {
// If 'this' is captured, load it into CXXThisValue.
assert(I->capturesThis());
@@ -568,16 +541,20 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
Out.str());
llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs,
VLASizes, CXXThisValue, FO);
+ CodeGenFunction::OMPPrivateScope LocalScope(*this);
for (const auto &LocalAddrPair : LocalAddrs) {
if (LocalAddrPair.second.first) {
- setAddrOfLocalVar(LocalAddrPair.second.first,
- LocalAddrPair.second.second);
+ LocalScope.addPrivate(LocalAddrPair.second.first, [&LocalAddrPair]() {
+ return LocalAddrPair.second.second;
+ });
}
}
+ (void)LocalScope.Privatize();
for (const auto &VLASizePair : VLASizes)
VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
PGO.assignRegionCounters(GlobalDecl(CD), F);
CapturedStmtInfo->EmitBody(*this, CD->getBody());
+ (void)LocalScope.ForceCleanup();
FinishFunction(CD->getBodyRBrace());
if (!NeedWrapperFunction)
return F;
@@ -727,6 +704,9 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
OMPPrivateScope &PrivateScope) {
if (!HaveInsertPoint())
return false;
+ bool DeviceConstTarget =
+ getLangOpts().OpenMPIsDevice &&
+ isOpenMPTargetExecutionDirective(D.getDirectiveKind());
bool FirstprivateIsLastprivate = false;
llvm::DenseSet<const VarDecl *> Lastprivates;
for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
@@ -749,24 +729,54 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
bool ThisFirstprivateIsLastprivate =
Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD);
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD &&
- !FD->getType()->isReferenceType()) {
+ !FD->getType()->isReferenceType() &&
+ (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
++IRef;
++InitsRef;
continue;
}
+ // Do not emit copy for firstprivate constant variables in target regions,
+ // captured by reference.
+ if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) &&
+ FD && FD->getType()->isReferenceType() &&
+ (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
+ (void)CGM.getOpenMPRuntime().registerTargetFirstprivateCopy(*this,
+ OrigVD);
+ ++IRef;
+ ++InitsRef;
+ continue;
+ }
FirstprivateIsLastprivate =
FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
- const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
const auto *VDInit =
cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
bool IsRegistered;
DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
/*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
(*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
- LValue OriginalLVal = EmitLValue(&DRE);
+ LValue OriginalLVal;
+ if (!FD) {
+ // Check if the firstprivate variable is just a constant value.
+ ConstantEmission CE = tryEmitAsConstant(&DRE);
+ if (CE && !CE.isReference()) {
+ // Constant value, no need to create a copy.
+ ++IRef;
+ ++InitsRef;
+ continue;
+ }
+ if (CE && CE.isReference()) {
+ OriginalLVal = CE.getReferenceLValue(*this, &DRE);
+ } else {
+ assert(!CE && "Expected non-constant firstprivate.");
+ OriginalLVal = EmitLValue(&DRE);
+ }
+ } else {
+ OriginalLVal = EmitLValue(&DRE);
+ }
QualType Type = VD->getType();
if (Type->isArrayType()) {
// Emit VarDecl with copy init for arrays.
@@ -1227,7 +1237,7 @@ static void emitCommonOMPParallelDirective(
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
const CodeGenBoundParametersTy &CodeGenBoundParameters) {
const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
- llvm::Value *OutlinedFn =
+ llvm::Function *OutlinedFn =
CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
@@ -1518,8 +1528,9 @@ void CodeGenFunction::EmitOMPPrivateLoopCounters(
I < E; ++I) {
const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I));
const auto *VD = cast<VarDecl>(DRE->getDecl());
- // Override only those variables that are really emitted already.
- if (LocalDeclMap.count(VD)) {
+ // Override only those variables that can be captured to avoid re-emission
+ // of the variables declared within the loops.
+ if (DRE->refersToEnclosingVariableOrCapture()) {
(void)LoopScope.addPrivate(VD, [this, DRE, VD]() {
return CreateMemTemp(DRE->getType(), VD->getName());
});
@@ -2893,6 +2904,8 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
OMPPrivateScope Scope(CGF);
if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
!Data.LastprivateVars.empty()) {
+ llvm::FunctionType *CopyFnTy = llvm::FunctionType::get(
+ CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true);
enum { PrivatesParam = 2, CopyFnParam = 3 };
llvm::Value *CopyFn = CGF.Builder.CreateLoad(
CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
@@ -2925,8 +2938,8 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
PrivatePtrs.emplace_back(VD, PrivatePtr);
CallArgs.push_back(PrivatePtr.getPointer());
}
- CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(),
- CopyFn, CallArgs);
+ CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
+ CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
for (const auto &Pair : LastprivateDstsOrigs) {
const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD),
@@ -3028,7 +3041,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
Action.Enter(CGF);
BodyGen(CGF);
};
- llvm::Value *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
+ llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
Data.NumberOfParts);
OMPLexicalScope Scope(*this, S);
@@ -3106,7 +3119,8 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
PVD = createImplicitFirstprivateForType(
getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc());
QualType SizesType = getContext().getConstantArrayType(
- getContext().getSizeType(), ArrSize, ArrayType::Normal,
+ getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1),
+ ArrSize, ArrayType::Normal,
/*IndexTypeQuals=*/0);
SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD,
S.getBeginLoc());
@@ -3127,6 +3141,8 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
// Set proper addresses for generated private copies.
OMPPrivateScope Scope(CGF);
if (!Data.FirstprivateVars.empty()) {
+ llvm::FunctionType *CopyFnTy = llvm::FunctionType::get(
+ CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true);
enum { PrivatesParam = 2, CopyFnParam = 3 };
llvm::Value *CopyFn = CGF.Builder.CreateLoad(
CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
@@ -3144,8 +3160,8 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
PrivatePtrs.emplace_back(VD, PrivatePtr);
CallArgs.push_back(PrivatePtr.getPointer());
}
- CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(),
- CopyFn, CallArgs);
+ CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
+ CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
for (const auto &Pair : PrivatePtrs) {
Address Replacement(CGF.Builder.CreateLoad(Pair.second),
CGF.getContext().getDeclAlign(Pair.first));
@@ -3156,18 +3172,18 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
(void)Scope.Privatize();
if (InputInfo.NumberOfTargetItems > 0) {
InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP(
- CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0, CGF.getPointerSize());
+ CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0);
InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP(
- CGF.GetAddrOfLocalVar(PVD), /*Index=*/0, CGF.getPointerSize());
+ CGF.GetAddrOfLocalVar(PVD), /*Index=*/0);
InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP(
- CGF.GetAddrOfLocalVar(SVD), /*Index=*/0, CGF.getSizeSize());
+ CGF.GetAddrOfLocalVar(SVD), /*Index=*/0);
}
Action.Enter(CGF);
OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false);
BodyGen(CGF);
};
- llvm::Value *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
+ llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, /*Tied=*/true,
Data.NumberOfParts);
llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0);
@@ -3200,7 +3216,7 @@ void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
CGF.EmitStmt(CS->getCapturedStmt());
};
auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
- IfCond](CodeGenFunction &CGF, llvm::Value *OutlinedFn,
+ IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn,
const OMPTaskDataTy &Data) {
CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn,
SharedsTy, CapturedStruct, IfCond,
@@ -3587,7 +3603,7 @@ static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
CGF.EmitAtomicStore(RVal, LVal,
IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
: llvm::AtomicOrdering::Monotonic,
- LVal.isVolatile(), /*IsInit=*/false);
+ LVal.isVolatile(), /*isInit=*/false);
}
}
@@ -3933,6 +3949,8 @@ static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
case OMPC_in_reduction:
case OMPC_safelen:
case OMPC_simdlen:
+ case OMPC_allocator:
+ case OMPC_allocate:
case OMPC_collapse:
case OMPC_default:
case OMPC_seq_cst:
@@ -4077,11 +4095,12 @@ static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
// Emit calculation of the iterations count.
llvm::Value *NumIterations = CGF.EmitScalarExpr(D.getNumIterations());
NumIterations = CGF.Builder.CreateIntCast(NumIterations, CGF.Int64Ty,
- /*IsSigned=*/false);
+ /*isSigned=*/false);
return NumIterations;
};
- CGM.getOpenMPRuntime().emitTargetNumIterationsCall(CGF, S, Device,
- SizeEmitter);
+ if (IsOffloadEntry)
+ CGM.getOpenMPRuntime().emitTargetNumIterationsCall(CGF, S, Device,
+ SizeEmitter);
CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device);
}
@@ -4124,7 +4143,7 @@ static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) {
const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams);
- llvm::Value *OutlinedFn =
+ llvm::Function *OutlinedFn =
CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction(
S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
@@ -4970,7 +4989,7 @@ void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
}
};
auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
- IfCond](CodeGenFunction &CGF, llvm::Value *OutlinedFn,
+ IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn,
const OMPTaskDataTy &Data) {
auto &&CodeGen = [&S, OutlinedFn, SharedsTy, CapturedStruct, IfCond,
&Data](CodeGenFunction &CGF, PrePostActionTy &) {
@@ -5077,4 +5096,3 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
: D.getDirectiveKind(),
CodeGen);
}
-
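
Among the changes above, EmitOMPFirstprivateClause now avoids materializing a device-side copy for constant firstprivate values in target regions and skips copies for values that fold to constants. A hypothetical offloading kernel of roughly that shape; the pragma form and the flags (-fopenmp -fopenmp-targets=<device-triple>) are placeholders, not taken from the patch:

int scaled_sum(const int *in, int n, int scale_in) {
  const int Scale = scale_in;  // constant value made firstprivate below
  int sum = 0;
#pragma omp target teams distribute parallel for map(to : in[0:n]) \
    firstprivate(Scale) reduction(+ : sum)
  for (int i = 0; i < n; ++i)
    sum += Scale * in[i];
  return sum;
}
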
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
index fbd8146702a9..e79f3f3dd8bc 100644
--- a/lib/CodeGen/CGVTT.cpp
+++ b/lib/CodeGen/CGVTT.cpp
@@ -1,9 +1,8 @@
//===--- CGVTT.cpp - Emit LLVM Code for C++ VTTs --------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index bfb089ff908e..3cb3d3544838 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -1,9 +1,8 @@
//===--- CGVTables.cpp - Emit LLVM Code for C++ vtables -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -279,7 +278,7 @@ void CodeGenFunction::FinishThunk() {
FinishFunction();
}
-void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Constant *CalleePtr,
+void CodeGenFunction::EmitCallAndReturnForThunk(llvm::FunctionCallee Callee,
const ThunkInfo *Thunk,
bool IsUnprototyped) {
assert(isa<CXXMethodDecl>(CurGD.getDecl()) &&
@@ -304,7 +303,7 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Constant *CalleePtr,
CGM.ErrorUnsupported(
MD, "non-trivial argument copy for return-adjusting thunk");
}
- EmitMustTailThunk(CurGD, AdjustedThisPtr, CalleePtr);
+ EmitMustTailThunk(CurGD, AdjustedThisPtr, Callee);
return;
}
@@ -327,7 +326,7 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Constant *CalleePtr,
#ifndef NDEBUG
const CGFunctionInfo &CallFnInfo = CGM.getTypes().arrangeCXXMethodCall(
- CallArgs, FPT, RequiredArgs::forPrototypePlus(FPT, 1, MD), PrefixArgs);
+ CallArgs, FPT, RequiredArgs::forPrototypePlus(FPT, 1), PrefixArgs);
assert(CallFnInfo.getRegParm() == CurFnInfo->getRegParm() &&
CallFnInfo.isNoReturn() == CurFnInfo->isNoReturn() &&
CallFnInfo.getCallingConvention() == CurFnInfo->getCallingConvention());
@@ -354,9 +353,9 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Constant *CalleePtr,
Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
// Now emit our call.
- llvm::Instruction *CallOrInvoke;
- CGCallee Callee = CGCallee::forDirect(CalleePtr, CurGD);
- RValue RV = EmitCall(*CurFnInfo, Callee, Slot, CallArgs, &CallOrInvoke);
+ llvm::CallBase *CallOrInvoke;
+ RValue RV = EmitCall(*CurFnInfo, CGCallee::forDirect(Callee, CurGD), Slot,
+ CallArgs, &CallOrInvoke);
// Consider return adjustment if we have ThunkInfo.
if (Thunk && !Thunk->Return.isEmpty())
@@ -376,7 +375,7 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Constant *CalleePtr,
void CodeGenFunction::EmitMustTailThunk(GlobalDecl GD,
llvm::Value *AdjustedThisPtr,
- llvm::Value *CalleePtr) {
+ llvm::FunctionCallee Callee) {
// Emitting a musttail call thunk doesn't use any of the CGCall.cpp machinery
// to translate AST arguments into LLVM IR arguments. For thunks, we know
// that the caller prototype more or less matches the callee prototype with
@@ -405,14 +404,14 @@ void CodeGenFunction::EmitMustTailThunk(GlobalDecl GD,
// Emit the musttail call manually. Even if the prologue pushed cleanups, we
// don't actually want to run them.
- llvm::CallInst *Call = Builder.CreateCall(CalleePtr, Args);
+ llvm::CallInst *Call = Builder.CreateCall(Callee, Args);
Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
// Apply the standard set of call attributes.
unsigned CallingConv;
llvm::AttributeList Attrs;
- CGM.ConstructAttributeList(CalleePtr->getName(), *CurFnInfo, GD, Attrs,
- CallingConv, /*AttrOnCallSite=*/true);
+ CGM.ConstructAttributeList(Callee.getCallee()->getName(), *CurFnInfo, GD,
+ Attrs, CallingConv, /*AttrOnCallSite=*/true);
Call->setAttributes(Attrs);
Call->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
@@ -450,7 +449,8 @@ void CodeGenFunction::generateThunk(llvm::Function *Fn,
Callee = llvm::ConstantExpr::getBitCast(Callee, Fn->getType());
// Make the call and return the result.
- EmitCallAndReturnForThunk(Callee, &Thunk, IsUnprototyped);
+ EmitCallAndReturnForThunk(llvm::FunctionCallee(Fn->getFunctionType(), Callee),
+ &Thunk, IsUnprototyped);
}
static bool shouldEmitVTableThunk(CodeGenModule &CGM, const CXXMethodDecl *MD,
@@ -649,7 +649,8 @@ void CodeGenVTables::addVTableComponent(
auto getSpecialVirtualFn = [&](StringRef name) {
llvm::FunctionType *fnTy =
llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
- llvm::Constant *fn = CGM.CreateRuntimeFunction(fnTy, name);
+ llvm::Constant *fn = cast<llvm::Constant>(
+ CGM.CreateRuntimeFunction(fnTy, name).getCallee());
if (auto f = dyn_cast<llvm::Function>(fn))
f->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
return llvm::ConstantExpr::getBitCast(fn, CGM.Int8PtrTy);
@@ -760,7 +761,6 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
// Create the variable that will hold the construction vtable.
llvm::GlobalVariable *VTable =
CGM.CreateOrReplaceCXXRuntimeVariable(Name, VTType, Linkage, Align);
- CGM.setGVProperties(VTable, RD);
// V-tables are always unnamed_addr.
VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
@@ -774,6 +774,11 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
createVTableInitializer(components, *VTLayout, RTTI);
components.finishAndSetAsInitializer(VTable);
+ // Set properties only after the initializer has been set to ensure that the
+ // GV is treated as definition and not declaration.
+ assert(!VTable->isDeclaration() && "Shouldn't set properties on declaration");
+ CGM.setGVProperties(VTable, RD);
+
CGM.EmitVTableTypeMetadata(VTable, *VTLayout.get());
return VTable;
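
The thunk and vtable changes above migrate from bare llvm::Constant* callees to llvm::FunctionCallee, which carries the callee's function type alongside the pointer. A small sketch of that API shape against LLVM headers of roughly this vintage; the runtime-function name and the helper are invented for illustration:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"

llvm::CallInst *emitRuntimeCall(llvm::Module &M, llvm::IRBuilder<> &B,
                                llvm::Value *Arg) {
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      B.getVoidTy(), {B.getInt8PtrTy()}, /*isVarArg=*/false);
  // getOrInsertFunction returns a FunctionCallee (type + callee) here;
  // older trees returned a bare llvm::Constant*.
  llvm::FunctionCallee Callee = M.getOrInsertFunction("__my_runtime_hook", FTy);
  return B.CreateCall(Callee, {Arg});
}
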
diff --git a/lib/CodeGen/CGVTables.h b/lib/CodeGen/CGVTables.h
index 6377659e4cb3..a47841bfc6c3 100644
--- a/lib/CodeGen/CGVTables.h
+++ b/lib/CodeGen/CGVTables.h
@@ -1,9 +1,8 @@
//===--- CGVTables.h - Emit LLVM Code for C++ vtables -----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h
index da8a8efb840b..71f95abe488a 100644
--- a/lib/CodeGen/CGValue.h
+++ b/lib/CodeGen/CGValue.h
@@ -1,9 +1,8 @@
//===-- CGValue.h - LLVM CodeGen wrappers for llvm::Value* ------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CodeGen/CodeGenABITypes.cpp b/lib/CodeGen/CodeGenABITypes.cpp
index 27f5d53ffe11..6b6a116cf259 100644
--- a/lib/CodeGen/CodeGenABITypes.cpp
+++ b/lib/CodeGen/CodeGenABITypes.cpp
@@ -1,9 +1,8 @@
//==--- CodeGenABITypes.cpp - Convert Clang types to LLVM types for ABI ----==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -35,9 +34,8 @@ CodeGen::arrangeObjCMessageSendSignature(CodeGenModule &CGM,
const CGFunctionInfo &
CodeGen::arrangeFreeFunctionType(CodeGenModule &CGM,
- CanQual<FunctionProtoType> Ty,
- const FunctionDecl *FD) {
- return CGM.getTypes().arrangeFreeFunctionType(Ty, FD);
+ CanQual<FunctionProtoType> Ty) {
+ return CGM.getTypes().arrangeFreeFunctionType(Ty);
}
const CGFunctionInfo &
@@ -61,14 +59,14 @@ CodeGen::arrangeFreeFunctionCall(CodeGenModule &CGM,
FunctionType::ExtInfo info,
RequiredArgs args) {
return CGM.getTypes().arrangeLLVMFunctionInfo(
- returnType, /*IsInstanceMethod=*/false, /*IsChainCall=*/false, argTypes,
+ returnType, /*instanceMethod=*/false, /*chainCall=*/false, argTypes,
info, {}, args);
}
llvm::FunctionType *
CodeGen::convertFreeFunctionType(CodeGenModule &CGM, const FunctionDecl *FD) {
assert(FD != nullptr && "Expected a non-null function declaration!");
- llvm::Type *T = CGM.getTypes().ConvertFunctionType(FD->getType(), FD);
+ llvm::Type *T = CGM.getTypes().ConvertType(FD->getType());
if (auto FT = dyn_cast<llvm::FunctionType>(T))
return FT;
diff --git a/lib/CodeGen/CodeGenAction.cpp b/lib/CodeGen/CodeGenAction.cpp
index fd4506f2d197..0ae9ea427d65 100644
--- a/lib/CodeGen/CodeGenAction.cpp
+++ b/lib/CodeGen/CodeGenAction.cpp
@@ -1,9 +1,8 @@
//===--- CodeGenAction.cpp - LLVM Code Generation Frontend Action ---------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -20,6 +19,7 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/BackendUtil.h"
#include "clang/CodeGen/ModuleBuilder.h"
+#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Lex/Preprocessor.h"
@@ -31,6 +31,7 @@
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/RemarkStreamer.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/Linker/Linker.h"
#include "llvm/Pass.h"
@@ -261,28 +262,37 @@ namespace clang {
Ctx.getDiagnosticHandler();
Ctx.setDiagnosticHandler(llvm::make_unique<ClangDiagnosticHandler>(
CodeGenOpts, this));
- Ctx.setDiagnosticsHotnessRequested(CodeGenOpts.DiagnosticsWithHotness);
- if (CodeGenOpts.DiagnosticsHotnessThreshold != 0)
- Ctx.setDiagnosticsHotnessThreshold(
- CodeGenOpts.DiagnosticsHotnessThreshold);
-
- std::unique_ptr<llvm::ToolOutputFile> OptRecordFile;
- if (!CodeGenOpts.OptRecordFile.empty()) {
- std::error_code EC;
- OptRecordFile = llvm::make_unique<llvm::ToolOutputFile>(
- CodeGenOpts.OptRecordFile, EC, sys::fs::F_None);
- if (EC) {
- Diags.Report(diag::err_cannot_open_file) <<
- CodeGenOpts.OptRecordFile << EC.message();
- return;
- }
-
- Ctx.setDiagnosticsOutputFile(
- llvm::make_unique<yaml::Output>(OptRecordFile->os()));
- if (CodeGenOpts.getProfileUse() != CodeGenOptions::ProfileNone)
- Ctx.setDiagnosticsHotnessRequested(true);
+ Expected<std::unique_ptr<llvm::ToolOutputFile>> OptRecordFileOrErr =
+ setupOptimizationRemarks(Ctx, CodeGenOpts.OptRecordFile,
+ CodeGenOpts.OptRecordPasses,
+ CodeGenOpts.OptRecordFormat,
+ CodeGenOpts.DiagnosticsWithHotness,
+ CodeGenOpts.DiagnosticsHotnessThreshold);
+
+ if (Error E = OptRecordFileOrErr.takeError()) {
+ handleAllErrors(
+ std::move(E),
+ [&](const RemarkSetupFileError &E) {
+ Diags.Report(diag::err_cannot_open_file)
+ << CodeGenOpts.OptRecordFile << E.message();
+ },
+ [&](const RemarkSetupPatternError &E) {
+ Diags.Report(diag::err_drv_optimization_remark_pattern)
+ << E.message() << CodeGenOpts.OptRecordPasses;
+ },
+ [&](const RemarkSetupFormatError &E) {
+ Diags.Report(diag::err_drv_optimization_remark_format)
+ << CodeGenOpts.OptRecordFormat;
+ });
+ return;
}
+ std::unique_ptr<llvm::ToolOutputFile> OptRecordFile =
+ std::move(*OptRecordFileOrErr);
+
+ if (OptRecordFile &&
+ CodeGenOpts.getProfileUse() != CodeGenOptions::ProfileNone)
+ Ctx.setDiagnosticsHotnessRequested(true);
// Link each LinkModule into our module.
if (LinkInModules())
@@ -936,7 +946,8 @@ static void BitcodeInlineAsmDiagHandler(const llvm::SMDiagnostic &SM,
Diags->Report(DiagID).AddString("cannot compile inline asm");
}
-std::unique_ptr<llvm::Module> CodeGenAction::loadModule(MemoryBufferRef MBRef) {
+std::unique_ptr<llvm::Module>
+CodeGenAction::loadModule(MemoryBufferRef MBRef) {
CompilerInstance &CI = getCompilerInstance();
SourceManager &SM = CI.getSourceManager();
@@ -1014,7 +1025,7 @@ void CodeGenAction::ExecuteAction() {
bool Invalid;
SourceManager &SM = CI.getSourceManager();
FileID FID = SM.getMainFileID();
- llvm::MemoryBuffer *MainFile = SM.getBuffer(FID, &Invalid);
+ const llvm::MemoryBuffer *MainFile = SM.getBuffer(FID, &Invalid);
if (Invalid)
return;
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index 1713e40c312b..eafe26674434 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -1,9 +1,8 @@
//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -256,6 +255,7 @@ llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
delete ReturnBlock.getBlock();
+ ReturnBlock = JumpDest();
} else
EmitBlock(ReturnBlock.getBlock());
return llvm::DebugLoc();
@@ -275,6 +275,7 @@ llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
Builder.SetInsertPoint(BI->getParent());
BI->eraseFromParent();
delete ReturnBlock.getBlock();
+ ReturnBlock = JumpDest();
return Loc;
}
}
@@ -449,6 +450,19 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
// 5. Width of vector arguments and return types for functions called by this
// function.
CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));
+
+ // If we generated an unreachable return block, delete it now.
+ if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
+ Builder.ClearInsertionPoint();
+ ReturnBlock.getBlock()->eraseFromParent();
+ }
+ if (ReturnValue.isValid()) {
+ auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
+ if (RetAlloca && RetAlloca->use_empty()) {
+ RetAlloca->eraseFromParent();
+ ReturnValue = Address::invalid();
+ }
+ }
}
/// ShouldInstrumentFunction - Return true if the current function should be
@@ -522,205 +536,6 @@ CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
"decoded_addr");
}
-static void removeImageAccessQualifier(std::string& TyName) {
- std::string ReadOnlyQual("__read_only");
- std::string::size_type ReadOnlyPos = TyName.find(ReadOnlyQual);
- if (ReadOnlyPos != std::string::npos)
- // "+ 1" for the space after access qualifier.
- TyName.erase(ReadOnlyPos, ReadOnlyQual.size() + 1);
- else {
- std::string WriteOnlyQual("__write_only");
- std::string::size_type WriteOnlyPos = TyName.find(WriteOnlyQual);
- if (WriteOnlyPos != std::string::npos)
- TyName.erase(WriteOnlyPos, WriteOnlyQual.size() + 1);
- else {
- std::string ReadWriteQual("__read_write");
- std::string::size_type ReadWritePos = TyName.find(ReadWriteQual);
- if (ReadWritePos != std::string::npos)
- TyName.erase(ReadWritePos, ReadWriteQual.size() + 1);
- }
- }
-}
-
-// Returns the address space id that should be produced to the
-// kernel_arg_addr_space metadata. This is always fixed to the ids
-// as specified in the SPIR 2.0 specification in order to differentiate
-// for example in clGetKernelArgInfo() implementation between the address
-// spaces with targets without unique mapping to the OpenCL address spaces
-// (basically all single AS CPUs).
-static unsigned ArgInfoAddressSpace(LangAS AS) {
- switch (AS) {
- case LangAS::opencl_global: return 1;
- case LangAS::opencl_constant: return 2;
- case LangAS::opencl_local: return 3;
- case LangAS::opencl_generic: return 4; // Not in SPIR 2.0 specs.
- default:
- return 0; // Assume private.
- }
-}
-
-// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
-// information in the program executable. The argument information stored
-// includes the argument name, its type, the address and access qualifiers used.
-static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
- CodeGenModule &CGM, llvm::LLVMContext &Context,
- CGBuilderTy &Builder, ASTContext &ASTCtx) {
- // Create MDNodes that represent the kernel arg metadata.
- // Each MDNode is a list in the form of "key", N number of values which is
- // the same number of values as their are kernel arguments.
-
- const PrintingPolicy &Policy = ASTCtx.getPrintingPolicy();
-
- // MDNode for the kernel argument address space qualifiers.
- SmallVector<llvm::Metadata *, 8> addressQuals;
-
- // MDNode for the kernel argument access qualifiers (images only).
- SmallVector<llvm::Metadata *, 8> accessQuals;
-
- // MDNode for the kernel argument type names.
- SmallVector<llvm::Metadata *, 8> argTypeNames;
-
- // MDNode for the kernel argument base type names.
- SmallVector<llvm::Metadata *, 8> argBaseTypeNames;
-
- // MDNode for the kernel argument type qualifiers.
- SmallVector<llvm::Metadata *, 8> argTypeQuals;
-
- // MDNode for the kernel argument names.
- SmallVector<llvm::Metadata *, 8> argNames;
-
- for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
- const ParmVarDecl *parm = FD->getParamDecl(i);
- QualType ty = parm->getType();
- std::string typeQuals;
-
- if (ty->isPointerType()) {
- QualType pointeeTy = ty->getPointeeType();
-
- // Get address qualifier.
- addressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(
- ArgInfoAddressSpace(pointeeTy.getAddressSpace()))));
-
- // Get argument type name.
- std::string typeName =
- pointeeTy.getUnqualifiedType().getAsString(Policy) + "*";
-
- // Turn "unsigned type" to "utype"
- std::string::size_type pos = typeName.find("unsigned");
- if (pointeeTy.isCanonical() && pos != std::string::npos)
- typeName.erase(pos+1, 8);
-
- argTypeNames.push_back(llvm::MDString::get(Context, typeName));
-
- std::string baseTypeName =
- pointeeTy.getUnqualifiedType().getCanonicalType().getAsString(
- Policy) +
- "*";
-
- // Turn "unsigned type" to "utype"
- pos = baseTypeName.find("unsigned");
- if (pos != std::string::npos)
- baseTypeName.erase(pos+1, 8);
-
- argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));
-
- // Get argument type qualifiers:
- if (ty.isRestrictQualified())
- typeQuals = "restrict";
- if (pointeeTy.isConstQualified() ||
- (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
- typeQuals += typeQuals.empty() ? "const" : " const";
- if (pointeeTy.isVolatileQualified())
- typeQuals += typeQuals.empty() ? "volatile" : " volatile";
- } else {
- uint32_t AddrSpc = 0;
- bool isPipe = ty->isPipeType();
- if (ty->isImageType() || isPipe)
- AddrSpc = ArgInfoAddressSpace(LangAS::opencl_global);
-
- addressQuals.push_back(
- llvm::ConstantAsMetadata::get(Builder.getInt32(AddrSpc)));
-
- // Get argument type name.
- std::string typeName;
- if (isPipe)
- typeName = ty.getCanonicalType()->getAs<PipeType>()->getElementType()
- .getAsString(Policy);
- else
- typeName = ty.getUnqualifiedType().getAsString(Policy);
-
- // Turn "unsigned type" to "utype"
- std::string::size_type pos = typeName.find("unsigned");
- if (ty.isCanonical() && pos != std::string::npos)
- typeName.erase(pos+1, 8);
-
- std::string baseTypeName;
- if (isPipe)
- baseTypeName = ty.getCanonicalType()->getAs<PipeType>()
- ->getElementType().getCanonicalType()
- .getAsString(Policy);
- else
- baseTypeName =
- ty.getUnqualifiedType().getCanonicalType().getAsString(Policy);
-
- // Remove access qualifiers on images
- // (as they are inseparable from type in clang implementation,
- // but OpenCL spec provides a special query to get access qualifier
- // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER):
- if (ty->isImageType()) {
- removeImageAccessQualifier(typeName);
- removeImageAccessQualifier(baseTypeName);
- }
-
- argTypeNames.push_back(llvm::MDString::get(Context, typeName));
-
- // Turn "unsigned type" to "utype"
- pos = baseTypeName.find("unsigned");
- if (pos != std::string::npos)
- baseTypeName.erase(pos+1, 8);
-
- argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));
-
- if (isPipe)
- typeQuals = "pipe";
- }
-
- argTypeQuals.push_back(llvm::MDString::get(Context, typeQuals));
-
- // Get image and pipe access qualifier:
- if (ty->isImageType()|| ty->isPipeType()) {
- const Decl *PDecl = parm;
- if (auto *TD = dyn_cast<TypedefType>(ty))
- PDecl = TD->getDecl();
- const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
- if (A && A->isWriteOnly())
- accessQuals.push_back(llvm::MDString::get(Context, "write_only"));
- else if (A && A->isReadWrite())
- accessQuals.push_back(llvm::MDString::get(Context, "read_write"));
- else
- accessQuals.push_back(llvm::MDString::get(Context, "read_only"));
- } else
- accessQuals.push_back(llvm::MDString::get(Context, "none"));
-
- // Get argument name.
- argNames.push_back(llvm::MDString::get(Context, parm->getName()));
- }
-
- Fn->setMetadata("kernel_arg_addr_space",
- llvm::MDNode::get(Context, addressQuals));
- Fn->setMetadata("kernel_arg_access_qual",
- llvm::MDNode::get(Context, accessQuals));
- Fn->setMetadata("kernel_arg_type",
- llvm::MDNode::get(Context, argTypeNames));
- Fn->setMetadata("kernel_arg_base_type",
- llvm::MDNode::get(Context, argBaseTypeNames));
- Fn->setMetadata("kernel_arg_type_qual",
- llvm::MDNode::get(Context, argTypeQuals));
- if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
- Fn->setMetadata("kernel_arg_name",
- llvm::MDNode::get(Context, argNames));
-}
-
void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
llvm::Function *Fn)
{
@@ -729,7 +544,7 @@ void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
llvm::LLVMContext &Context = getLLVMContext();
- GenOpenCLArgMetadata(FD, Fn, CGM, Context, Builder, getContext());
+ CGM.GenOpenCLArgMetadata(Fn, FD, this);
if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
QualType HintQTy = A->getTypeHint();
@@ -881,6 +696,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
if (SanOpts.hasOneOf(SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress))
Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
+ if (SanOpts.has(SanitizerKind::MemTag))
+ Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
if (SanOpts.has(SanitizerKind::Thread))
Fn->addFnAttr(llvm::Attribute::SanitizeThread);
if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
@@ -1080,6 +897,13 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
if (CurFnInfo->getReturnInfo().isSRetAfterThis())
++AI;
ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
+ if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
+ ReturnValuePointer =
+ CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
+ Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast(
+ ReturnValue.getPointer(), Int8PtrTy),
+ ReturnValuePointer);
+ }
} else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
!hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
// Load the sret pointer from the argument struct and return into that.
@@ -1087,6 +911,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
llvm::Function::arg_iterator EI = CurFn->arg_end();
--EI;
llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
+ ReturnValuePointer = Address(Addr, getPointerAlign());
Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
} else {
@@ -2135,6 +1960,7 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
case Type::Attributed:
case Type::SubstTemplateTypeParm:
case Type::PackExpansion:
+ case Type::MacroQualified:
// Keep walking after single level desugaring.
type = type.getSingleStepDesugaredType(getContext());
break;
@@ -2174,7 +2000,7 @@ Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
const APValue &Init) {
- assert(!Init.isUninit() && "Invalid DeclRefExpr initializer!");
+ assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
if (CGDebugInfo *Dbg = getDebugInfo())
if (CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo)
Dbg->EmitGlobalVariable(E->getDecl(), Init);
@@ -2250,7 +2076,7 @@ void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
OffsetValue);
}
-llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
+llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
llvm::Value *AnnotatedVal,
StringRef AnnotationStr,
SourceLocation Location) {
@@ -2278,7 +2104,7 @@ Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
llvm::Value *V = Addr.getPointer();
llvm::Type *VTy = V->getType();
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
+ llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
CGM.Int8PtrTy);
for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
@@ -2355,6 +2181,13 @@ static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
// called function.
void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
const FunctionDecl *TargetDecl) {
+ return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
+}
+
+// Emits an error if we don't have a valid set of target features for the
+// called function.
+void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
+ const FunctionDecl *TargetDecl) {
// Early exit if this is an indirect call.
if (!TargetDecl)
return;
@@ -2379,7 +2212,7 @@ void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
return;
StringRef(FeatureList).split(ReqFeatures, ',');
if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
- CGM.getDiags().Report(E->getBeginLoc(), diag::err_builtin_needs_feature)
+ CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
<< TargetDecl->getDeclName()
<< CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
@@ -2405,7 +2238,7 @@ void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
ReqFeatures.push_back(F.getKey());
}
if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
- CGM.getDiags().Report(E->getBeginLoc(), diag::err_function_needs_feature)
+ CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
<< FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
}
}
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index 89cb850ab1b1..c3060d1fb351 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -1,9 +1,8 @@
//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -23,6 +22,7 @@
#include "EHScopeStack.h"
#include "VarBypassDetector.h"
#include "clang/AST/CharUnits.h"
+#include "clang/AST/CurrentSourceLocExprScope.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
@@ -48,7 +48,6 @@ class Module;
class SwitchInst;
class Twine;
class Value;
-class CallSite;
}
namespace clang {
@@ -115,7 +114,7 @@ enum TypeEvaluationKind {
SANITIZER_CHECK(DivremOverflow, divrem_overflow, 0) \
SANITIZER_CHECK(DynamicTypeCacheMiss, dynamic_type_cache_miss, 0) \
SANITIZER_CHECK(FloatCastOverflow, float_cast_overflow, 0) \
- SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 0) \
+ SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 1) \
SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0) \
SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0) \
SANITIZER_CHECK(LoadInvalidValue, load_invalid_value, 0) \
@@ -328,6 +327,10 @@ public:
/// value. This is invalid iff the function has no return value.
Address ReturnValue = Address::invalid();
+ /// ReturnValuePointer - The temporary alloca to hold a pointer to sret.
+ /// This is invalid if sret is not in use.
+ Address ReturnValuePointer = Address::invalid();
+
/// Return true if a label was seen in the current scope.
bool hasLabelBeenSeenInCurrentScope() const {
if (CurLexicalScope)
@@ -477,6 +480,10 @@ public:
/// finally block or filter expression.
bool IsOutlinedSEHHelper = false;
+ /// True if CodeGen currently emits code inside a preserved access index
+ /// region.
+ bool IsInPreservedAIRegion = false;
+
const CodeGen::CGBlockInfo *BlockInfo = nullptr;
llvm::Value *BlockPointer = nullptr;
@@ -568,7 +575,7 @@ public:
JumpDest RethrowDest;
/// A function to call to enter the catch.
- llvm::Constant *BeginCatchFn;
+ llvm::FunctionCallee BeginCatchFn;
/// An i1 variable indicating whether or not the @finally is
/// running for an exception.
@@ -580,8 +587,8 @@ public:
public:
void enter(CodeGenFunction &CGF, const Stmt *Finally,
- llvm::Constant *beginCatchFn, llvm::Constant *endCatchFn,
- llvm::Constant *rethrowFn);
+ llvm::FunctionCallee beginCatchFn,
+ llvm::FunctionCallee endCatchFn, llvm::FunctionCallee rethrowFn);
void exit(CodeGenFunction &CGF);
};
@@ -668,7 +675,8 @@ public:
/// PushDestructorCleanup - Push a cleanup to call the
/// complete-object variant of the given destructor on the object at
/// the given address.
- void PushDestructorCleanup(const CXXDestructorDecl *Dtor, Address Addr);
+ void PushDestructorCleanup(const CXXDestructorDecl *Dtor, QualType T,
+ Address Addr);
/// PopCleanupBlock - Will pop the cleanup entry on the stack and
/// process all branch fixups.
@@ -1395,6 +1403,12 @@ private:
SourceLocation LastStopPoint;
public:
+ /// Source location information about the default argument or member
+ /// initializer expression we're evaluating, if any.
+ CurrentSourceLocExprScope CurSourceLocExprScope;
+ using SourceLocExprScopeGuard =
+ CurrentSourceLocExprScope::SourceLocExprScopeGuard;
+
/// A scope within which we are constructing the fields of an object which
/// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
/// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
@@ -1415,11 +1429,12 @@ public:
/// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
/// is overridden to be the object under construction.
- class CXXDefaultInitExprScope {
+ class CXXDefaultInitExprScope {
public:
- CXXDefaultInitExprScope(CodeGenFunction &CGF)
- : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
- OldCXXThisAlignment(CGF.CXXThisAlignment) {
+ CXXDefaultInitExprScope(CodeGenFunction &CGF, const CXXDefaultInitExpr *E)
+ : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
+ OldCXXThisAlignment(CGF.CXXThisAlignment),
+ SourceLocScope(E, CGF.CurSourceLocExprScope) {
CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getPointer();
CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
}
@@ -1432,6 +1447,12 @@ public:
CodeGenFunction &CGF;
llvm::Value *OldCXXThisValue;
CharUnits OldCXXThisAlignment;
+ SourceLocExprScopeGuard SourceLocScope;
+ };
+
+ struct CXXDefaultArgExprScope : SourceLocExprScopeGuard {
+ CXXDefaultArgExprScope(CodeGenFunction &CGF, const CXXDefaultArgExpr *E)
+ : SourceLocExprScopeGuard(E, CGF.CurSourceLocExprScope) {}
};
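The CurSourceLocExprScope / CXXDefaultArgExprScope plumbing added here is what lets source-location builtins evaluated in default arguments report the call site rather than the declaration site. A user-level illustration of the behavior it supports (a standalone sketch, not taken from the patch):

    #include <cstdio>

    // The default argument is re-evaluated at each call, and the scope guard
    // above is what makes __builtin_LINE() see the caller's location there.
    static void trace(int Line = __builtin_LINE()) {
      std::printf("called from line %d\n", Line);
    }

    int main() {
      trace(); // prints this line's number, not the declaration's
    }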
/// The scope of an ArrayInitLoopExpr. Within this scope, the value of the
@@ -1836,6 +1857,9 @@ public:
void EmitLambdaBlockInvokeBody();
void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD);
+ void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV) {
+ EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
+ }
void EmitAsanPrologueOrEpilogue(bool Prologue);
/// Emit the unified return block, trying to avoid its emission when
@@ -1851,14 +1875,14 @@ public:
void StartThunk(llvm::Function *Fn, GlobalDecl GD,
const CGFunctionInfo &FnInfo, bool IsUnprototyped);
- void EmitCallAndReturnForThunk(llvm::Constant *Callee, const ThunkInfo *Thunk,
- bool IsUnprototyped);
+ void EmitCallAndReturnForThunk(llvm::FunctionCallee Callee,
+ const ThunkInfo *Thunk, bool IsUnprototyped);
void FinishThunk();
/// Emit a musttail call for a thunk with a potentially adjusted this pointer.
void EmitMustTailThunk(GlobalDecl GD, llvm::Value *AdjustedThisPtr,
- llvm::Value *Callee);
+ llvm::FunctionCallee Callee);
/// Generate a thunk for the given method.
void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
@@ -2295,7 +2319,7 @@ public:
}
/// Determine whether a return value slot may overlap some other object.
- AggValueSlot::Overlap_t overlapForReturnValue() {
+ AggValueSlot::Overlap_t getOverlapForReturnValue() {
// FIXME: Assuming no overlap here breaks guaranteed copy elision for base
// class subobjects. These cases may need to be revisited depending on the
// resolution of the relevant core issue.
@@ -2303,20 +2327,13 @@ public:
}
/// Determine whether a field initialization may overlap some other object.
- AggValueSlot::Overlap_t overlapForFieldInit(const FieldDecl *FD) {
- // FIXME: These cases can result in overlap as a result of P0840R0's
- // [[no_unique_address]] attribute. We can still infer NoOverlap in the
- // presence of that attribute if the field is within the nvsize of its
- // containing class, because non-virtual subobjects are initialized in
- // address order.
- return AggValueSlot::DoesNotOverlap;
- }
+ AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD);
/// Determine whether a base class initialization may overlap some other
/// object.
- AggValueSlot::Overlap_t overlapForBaseInit(const CXXRecordDecl *RD,
- const CXXRecordDecl *BaseRD,
- bool IsVirtual);
+ AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *RD,
+ const CXXRecordDecl *BaseRD,
+ bool IsVirtual);
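getOverlapForFieldInit moves out of line here because, as the removed FIXME notes, P0840R0's [[no_unique_address]] can make a field's storage overlap a neighboring empty member. A small illustration of the layout in question (a sketch, not from the patch):

    struct Empty {};

    struct Packed {
      [[no_unique_address]] Empty Tag; // typically placed at the same address
      int Value;                       // as Value, so sizeof(Packed) is usually
    };                                 // just sizeof(int)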
/// Emit an aggregate assignment.
void EmitAggregateAssign(LValue Dest, LValue Src, QualType EltTy) {
@@ -2502,16 +2519,13 @@ public:
void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
bool ForVirtualBase, bool Delegating,
- Address This, const CXXConstructExpr *E,
- AggValueSlot::Overlap_t Overlap,
- bool NewPointerIsChecked);
+ AggValueSlot ThisAVS, const CXXConstructExpr *E);
void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
bool ForVirtualBase, bool Delegating,
Address This, CallArgList &Args,
AggValueSlot::Overlap_t Overlap,
- SourceLocation Loc,
- bool NewPointerIsChecked);
+ SourceLocation Loc, bool NewPointerIsChecked);
/// Emit assumption load for all bases. Must be called only on the
/// most-derived class and not while the object is under construction.
@@ -2541,8 +2555,8 @@ public:
static Destroyer destroyCXXObject;
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
- bool ForVirtualBase, bool Delegating,
- Address This);
+ bool ForVirtualBase, bool Delegating, Address This,
+ QualType ThisTy);
void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
llvm::Type *ElementTy, Address NewPtr,
@@ -2618,10 +2632,12 @@ public:
bool sanitizePerformTypeCheck() const;
/// Emit a check that \p V is the address of storage of the
- /// appropriate size and alignment for an object of type \p Type.
+ /// appropriate size and alignment for an object of type \p Type
+ /// (or if ArraySize is provided, for an array of that bound).
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
QualType Type, CharUnits Alignment = CharUnits::Zero(),
- SanitizerSet SkippedChecks = SanitizerSet());
+ SanitizerSet SkippedChecks = SanitizerSet(),
+ llvm::Value *ArraySize = nullptr);
/// Emit a check that \p Base points into an array object, which
/// we can access at index \p Index. \p Accessed should be \c false if we
@@ -2637,6 +2653,9 @@ public:
/// Converts Location to a DebugLoc, if debug information is enabled.
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location);
+ /// Get the record field index as represented in debug info.
+ unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex);
+
//===--------------------------------------------------------------------===//
// Declaration Emission
@@ -3084,7 +3103,7 @@ public:
bool EmitOMPLinearClauseInit(const OMPLoopDirective &D);
typedef const llvm::function_ref<void(CodeGenFunction & /*CGF*/,
- llvm::Value * /*OutlinedFn*/,
+ llvm::Function * /*OutlinedFn*/,
const OMPTaskDataTy & /*Data*/)>
TaskGenTy;
void EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
@@ -3560,7 +3579,6 @@ public:
LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
- LValue EmitLambdaLValue(const LambdaExpr *E);
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E);
@@ -3580,10 +3598,10 @@ public:
/// LLVM arguments and the types they were derived from.
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
ReturnValueSlot ReturnValue, const CallArgList &Args,
- llvm::Instruction **callOrInvoke, SourceLocation Loc);
+ llvm::CallBase **callOrInvoke, SourceLocation Loc);
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
ReturnValueSlot ReturnValue, const CallArgList &Args,
- llvm::Instruction **callOrInvoke = nullptr) {
+ llvm::CallBase **callOrInvoke = nullptr) {
return EmitCall(CallInfo, Callee, ReturnValue, Args, callOrInvoke,
SourceLocation());
}
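EmitCall's callOrInvoke out-parameter now produces an llvm::CallBase, the common base of CallInst and InvokeInst, replacing the llvm::CallSite wrapper. A caller-side sketch of post-processing the emitted call (hypothetical helper built on the surrounding CodeGen types):

    #include "CGCall.h"
    #include "CodeGenFunction.h"

    using namespace clang;
    using namespace clang::CodeGen;

    // Hypothetical post-processing: the same pointer type covers both calls
    // and invokes, and kind-specific queries are still a dyn_cast away.
    static void emitNothrowCall(CodeGenFunction &CGF, const CGFunctionInfo &FnInfo,
                                const CGCallee &Callee, ReturnValueSlot Ret,
                                const CallArgList &Args) {
      llvm::CallBase *Call = nullptr;
      CGF.EmitCall(FnInfo, Callee, Ret, Args, &Call);
      if (Call) {
        Call->setDoesNotThrow();
        if (auto *Invoke = llvm::dyn_cast<llvm::InvokeInst>(Call))
          (void)Invoke->getUnwindDest(); // e.g. inspect the landing pad block
      }
    }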
@@ -3595,31 +3613,32 @@ public:
CGCallee EmitCallee(const Expr *E);
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl);
+ void checkTargetFeatures(SourceLocation Loc, const FunctionDecl *TargetDecl);
- llvm::CallInst *EmitRuntimeCall(llvm::Value *callee,
+ llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
const Twine &name = "");
- llvm::CallInst *EmitRuntimeCall(llvm::Value *callee,
- ArrayRef<llvm::Value*> args,
+ llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
+ ArrayRef<llvm::Value *> args,
const Twine &name = "");
- llvm::CallInst *EmitNounwindRuntimeCall(llvm::Value *callee,
+ llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
const Twine &name = "");
- llvm::CallInst *EmitNounwindRuntimeCall(llvm::Value *callee,
- ArrayRef<llvm::Value*> args,
+ llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
+ ArrayRef<llvm::Value *> args,
const Twine &name = "");
SmallVector<llvm::OperandBundleDef, 1>
getBundlesForFunclet(llvm::Value *Callee);
- llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
- ArrayRef<llvm::Value *> Args,
- const Twine &Name = "");
- llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee,
- ArrayRef<llvm::Value*> args,
- const Twine &name = "");
- llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee,
- const Twine &name = "");
- void EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
- ArrayRef<llvm::Value*> args);
+ llvm::CallBase *EmitCallOrInvoke(llvm::FunctionCallee Callee,
+ ArrayRef<llvm::Value *> Args,
+ const Twine &Name = "");
+ llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
+ ArrayRef<llvm::Value *> args,
+ const Twine &name = "");
+ llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
+ const Twine &name = "");
+ void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee,
+ ArrayRef<llvm::Value *> args);
CGCallee BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
NestedNameSpecifier *Qual,
@@ -3659,11 +3678,10 @@ public:
llvm::Value *ImplicitParam,
QualType ImplicitParamTy, const CallExpr *E,
CallArgList *RtlArgs);
- RValue EmitCXXDestructorCall(const CXXDestructorDecl *DD,
- const CGCallee &Callee,
- llvm::Value *This, llvm::Value *ImplicitParam,
- QualType ImplicitParamTy, const CallExpr *E,
- StructorType Type);
+ RValue EmitCXXDestructorCall(GlobalDecl Dtor, const CGCallee &Callee,
+ llvm::Value *This, QualType ThisTy,
+ llvm::Value *ImplicitParam,
+ QualType ImplicitParamTy, const CallExpr *E);
RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
ReturnValueSlot ReturnValue);
RValue EmitCXXMemberOrOperatorMemberCallExpr(const CallExpr *CE,
@@ -3727,9 +3745,6 @@ public:
Address PtrOp0, Address PtrOp1,
llvm::Triple::ArchType Arch);
- llvm::Value *EmitISOVolatileLoad(const CallExpr *E);
- llvm::Value *EmitISOVolatileStore(const CallExpr *E);
-
llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
unsigned Modifier, llvm::Type *ArgTy,
const CallExpr *E);
@@ -3825,6 +3840,8 @@ public:
llvm::Type *returnType);
llvm::Value *EmitObjCAllocWithZone(llvm::Value *value,
llvm::Type *returnType);
+ llvm::Value *EmitObjCAllocInit(llvm::Value *value, llvm::Type *resultType);
+
llvm::Value *EmitObjCThrowOperand(const Expr *expr);
llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
@@ -3922,12 +3939,12 @@ public:
void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
bool PerformInit);
- llvm::Constant *createAtExitStub(const VarDecl &VD, llvm::Constant *Dtor,
+ llvm::Function *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor,
llvm::Constant *Addr);
/// Call atexit() with a function that passes the given argument to
/// the given function.
- void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::Constant *fn,
+ void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::FunctionCallee fn,
llvm::Constant *addr);
/// Call atexit() with function dtorStub.
@@ -3960,8 +3977,8 @@ public:
/// variables.
void GenerateCXXGlobalDtorsFunc(
llvm::Function *Fn,
- const std::vector<std::pair<llvm::WeakTrackingVH, llvm::Constant *>>
- &DtorsAndObjects);
+ const std::vector<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
+ llvm::Constant *>> &DtorsAndObjects);
void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
const VarDecl *D,
@@ -3982,16 +3999,14 @@ public:
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
- void EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Dest);
-
RValue EmitAtomicExpr(AtomicExpr *E);
//===--------------------------------------------------------------------===//
// Annotations Emission
//===--------------------------------------------------------------------===//
- /// Emit an annotation call (intrinsic or builtin).
- llvm::Value *EmitAnnotationCall(llvm::Value *AnnotationFn,
+ /// Emit an annotation call (intrinsic).
+ llvm::Value *EmitAnnotationCall(llvm::Function *AnnotationFn,
llvm::Value *AnnotatedVal,
StringRef AnnotationStr,
SourceLocation Location);
@@ -4084,8 +4099,8 @@ public:
/// passing to a runtime sanitizer handler.
llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);
- /// Create a basic block that will call a handler function in a
- /// sanitizer runtime with the provided arguments, and create a conditional
+ /// Create a basic block that will either trap or call a handler function in
+ /// the UBSan runtime with the provided arguments, and create a conditional
/// branch to it.
void EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
SanitizerHandler Check, ArrayRef<llvm::Constant *> StaticArgs,
@@ -4177,14 +4192,19 @@ private:
/// If EmittedExpr is non-null, this will use that instead of re-emitting E.
llvm::Value *evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
llvm::IntegerType *ResType,
- llvm::Value *EmittedE);
+ llvm::Value *EmittedE,
+ bool IsDynamic);
/// Emits the size of E, as required by __builtin_object_size. This
/// function is aware of pass_object_size parameters, and will act accordingly
/// if E is a parameter with the pass_object_size attribute.
llvm::Value *emitBuiltinObjectSize(const Expr *E, unsigned Type,
llvm::IntegerType *ResType,
- llvm::Value *EmittedE);
+ llvm::Value *EmittedE,
+ bool IsDynamic);
+
+ void emitZeroOrPatternForAutoVarInit(QualType type, const VarDecl &D,
+ Address Loc);
public:
#ifndef NDEBUG
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index 244738042cef..6ff72ec045e6 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -1,9 +1,8 @@
//===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -34,6 +33,7 @@
#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/CodeGenOptions.h"
@@ -47,17 +47,18 @@
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/ProfileSummary.h"
#include "llvm/ProfileData/InstrProfReader.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
+#include "llvm/Support/TimeProfiler.h"
using namespace clang;
using namespace CodeGen;
@@ -409,6 +410,10 @@ void CodeGenModule::Release() {
AddGlobalCtor(CudaCtorFunction);
}
if (OpenMPRuntime) {
+ if (llvm::Function *OpenMPRequiresDirectiveRegFun =
+ OpenMPRuntime->emitRequiresDirectiveRegFun()) {
+ AddGlobalCtor(OpenMPRequiresDirectiveRegFun, 0);
+ }
if (llvm::Function *OpenMPRegistrationFunction =
OpenMPRuntime->emitRegistrationFunction()) {
auto ComdatKey = OpenMPRegistrationFunction->hasComdat() ?
@@ -418,7 +423,9 @@ void CodeGenModule::Release() {
OpenMPRuntime->clear();
}
if (PGOReader) {
- getModule().setProfileSummary(PGOReader->getSummary().getMD(VMContext));
+ getModule().setProfileSummary(
+ PGOReader->getSummary(/* UseCS */ false).getMD(VMContext),
+ llvm::ProfileSummary::PSK_Instr);
if (PGOStats.hasDiagnostics())
PGOStats.reportDiagnostics(getDiags(), getCodeGenOpts().MainFileName);
}
@@ -443,6 +450,24 @@ void CodeGenModule::Release() {
EmitModuleLinkOptions();
}
+ // On ELF we pass the dependent library specifiers directly to the linker
+ // without manipulating them. This is in contrast to other platforms where
+ // they are mapped to a specific linker option by the compiler. This
+ // difference is a result of the greater variety of ELF linkers and the fact
+ // that ELF linkers tend to handle libraries in a more complicated fashion
+ // than on other platforms. This forces us to defer handling the dependent
+ // libs to the linker.
+ //
+ // CUDA/HIP device and host libraries are different. Currently there is no
+ // way to differentiate dependent libraries for host or device. Existing
+ // usage of #pragma comment(lib, *) is intended for host libraries on
+ // Windows. Therefore emit llvm.dependent-libraries only for host.
+ if (!ELFDependentLibraries.empty() && !Context.getLangOpts().CUDAIsDevice) {
+ auto *NMD = getModule().getOrInsertNamedMetadata("llvm.dependent-libraries");
+ for (auto *MD : ELFDependentLibraries)
+ NMD->addOperand(MD);
+ }
+
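The new block forwards ELF dependent-library specifiers verbatim as operands of the llvm.dependent-libraries named metadata rather than rewriting them into linker-option strings. A source-level sketch of what feeds it (assumed example, not part of the patch; the pragma needs Microsoft extensions enabled):

    // On an ELF (non CUDA/HIP-device) compile this is now recorded roughly as
    //   !llvm.dependent-libraries = !{!0}
    //   !0 = !{!"m"}
    // and the linker decides how to honor the request.
    #pragma comment(lib, "m")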
// Record mregparm value now so it is visible through rest of codegen.
if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
getModule().addModuleFlag(llvm::Module::Error, "NumRegisterParameters",
@@ -536,15 +561,16 @@ void CodeGenModule::Release() {
if (LangOpts.OpenCL) {
EmitOpenCLMetadata();
// Emit SPIR version.
- if (getTriple().getArch() == llvm::Triple::spir ||
- getTriple().getArch() == llvm::Triple::spir64) {
+ if (getTriple().isSPIR()) {
// SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the
// opencl.spir.version named metadata.
+ // C++ is backwards compatible with OpenCL v2.0.
+ auto Version = LangOpts.OpenCLCPlusPlus ? 200 : LangOpts.OpenCLVersion;
llvm::Metadata *SPIRVerElts[] = {
llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- Int32Ty, LangOpts.OpenCLVersion / 100)),
+ Int32Ty, Version / 100)),
llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- Int32Ty, (LangOpts.OpenCLVersion / 100 > 1) ? 0 : 2))};
+ Int32Ty, (Version / 100 > 1) ? 0 : 2))};
llvm::NamedMDNode *SPIRVerMD =
TheModule.getOrInsertNamedMetadata("opencl.spir.version");
llvm::LLVMContext &Ctx = TheModule.getContext();
@@ -599,11 +625,14 @@ void CodeGenModule::Release() {
void CodeGenModule::EmitOpenCLMetadata() {
// SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the
// opencl.ocl.version named metadata node.
+ // C++ is backwards compatible with OpenCL v2.0.
+ // FIXME: We might need to add CXX version at some point too?
+ auto Version = LangOpts.OpenCLCPlusPlus ? 200 : LangOpts.OpenCLVersion;
llvm::Metadata *OCLVerElts[] = {
llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- Int32Ty, LangOpts.OpenCLVersion / 100)),
+ Int32Ty, Version / 100)),
llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- Int32Ty, (LangOpts.OpenCLVersion % 100) / 10))};
+ Int32Ty, (Version % 100) / 10))};
llvm::NamedMDNode *OCLVerMD =
TheModule.getOrInsertNamedMetadata("opencl.ocl.version");
llvm::LLVMContext &Ctx = TheModule.getContext();
@@ -731,9 +760,11 @@ void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
}
if (!D)
return;
- // Set visibility for definitions.
+ // Set visibility for definitions, and for declarations if requested globally
+ // or set explicitly.
LinkageInfo LV = D->getLinkageAndVisibility();
- if (LV.isVisibilityExplicit() || !GV->isDeclarationForLinker())
+ if (LV.isVisibilityExplicit() || getLangOpts().SetVisibilityForExternDecls ||
+ !GV->isDeclarationForLinker())
GV->setVisibility(GetLLVMVisibility(LV.getVisibility()));
}
@@ -758,6 +789,13 @@ static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
!GV->isThreadLocal())
return false;
}
+
+ // On COFF, don't mark 'extern_weak' symbols as DSO local. If these symbols
+ // remain unresolved in the link, they can be resolved to zero, which is
+ // outside the current DSO.
+ if (TT.isOSBinFormatCOFF() && GV->hasExternalWeakLinkage())
+ return false;
+
// Every other GV is local on COFF.
// Make an exception for windows OS in the triple: Some firmware builds use
// *-win32-macho triples. This (accidentally?) produced windows relocations
@@ -774,7 +812,7 @@ static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
const auto &CGOpts = CGM.getCodeGenOpts();
llvm::Reloc::Model RM = CGOpts.RelocationModel;
const auto &LOpts = CGM.getLangOpts();
- if (RM != llvm::Reloc::Static && !LOpts.PIE)
+ if (RM != llvm::Reloc::Static && !LOpts.PIE && !LOpts.OpenMPIsDevice)
return false;
// A definition cannot be preempted from an executable.
@@ -837,19 +875,20 @@ void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
GlobalDecl GD) const {
setDLLImportDLLExport(GV, GD);
- setGlobalVisibilityAndLocal(GV, dyn_cast<NamedDecl>(GD.getDecl()));
+ setGVPropertiesAux(GV, dyn_cast<NamedDecl>(GD.getDecl()));
}
void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
const NamedDecl *D) const {
setDLLImportDLLExport(GV, D);
- setGlobalVisibilityAndLocal(GV, D);
+ setGVPropertiesAux(GV, D);
}
-void CodeGenModule::setGlobalVisibilityAndLocal(llvm::GlobalValue *GV,
- const NamedDecl *D) const {
+void CodeGenModule::setGVPropertiesAux(llvm::GlobalValue *GV,
+ const NamedDecl *D) const {
setGlobalVisibility(GV, D);
setDSOLocal(GV);
+ GV->setPartition(CodeGenOpts.SymbolPartition);
}
static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) {
@@ -1047,8 +1086,15 @@ StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
// Keep the first result in the case of a mangling collision.
const auto *ND = cast<NamedDecl>(GD.getDecl());
- auto Result =
- Manglings.insert(std::make_pair(getMangledNameImpl(*this, GD, ND), GD));
+ std::string MangledName = getMangledNameImpl(*this, GD, ND);
+
+ // Adjust kernel stub mangling as we may need to be able to differentiate
+ // kernel stubs from the kernel itself (e.g., for HIP).
+ if (auto *FD = dyn_cast<FunctionDecl>(GD.getDecl()))
+ if (!getLangOpts().CUDAIsDevice && FD->hasAttr<CUDAGlobalAttr>())
+ MangledName = getCUDARuntime().getDeviceStubName(MangledName);
+
+ auto Result = Manglings.insert(std::make_pair(MangledName, GD));
return MangledDeclNames[CanonicalGD] = Result.first->first();
}
@@ -1153,7 +1199,7 @@ CodeGenModule::getFunctionLinkage(GlobalDecl GD) {
return llvm::GlobalValue::InternalLinkage;
}
- return getLLVMLinkageForDeclarator(D, Linkage, /*isConstantVariable=*/false);
+ return getLLVMLinkageForDeclarator(D, Linkage, /*IsConstantVariable=*/false);
}
llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
@@ -1173,6 +1219,212 @@ void CodeGenModule::SetLLVMFunctionAttributes(GlobalDecl GD,
F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
}
+static void removeImageAccessQualifier(std::string& TyName) {
+ std::string ReadOnlyQual("__read_only");
+ std::string::size_type ReadOnlyPos = TyName.find(ReadOnlyQual);
+ if (ReadOnlyPos != std::string::npos)
+ // "+ 1" for the space after access qualifier.
+ TyName.erase(ReadOnlyPos, ReadOnlyQual.size() + 1);
+ else {
+ std::string WriteOnlyQual("__write_only");
+ std::string::size_type WriteOnlyPos = TyName.find(WriteOnlyQual);
+ if (WriteOnlyPos != std::string::npos)
+ TyName.erase(WriteOnlyPos, WriteOnlyQual.size() + 1);
+ else {
+ std::string ReadWriteQual("__read_write");
+ std::string::size_type ReadWritePos = TyName.find(ReadWriteQual);
+ if (ReadWritePos != std::string::npos)
+ TyName.erase(ReadWritePos, ReadWriteQual.size() + 1);
+ }
+ }
+}
+
+// Returns the address space id that should be emitted in the
+// kernel_arg_addr_space metadata. This is always fixed to the ids specified
+// in the SPIR 2.0 specification so that, for example, a clGetKernelArgInfo()
+// implementation can distinguish the address spaces even on targets that
+// have no unique mapping to the OpenCL address spaces (basically all
+// single-AS CPUs).
+static unsigned ArgInfoAddressSpace(LangAS AS) {
+ switch (AS) {
+ case LangAS::opencl_global: return 1;
+ case LangAS::opencl_constant: return 2;
+ case LangAS::opencl_local: return 3;
+ case LangAS::opencl_generic: return 4; // Not in SPIR 2.0 specs.
+ default:
+ return 0; // Assume private.
+ }
+}
+
+void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
+ const FunctionDecl *FD,
+ CodeGenFunction *CGF) {
+ assert(((FD && CGF) || (!FD && !CGF)) &&
+ "Incorrect use - FD and CGF should either be both null or not!");
+ // Create MDNodes that represent the kernel arg metadata.
+ // Each MDNode is a list of the form "key" followed by N values, where N is
+ // the number of kernel arguments.
+
+ const PrintingPolicy &Policy = Context.getPrintingPolicy();
+
+ // MDNode for the kernel argument address space qualifiers.
+ SmallVector<llvm::Metadata *, 8> addressQuals;
+
+ // MDNode for the kernel argument access qualifiers (images only).
+ SmallVector<llvm::Metadata *, 8> accessQuals;
+
+ // MDNode for the kernel argument type names.
+ SmallVector<llvm::Metadata *, 8> argTypeNames;
+
+ // MDNode for the kernel argument base type names.
+ SmallVector<llvm::Metadata *, 8> argBaseTypeNames;
+
+ // MDNode for the kernel argument type qualifiers.
+ SmallVector<llvm::Metadata *, 8> argTypeQuals;
+
+ // MDNode for the kernel argument names.
+ SmallVector<llvm::Metadata *, 8> argNames;
+
+ if (FD && CGF)
+ for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
+ const ParmVarDecl *parm = FD->getParamDecl(i);
+ QualType ty = parm->getType();
+ std::string typeQuals;
+
+ if (ty->isPointerType()) {
+ QualType pointeeTy = ty->getPointeeType();
+
+ // Get address qualifier.
+ addressQuals.push_back(
+ llvm::ConstantAsMetadata::get(CGF->Builder.getInt32(
+ ArgInfoAddressSpace(pointeeTy.getAddressSpace()))));
+
+ // Get argument type name.
+ std::string typeName =
+ pointeeTy.getUnqualifiedType().getAsString(Policy) + "*";
+
+ // Turn "unsigned type" to "utype"
+ std::string::size_type pos = typeName.find("unsigned");
+ if (pointeeTy.isCanonical() && pos != std::string::npos)
+ typeName.erase(pos + 1, 8);
+
+ argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));
+
+ std::string baseTypeName =
+ pointeeTy.getUnqualifiedType().getCanonicalType().getAsString(
+ Policy) +
+ "*";
+
+ // Turn "unsigned type" to "utype"
+ pos = baseTypeName.find("unsigned");
+ if (pos != std::string::npos)
+ baseTypeName.erase(pos + 1, 8);
+
+ argBaseTypeNames.push_back(
+ llvm::MDString::get(VMContext, baseTypeName));
+
+ // Get argument type qualifiers:
+ if (ty.isRestrictQualified())
+ typeQuals = "restrict";
+ if (pointeeTy.isConstQualified() ||
+ (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
+ typeQuals += typeQuals.empty() ? "const" : " const";
+ if (pointeeTy.isVolatileQualified())
+ typeQuals += typeQuals.empty() ? "volatile" : " volatile";
+ } else {
+ uint32_t AddrSpc = 0;
+ bool isPipe = ty->isPipeType();
+ if (ty->isImageType() || isPipe)
+ AddrSpc = ArgInfoAddressSpace(LangAS::opencl_global);
+
+ addressQuals.push_back(
+ llvm::ConstantAsMetadata::get(CGF->Builder.getInt32(AddrSpc)));
+
+ // Get argument type name.
+ std::string typeName;
+ if (isPipe)
+ typeName = ty.getCanonicalType()
+ ->getAs<PipeType>()
+ ->getElementType()
+ .getAsString(Policy);
+ else
+ typeName = ty.getUnqualifiedType().getAsString(Policy);
+
+ // Turn "unsigned type" to "utype"
+ std::string::size_type pos = typeName.find("unsigned");
+ if (ty.isCanonical() && pos != std::string::npos)
+ typeName.erase(pos + 1, 8);
+
+ std::string baseTypeName;
+ if (isPipe)
+ baseTypeName = ty.getCanonicalType()
+ ->getAs<PipeType>()
+ ->getElementType()
+ .getCanonicalType()
+ .getAsString(Policy);
+ else
+ baseTypeName =
+ ty.getUnqualifiedType().getCanonicalType().getAsString(Policy);
+
+ // Remove access qualifiers on images
+ // (as they are inseparable from type in clang implementation,
+ // but OpenCL spec provides a special query to get access qualifier
+ // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER):
+ if (ty->isImageType()) {
+ removeImageAccessQualifier(typeName);
+ removeImageAccessQualifier(baseTypeName);
+ }
+
+ argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));
+
+ // Turn "unsigned type" to "utype"
+ pos = baseTypeName.find("unsigned");
+ if (pos != std::string::npos)
+ baseTypeName.erase(pos + 1, 8);
+
+ argBaseTypeNames.push_back(
+ llvm::MDString::get(VMContext, baseTypeName));
+
+ if (isPipe)
+ typeQuals = "pipe";
+ }
+
+ argTypeQuals.push_back(llvm::MDString::get(VMContext, typeQuals));
+
+ // Get image and pipe access qualifier:
+ if (ty->isImageType() || ty->isPipeType()) {
+ const Decl *PDecl = parm;
+ if (auto *TD = dyn_cast<TypedefType>(ty))
+ PDecl = TD->getDecl();
+ const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
+ if (A && A->isWriteOnly())
+ accessQuals.push_back(llvm::MDString::get(VMContext, "write_only"));
+ else if (A && A->isReadWrite())
+ accessQuals.push_back(llvm::MDString::get(VMContext, "read_write"));
+ else
+ accessQuals.push_back(llvm::MDString::get(VMContext, "read_only"));
+ } else
+ accessQuals.push_back(llvm::MDString::get(VMContext, "none"));
+
+ // Get argument name.
+ argNames.push_back(llvm::MDString::get(VMContext, parm->getName()));
+ }
+
+ Fn->setMetadata("kernel_arg_addr_space",
+ llvm::MDNode::get(VMContext, addressQuals));
+ Fn->setMetadata("kernel_arg_access_qual",
+ llvm::MDNode::get(VMContext, accessQuals));
+ Fn->setMetadata("kernel_arg_type",
+ llvm::MDNode::get(VMContext, argTypeNames));
+ Fn->setMetadata("kernel_arg_base_type",
+ llvm::MDNode::get(VMContext, argBaseTypeNames));
+ Fn->setMetadata("kernel_arg_type_qual",
+ llvm::MDNode::get(VMContext, argTypeQuals));
+ if (getCodeGenOpts().EmitOpenCLArgMetadata)
+ Fn->setMetadata("kernel_arg_name",
+ llvm::MDNode::get(VMContext, argNames));
+}
+
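GenOpenCLArgMetadata builds one list per property, with one entry per kernel argument, and attaches each list as named function metadata. A condensed standalone sketch of the same attachment pattern for a single global int* argument (illustration only, not part of the patch):

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Metadata.h"
    #include "llvm/IR/Type.h"

    // Attach kernel_arg_* metadata describing one "global int *" parameter.
    static void attachKernelArgMetadata(llvm::Function *Fn) {
      llvm::LLVMContext &Ctx = Fn->getContext();
      llvm::Metadata *AddrSpace[] = {llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))}; // global == 1
      llvm::Metadata *AccessQual[] = {llvm::MDString::get(Ctx, "none")};
      llvm::Metadata *TypeName[] = {llvm::MDString::get(Ctx, "int*")};
      llvm::Metadata *BaseType[] = {llvm::MDString::get(Ctx, "int*")};
      llvm::Metadata *TypeQual[] = {llvm::MDString::get(Ctx, "")};

      Fn->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(Ctx, AddrSpace));
      Fn->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(Ctx, AccessQual));
      Fn->setMetadata("kernel_arg_type", llvm::MDNode::get(Ctx, TypeName));
      Fn->setMetadata("kernel_arg_base_type", llvm::MDNode::get(Ctx, BaseType));
      Fn->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(Ctx, TypeQual));
    }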
/// Determines whether the language options require us to model
/// unwind exceptions. We treat -fexceptions as mandating this
/// except under the fragile ObjC ABI with only ObjC exceptions
@@ -1544,12 +1796,8 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
const auto *FD = cast<FunctionDecl>(GD.getDecl());
- if (!IsIncompleteFunction) {
+ if (!IsIncompleteFunction)
SetLLVMFunctionAttributes(GD, getTypes().arrangeGlobalDeclaration(GD), F);
- // Setup target-specific attributes.
- if (F->isDeclaration())
- getTargetCodeGenInfo().setTargetAttributes(FD, F, *this);
- }
// Add the Returned attribute for "this", except for iOS 5 and earlier
// where substantial code, including the libstdc++ dylib, was compiled with
@@ -1569,6 +1817,10 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
setLinkageForGV(F, FD);
setGVProperties(F, FD);
+ // Setup target-specific attributes.
+ if (!IsIncompleteFunction && F->isDeclaration())
+ getTargetCodeGenInfo().setTargetAttributes(FD, F, *this);
+
if (const auto *CSA = FD->getAttr<CodeSegAttr>())
F->setSection(CSA->getName());
else if (const auto *SA = FD->getAttr<SectionAttr>())
@@ -1603,6 +1855,23 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
if (getLangOpts().OpenMP && FD->hasAttr<OMPDeclareSimdDeclAttr>())
getOpenMPRuntime().emitDeclareSimdFunction(FD, F);
+
+ if (const auto *CB = FD->getAttr<CallbackAttr>()) {
+ // Annotate the callback behavior as metadata:
+ // - The callback callee (as argument number).
+ // - The callback payloads (as argument numbers).
+ llvm::LLVMContext &Ctx = F->getContext();
+ llvm::MDBuilder MDB(Ctx);
+
+ // The payload indices are all but the first one in the encoding. The first
+ // identifies the callback callee.
+ int CalleeIdx = *CB->encoding_begin();
+ ArrayRef<int> PayloadIndices(CB->encoding_begin() + 1, CB->encoding_end());
+ F->addMetadata(llvm::LLVMContext::MD_callback,
+ *llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
+ CalleeIdx, PayloadIndices,
+ /* VarArgsArePassed */ false)}));
+ }
}
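The CallbackAttr handling above encodes, per annotated function, which parameter is the callback and which parameters are forwarded to it as payload. A source-level sketch of the attribute it consumes (assumed example, not from the patch):

    // Parameter 1 is the callback callee, parameter 3 is passed through to it;
    // codegen records this as an MD_callback metadata node on 'enqueue'.
    __attribute__((callback(1, 3)))
    void enqueue(void (*Work)(void *Payload), int Priority, void *Payload);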
void CodeGenModule::addUsedGlobal(llvm::GlobalValue *GV) {
@@ -1660,17 +1929,18 @@ void CodeGenModule::AddDetectMismatch(StringRef Name, StringRef Value) {
LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
}
-void CodeGenModule::AddELFLibDirective(StringRef Lib) {
+void CodeGenModule::AddDependentLib(StringRef Lib) {
auto &C = getLLVMContext();
- LinkerOptionsMetadata.push_back(llvm::MDNode::get(
- C, {llvm::MDString::get(C, "lib"), llvm::MDString::get(C, Lib)}));
-}
+ if (getTarget().getTriple().isOSBinFormatELF()) {
+ ELFDependentLibraries.push_back(
+ llvm::MDNode::get(C, llvm::MDString::get(C, Lib)));
+ return;
+ }
-void CodeGenModule::AddDependentLib(StringRef Lib) {
llvm::SmallString<24> Opt;
getTargetCodeGenInfo().getDependentLibraryOption(Lib, Opt);
auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
- LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
+ LinkerOptionsMetadata.push_back(llvm::MDNode::get(C, MDOpts));
}
/// Add link options implied by the given module, including modules
@@ -1693,7 +1963,6 @@ static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
// described by this module.
llvm::LLVMContext &Context = CGM.getLLVMContext();
bool IsELF = CGM.getTarget().getTriple().isOSBinFormatELF();
- bool IsPS4 = CGM.getTarget().getTriple().isPS4();
// For modules that use export_as for linking, use that module
// name instead.
@@ -1713,7 +1982,7 @@ static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
}
// Link against a library.
- if (IsELF && !IsPS4) {
+ if (IsELF) {
llvm::Metadata *Args[2] = {
llvm::MDString::get(Context, "lib"),
llvm::MDString::get(Context, Mod->LinkLibraries[I - 1].Library),
@@ -1970,9 +2239,11 @@ bool CodeGenModule::isInSanitizerBlacklist(llvm::GlobalVariable *GV,
SourceLocation Loc, QualType Ty,
StringRef Category) const {
// For now globals can be blacklisted only in ASan and KASan.
- const SanitizerMask EnabledAsanMask = LangOpts.Sanitize.Mask &
+ const SanitizerMask EnabledAsanMask =
+ LangOpts.Sanitize.Mask &
(SanitizerKind::Address | SanitizerKind::KernelAddress |
- SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress);
+ SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress |
+ SanitizerKind::MemTag);
if (!EnabledAsanMask)
return false;
const auto &SanitizerBL = getContext().getSanitizerBlacklist();
@@ -2146,7 +2417,8 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
if (!Global->hasAttr<CUDADeviceAttr>() &&
!Global->hasAttr<CUDAGlobalAttr>() &&
!Global->hasAttr<CUDAConstantAttr>() &&
- !Global->hasAttr<CUDASharedAttr>())
+ !Global->hasAttr<CUDASharedAttr>() &&
+ !(LangOpts.HIP && Global->hasAttr<HIPPinnedShadowAttr>()))
return;
} else {
// We need to emit host-side 'shadows' for all global
@@ -2173,6 +2445,10 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
if (MustBeEmitted(Global))
EmitOMPDeclareReduction(DRD);
return;
+ } else if (auto *DMD = dyn_cast<OMPDeclareMapperDecl>(Global)) {
+ if (MustBeEmitted(Global))
+ EmitOMPDeclareMapper(DMD);
+ return;
}
}
@@ -2202,13 +2478,19 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
// Emit declaration of the must-be-emitted declare target variable.
if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
- if (*Res == OMPDeclareTargetDeclAttr::MT_To) {
+ bool UnifiedMemoryEnabled =
+ getOpenMPRuntime().hasRequiresUnifiedSharedMemory();
+ if (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ !UnifiedMemoryEnabled) {
(void)GetAddrOfGlobalVar(VD);
} else {
- assert(*Res == OMPDeclareTargetDeclAttr::MT_Link &&
- "link claue expected.");
- (void)getOpenMPRuntime().getAddrOfDeclareTargetLink(VD);
+ assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
+ (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ UnifiedMemoryEnabled)) &&
+ "Link clause or to clause with unified memory expected.");
+ (void)getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
}
+
return;
}
}
@@ -2265,35 +2547,36 @@ static bool HasNonDllImportDtor(QualType T) {
}
namespace {
- struct FunctionIsDirectlyRecursive :
- public RecursiveASTVisitor<FunctionIsDirectlyRecursive> {
+ struct FunctionIsDirectlyRecursive
+ : public ConstStmtVisitor<FunctionIsDirectlyRecursive, bool> {
const StringRef Name;
const Builtin::Context &BI;
- bool Result;
- FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C) :
- Name(N), BI(C), Result(false) {
- }
- typedef RecursiveASTVisitor<FunctionIsDirectlyRecursive> Base;
+ FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C)
+ : Name(N), BI(C) {}
- bool TraverseCallExpr(CallExpr *E) {
+ bool VisitCallExpr(const CallExpr *E) {
const FunctionDecl *FD = E->getDirectCallee();
if (!FD)
- return true;
- AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
- if (Attr && Name == Attr->getLabel()) {
- Result = true;
return false;
- }
+ AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
+ if (Attr && Name == Attr->getLabel())
+ return true;
unsigned BuiltinID = FD->getBuiltinID();
if (!BuiltinID || !BI.isLibFunction(BuiltinID))
- return true;
+ return false;
StringRef BuiltinName = BI.getName(BuiltinID);
if (BuiltinName.startswith("__builtin_") &&
Name == BuiltinName.slice(strlen("__builtin_"), StringRef::npos)) {
- Result = true;
- return false;
+ return true;
}
- return true;
+ return false;
+ }
+
+ bool VisitStmt(const Stmt *S) {
+ for (const Stmt *Child : S->children())
+ if (Child && this->Visit(Child))
+ return true;
+ return false;
}
};
@@ -2378,8 +2661,8 @@ CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) {
}
FunctionIsDirectlyRecursive Walker(Name, Context.BuiltinInfo);
- Walker.TraverseFunctionDecl(const_cast<FunctionDecl*>(FD));
- return Walker.Result;
+ const Stmt *Body = FD->getBody();
+ return Body ? Walker.Visit(Body) : false;
}
bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
@@ -2447,13 +2730,19 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
if (!shouldEmitFunction(GD))
return;
+ llvm::TimeTraceScope TimeScope("CodeGen Function", [&]() {
+ std::string Name;
+ llvm::raw_string_ostream OS(Name);
+ FD->getNameForDiagnostic(OS, getContext().getPrintingPolicy(),
+ /*Qualified=*/true);
+ return Name;
+ });
+
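The llvm::TimeTraceScope added above feeds the -ftime-trace profile; passing a callable for the detail string means the (potentially expensive) qualified name is only built when tracing is actually enabled. A minimal sketch of the same pattern (standalone illustration, assuming the callable-taking constructor used above):

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/TimeProfiler.h"

    static void doTimedWork(llvm::StringRef Name) {
      // The lambda runs only if the time-trace profiler is initialized.
      llvm::TimeTraceScope Scope("CodeGen Function", [&] { return Name.str(); });
      // ... work measured for the lifetime of Scope ...
    }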
if (const auto *Method = dyn_cast<CXXMethodDecl>(D)) {
// Make sure to emit the definition(s) before we emit the thunks.
// This is necessary for the generation of certain thunks.
- if (const auto *CD = dyn_cast<CXXConstructorDecl>(Method))
- ABI->emitCXXStructor(CD, getFromCtorType(GD.getCtorType()));
- else if (const auto *DD = dyn_cast<CXXDestructorDecl>(Method))
- ABI->emitCXXStructor(DD, getFromDtorType(GD.getDtorType()));
+ if (isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method))
+ ABI->emitCXXStructor(GD);
else if (FD->isMultiVersion())
EmitMultiVersionFunctionDefinition(GD, GV);
else
@@ -2537,10 +2826,9 @@ void CodeGenModule::emitMultiVersionFunctions() {
ResolverFunc->setComdat(
getModule().getOrInsertComdat(ResolverFunc->getName()));
- std::stable_sort(
- Options.begin(), Options.end(),
- [&TI](const CodeGenFunction::MultiVersionResolverOption &LHS,
- const CodeGenFunction::MultiVersionResolverOption &RHS) {
+ llvm::stable_sort(
+ Options, [&TI](const CodeGenFunction::MultiVersionResolverOption &LHS,
+ const CodeGenFunction::MultiVersionResolverOption &RHS) {
return TargetMVPriority(TI, LHS) > TargetMVPriority(TI, RHS);
});
CodeGenFunction CGF(*this);
@@ -2553,8 +2841,7 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
assert(FD && "Not a FunctionDecl?");
const auto *DD = FD->getAttr<CPUDispatchAttr>();
assert(DD && "Not a cpu_dispatch Function?");
- QualType CanonTy = Context.getCanonicalType(FD->getType());
- llvm::Type *DeclTy = getTypes().ConvertFunctionType(CanonTy, FD);
+ llvm::Type *DeclTy = getTypes().ConvertType(FD->getType());
if (const auto *CXXFD = dyn_cast<CXXMethodDecl>(FD)) {
const CGFunctionInfo &FInfo = getTypes().arrangeCXXMethodDeclaration(CXXFD);
@@ -2893,8 +3180,7 @@ llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
// If there was no specific requested type, just convert it now.
if (!Ty) {
const auto *FD = cast<FunctionDecl>(GD.getDecl());
- auto CanonTy = Context.getCanonicalType(FD->getType());
- Ty = getTypes().ConvertFunctionType(CanonTy, FD);
+ Ty = getTypes().ConvertType(FD->getType());
}
// Devirtualized destructor calls may come through here instead of via
@@ -2953,7 +3239,7 @@ GetRuntimeFunctionDecl(ASTContext &C, StringRef Name) {
/// CreateRuntimeFunction - Create a new runtime function with the specified
/// type and name.
-llvm::Constant *
+llvm::FunctionCallee
CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name,
llvm::AttributeList ExtraAttrs,
bool Local) {
@@ -2966,9 +3252,13 @@ CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name,
if (F->empty()) {
F->setCallingConv(getRuntimeCC());
- if (!Local && getTriple().isOSBinFormatCOFF() &&
- !getCodeGenOpts().LTOVisibilityPublicStd &&
- !getTriple().isWindowsGNUEnvironment()) {
+ // In Windows Itanium environments, try to mark runtime functions
+ // dllimport. For Mingw and MSVC, don't. We don't really know if the user
+ // will link their standard library statically or dynamically. Marking
+ // functions imported when they are not imported can cause linker errors
+ // and warnings.
+ if (!Local && getTriple().isWindowsItaniumEnvironment() &&
+ !getCodeGenOpts().LTOVisibilityPublicStd) {
const FunctionDecl *FD = GetRuntimeFunctionDecl(Context, Name);
if (!FD || FD->hasAttr<DLLImportAttr>()) {
F->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
@@ -2979,15 +3269,7 @@ CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name,
}
}
- return C;
-}
-
-/// CreateBuiltinFunction - Create a new builtin function with the specified
-/// type and name.
-llvm::Constant *
-CodeGenModule::CreateBuiltinFunction(llvm::FunctionType *FTy, StringRef Name,
- llvm::AttributeList ExtraAttrs) {
- return CreateRuntimeFunction(FTy, Name, ExtraAttrs, true);
+ return {FTy, C};
}
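CreateRuntimeFunction now returns an llvm::FunctionCallee, which pairs the callee with its llvm::FunctionType so later call emission no longer has to recover the type from a bare llvm::Constant. A minimal usage sketch (hypothetical helper; printf is just an example):

    #include "CodeGenModule.h"
    #include "llvm/IR/DerivedTypes.h"

    // Declare printf as a runtime function; the resulting FunctionCallee can
    // be handed straight to CodeGenFunction::EmitRuntimeCall.
    static llvm::FunctionCallee getPrintfDecl(clang::CodeGen::CodeGenModule &CGM) {
      llvm::FunctionType *FTy =
          llvm::FunctionType::get(CGM.IntTy, {CGM.Int8PtrTy}, /*isVarArg=*/true);
      return CGM.CreateRuntimeFunction(FTy, "printf");
    }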
/// isTypeConstant - Determine whether an object of this type can be emitted
@@ -3199,6 +3481,9 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
return getTargetCodeGenInfo().performAddrSpaceCast(*this, GV, AddrSpace,
ExpectedAS, Ty);
+ if (GV->isDeclaration())
+ getTargetCodeGenInfo().setTargetAttributes(D, GV, *this);
+
return GV;
}
@@ -3206,15 +3491,8 @@ llvm::Constant *
CodeGenModule::GetAddrOfGlobal(GlobalDecl GD,
ForDefinition_t IsForDefinition) {
const Decl *D = GD.getDecl();
- if (isa<CXXConstructorDecl>(D))
- return getAddrOfCXXStructor(cast<CXXConstructorDecl>(D),
- getFromCtorType(GD.getCtorType()),
- /*FnInfo=*/nullptr, /*FnType=*/nullptr,
- /*DontDefer=*/false, IsForDefinition);
- else if (isa<CXXDestructorDecl>(D))
- return getAddrOfCXXStructor(cast<CXXDestructorDecl>(D),
- getFromDtorType(GD.getDtorType()),
- /*FnInfo=*/nullptr, /*FnType=*/nullptr,
+ if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
+ return getAddrOfCXXStructor(GD, /*FnInfo=*/nullptr, /*FnType=*/nullptr,
/*DontDefer=*/false, IsForDefinition);
else if (isa<CXXMethodDecl>(D)) {
auto FInfo = &getTypes().arrangeCXXMethodDeclaration(
@@ -3301,8 +3579,12 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
llvm::Constant *
CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty,
StringRef Name) {
- auto *Ret =
- GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), nullptr);
+ auto PtrTy =
+ getContext().getLangOpts().OpenCL
+ ? llvm::PointerType::get(
+ Ty, getContext().getTargetAddressSpace(LangAS::opencl_global))
+ : llvm::PointerType::getUnqual(Ty);
+ auto *Ret = GetOrCreateLLVMGlobal(Name, PtrTy, nullptr);
setDSOLocal(cast<llvm::GlobalValue>(Ret->stripPointerCasts()));
return Ret;
}
@@ -3359,6 +3641,11 @@ LangAS CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D) {
return LangAS::cuda_device;
}
+ if (LangOpts.OpenMP) {
+ LangAS AS;
+ if (OpenMPRuntime->hasAllocateAttributeForGlobalVar(D, AS))
+ return AS;
+ }
return getTargetCodeGenInfo().getGlobalVarAddressSpace(*this, D);
}
@@ -3432,6 +3719,11 @@ static bool shouldBeInCOMDAT(CodeGenModule &CGM, const Decl &D) {
if (!CGM.supportsCOMDAT())
return false;
+ // Do not set COMDAT attribute for CUDA/HIP stub functions to prevent
+ // them being "merged" by the COMDAT Folding linker optimization.
+ if (D.hasAttr<CUDAGlobalAttr>())
+ return false;
+
if (D.hasAttr<SelectAnyAttr>())
return true;
@@ -3496,7 +3788,12 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
!getLangOpts().CUDAIsDevice &&
(D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() ||
D->hasAttr<CUDASharedAttr>());
- if (getLangOpts().CUDA && (IsCUDASharedVar || IsCUDAShadowVar))
+ // HIP pinned shadows of initialized host-side global variables are also
+ // left undefined.
+ bool IsHIPPinnedShadowVar =
+ getLangOpts().CUDAIsDevice && D->hasAttr<HIPPinnedShadowAttr>();
+ if (getLangOpts().CUDA &&
+ (IsCUDASharedVar || IsCUDAShadowVar || IsHIPPinnedShadowVar))
Init = llvm::UndefValue::get(getTypes().ConvertType(ASTTy));
else if (!InitExpr) {
// This is a tentative definition; tentative definitions are
@@ -3599,14 +3896,16 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// / cudaMemcpyToSymbol() / cudaMemcpyFromSymbol())."
if (GV && LangOpts.CUDA) {
if (LangOpts.CUDAIsDevice) {
- if (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>())
+ if (Linkage != llvm::GlobalValue::InternalLinkage &&
+ (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>()))
GV->setExternallyInitialized(true);
} else {
// Host-side shadows of external declarations of device-side
// global variables become internal definitions. These have to
// be internal in order to prevent name conflicts with global
// host variables with the same name in a different TUs.
- if (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>()) {
+ if (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
+ D->hasAttr<HIPPinnedShadowAttr>()) {
Linkage = llvm::GlobalValue::InternalLinkage;
// Shadow variables and their properties must be registered
@@ -3619,7 +3918,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// Extern global variables will be registered in the TU where they are
// defined.
if (!D->hasExternalStorage())
- getCUDARuntime().registerDeviceVar(*GV, Flags);
+ getCUDARuntime().registerDeviceVar(D, *GV, Flags);
} else if (D->hasAttr<CUDASharedAttr>())
// __shared__ variables are odd. Shadows do get created, but
// they are not registered with the CUDA runtime, so they
@@ -3630,7 +3929,8 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
}
}
- GV->setInitializer(Init);
+ if (!IsHIPPinnedShadowVar)
+ GV->setInitializer(Init);
if (emitter) emitter->finalize(GV);
// If it is safe to mark the global 'constant', do so now.
@@ -3762,13 +4062,15 @@ static bool isVarDeclStrongDefinition(const ASTContext &Context,
}
}
- // Microsoft's link.exe doesn't support alignments greater than 32 for common
- // symbols, so symbols with greater alignment requirements cannot be common.
+ // Microsoft's link.exe doesn't support alignments greater than 32 bytes for
+ // common symbols, so symbols with greater alignment requirements cannot be
+ // common.
// Other COFF linkers (ld.bfd and LLD) support arbitrary power-of-two
// alignments for common symbols via the aligncomm directive, so this
// restriction only applies to MSVC environments.
if (Context.getTargetInfo().getTriple().isKnownWindowsMSVCEnvironment() &&
- Context.getTypeAlignIfKnown(D->getType()) > 32)
+ Context.getTypeAlignIfKnown(D->getType()) >
+ Context.toBits(CharUnits::fromQuantity(32)))
return true;
return false;
@@ -3877,9 +4179,10 @@ static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
}
// Recognize calls to the function.
- llvm::CallSite callSite(user);
+ llvm::CallBase *callSite = dyn_cast<llvm::CallBase>(user);
if (!callSite) continue;
- if (!callSite.isCallee(&*use)) continue;
+ if (!callSite->isCallee(&*use))
+ continue;
// If the return types don't match exactly, then we can't
// transform this call unless it's dead.
@@ -3888,18 +4191,19 @@ static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
// Get the call site's attribute list.
SmallVector<llvm::AttributeSet, 8> newArgAttrs;
- llvm::AttributeList oldAttrs = callSite.getAttributes();
+ llvm::AttributeList oldAttrs = callSite->getAttributes();
// If the function was passed too few arguments, don't transform.
unsigned newNumArgs = newFn->arg_size();
- if (callSite.arg_size() < newNumArgs) continue;
+ if (callSite->arg_size() < newNumArgs)
+ continue;
// If extra arguments were passed, we silently drop them.
// If any of the types mismatch, we don't transform.
unsigned argNo = 0;
bool dontTransform = false;
for (llvm::Argument &A : newFn->args()) {
- if (callSite.getArgument(argNo)->getType() != A.getType()) {
+ if (callSite->getArgOperand(argNo)->getType() != A.getType()) {
dontTransform = true;
break;
}
@@ -3913,35 +4217,33 @@ static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
// Okay, we can transform this. Create the new call instruction and copy
// over the required information.
- newArgs.append(callSite.arg_begin(), callSite.arg_begin() + argNo);
+ newArgs.append(callSite->arg_begin(), callSite->arg_begin() + argNo);
// Copy over any operand bundles.
- callSite.getOperandBundlesAsDefs(newBundles);
+ callSite->getOperandBundlesAsDefs(newBundles);
- llvm::CallSite newCall;
- if (callSite.isCall()) {
- newCall = llvm::CallInst::Create(newFn, newArgs, newBundles, "",
- callSite.getInstruction());
+ llvm::CallBase *newCall;
+ if (dyn_cast<llvm::CallInst>(callSite)) {
+ newCall =
+ llvm::CallInst::Create(newFn, newArgs, newBundles, "", callSite);
} else {
- auto *oldInvoke = cast<llvm::InvokeInst>(callSite.getInstruction());
- newCall = llvm::InvokeInst::Create(newFn,
- oldInvoke->getNormalDest(),
- oldInvoke->getUnwindDest(),
- newArgs, newBundles, "",
- callSite.getInstruction());
+ auto *oldInvoke = cast<llvm::InvokeInst>(callSite);
+ newCall = llvm::InvokeInst::Create(newFn, oldInvoke->getNormalDest(),
+ oldInvoke->getUnwindDest(), newArgs,
+ newBundles, "", callSite);
}
newArgs.clear(); // for the next iteration
if (!newCall->getType()->isVoidTy())
- newCall->takeName(callSite.getInstruction());
- newCall.setAttributes(llvm::AttributeList::get(
+ newCall->takeName(callSite);
+ newCall->setAttributes(llvm::AttributeList::get(
newFn->getContext(), oldAttrs.getFnAttributes(),
oldAttrs.getRetAttributes(), newArgAttrs));
- newCall.setCallingConv(callSite.getCallingConv());
+ newCall->setCallingConv(callSite->getCallingConv());
// Finally, remove the old call, replacing any uses with the new one.
if (!callSite->use_empty())
- callSite->replaceAllUsesWith(newCall.getInstruction());
+ callSite->replaceAllUsesWith(newCall);
// Copy debug location attached to CI.
if (callSite->getDebugLoc())
@@ -4373,9 +4675,12 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
GV = Fields.finishAndCreateGlobal("_unnamed_cfstring_", Alignment,
/*isConstant=*/false,
llvm::GlobalVariable::PrivateLinkage);
+ GV->addAttribute("objc_arc_inert");
switch (Triple.getObjectFormat()) {
case llvm::Triple::UnknownObjectFormat:
llvm_unreachable("unknown file format");
+ case llvm::Triple::XCOFF:
+ llvm_unreachable("XCOFF is not yet implemented");
case llvm::Triple::COFF:
case llvm::Triple::ELF:
case llvm::Triple::Wasm:
@@ -4504,7 +4809,8 @@ CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
if (auto GV = *Entry) {
if (Alignment.getQuantity() > GV->getAlignment())
GV->setAlignment(Alignment.getQuantity());
- return ConstantAddress(GV, Alignment);
+ return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
+ Alignment);
}
}
@@ -4566,7 +4872,8 @@ ConstantAddress CodeGenModule::GetAddrOfConstantCString(
if (auto GV = *Entry) {
if (Alignment.getQuantity() > GV->getAlignment())
GV->setAlignment(Alignment.getQuantity());
- return ConstantAddress(GV, Alignment);
+ return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
+ Alignment);
}
}
@@ -4615,7 +4922,7 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
// evaluating the initializer if the surrounding constant expression
// modifies the temporary.
Value = getContext().getMaterializedTemporaryValue(E, false);
- if (Value && Value->isUninit())
+ if (Value && Value->isAbsent())
Value = nullptr;
}
@@ -4861,6 +5168,7 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::UsingShadow:
case Decl::ClassTemplate:
case Decl::VarTemplate:
+ case Decl::Concept:
case Decl::VarTemplatePartialSpecialization:
case Decl::FunctionTemplate:
case Decl::TypeAliasTemplate:
@@ -4943,10 +5251,6 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
AppendLinkerOptions(PCD->getArg());
break;
case PCK_Lib:
- if (getTarget().getTriple().isOSBinFormatELF() &&
- !getTarget().getTriple().isPS4())
- AddELFLibDirective(PCD->getArg());
- else
AddDependentLib(PCD->getArg());
break;
case PCK_Compiler:
@@ -5030,10 +5334,17 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
EmitOMPThreadPrivateDecl(cast<OMPThreadPrivateDecl>(D));
break;
+ case Decl::OMPAllocate:
+ break;
+
case Decl::OMPDeclareReduction:
EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(D));
break;
+ case Decl::OMPDeclareMapper:
+ EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(D));
+ break;
+
case Decl::OMPRequires:
EmitOMPRequiresDecl(cast<OMPRequiresDecl>(D));
break;
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index 75679d11c13c..95964afed4ec 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -1,9 +1,8 @@
//===--- CodeGenModule.h - Per-Module state for LLVM CodeGen ----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -120,90 +119,93 @@ struct ObjCEntrypoints {
ObjCEntrypoints() { memset(this, 0, sizeof(*this)); }
/// void objc_alloc(id);
- llvm::Constant *objc_alloc;
+ llvm::FunctionCallee objc_alloc;
/// void objc_allocWithZone(id);
- llvm::Constant *objc_allocWithZone;
+ llvm::FunctionCallee objc_allocWithZone;
+
+ /// void objc_alloc_init(id);
+ llvm::FunctionCallee objc_alloc_init;
/// void objc_autoreleasePoolPop(void*);
- llvm::Constant *objc_autoreleasePoolPop;
+ llvm::FunctionCallee objc_autoreleasePoolPop;
/// void objc_autoreleasePoolPop(void*);
/// Note this method is used when we are using exception handling
- llvm::Constant *objc_autoreleasePoolPopInvoke;
+ llvm::FunctionCallee objc_autoreleasePoolPopInvoke;
/// void *objc_autoreleasePoolPush(void);
- llvm::Constant *objc_autoreleasePoolPush;
+ llvm::Function *objc_autoreleasePoolPush;
/// id objc_autorelease(id);
- llvm::Constant *objc_autorelease;
+ llvm::Function *objc_autorelease;
/// id objc_autorelease(id);
/// Note this is the runtime method not the intrinsic.
- llvm::Constant *objc_autoreleaseRuntimeFunction;
+ llvm::FunctionCallee objc_autoreleaseRuntimeFunction;
/// id objc_autoreleaseReturnValue(id);
- llvm::Constant *objc_autoreleaseReturnValue;
+ llvm::Function *objc_autoreleaseReturnValue;
/// void objc_copyWeak(id *dest, id *src);
- llvm::Constant *objc_copyWeak;
+ llvm::Function *objc_copyWeak;
/// void objc_destroyWeak(id*);
- llvm::Constant *objc_destroyWeak;
+ llvm::Function *objc_destroyWeak;
/// id objc_initWeak(id*, id);
- llvm::Constant *objc_initWeak;
+ llvm::Function *objc_initWeak;
/// id objc_loadWeak(id*);
- llvm::Constant *objc_loadWeak;
+ llvm::Function *objc_loadWeak;
/// id objc_loadWeakRetained(id*);
- llvm::Constant *objc_loadWeakRetained;
+ llvm::Function *objc_loadWeakRetained;
/// void objc_moveWeak(id *dest, id *src);
- llvm::Constant *objc_moveWeak;
+ llvm::Function *objc_moveWeak;
/// id objc_retain(id);
- llvm::Constant *objc_retain;
+ llvm::Function *objc_retain;
/// id objc_retain(id);
/// Note this is the runtime method not the intrinsic.
- llvm::Constant *objc_retainRuntimeFunction;
+ llvm::FunctionCallee objc_retainRuntimeFunction;
/// id objc_retainAutorelease(id);
- llvm::Constant *objc_retainAutorelease;
+ llvm::Function *objc_retainAutorelease;
/// id objc_retainAutoreleaseReturnValue(id);
- llvm::Constant *objc_retainAutoreleaseReturnValue;
+ llvm::Function *objc_retainAutoreleaseReturnValue;
/// id objc_retainAutoreleasedReturnValue(id);
- llvm::Constant *objc_retainAutoreleasedReturnValue;
+ llvm::Function *objc_retainAutoreleasedReturnValue;
/// id objc_retainBlock(id);
- llvm::Constant *objc_retainBlock;
+ llvm::Function *objc_retainBlock;
/// void objc_release(id);
- llvm::Constant *objc_release;
+ llvm::Function *objc_release;
/// void objc_release(id);
/// Note this is the runtime method not the intrinsic.
- llvm::Constant *objc_releaseRuntimeFunction;
+ llvm::FunctionCallee objc_releaseRuntimeFunction;
/// void objc_storeStrong(id*, id);
- llvm::Constant *objc_storeStrong;
+ llvm::Function *objc_storeStrong;
/// id objc_storeWeak(id*, id);
- llvm::Constant *objc_storeWeak;
+ llvm::Function *objc_storeWeak;
/// id objc_unsafeClaimAutoreleasedReturnValue(id);
- llvm::Constant *objc_unsafeClaimAutoreleasedReturnValue;
+ llvm::Function *objc_unsafeClaimAutoreleasedReturnValue;
/// A void(void) inline asm to use to mark that the return value of
/// a call will be immediately retained.
llvm::InlineAsm *retainAutoreleasedReturnValueMarker;
/// void clang.arc.use(...);
- llvm::Constant *clang_arc_use;
+ llvm::Function *clang_arc_use;
};
/// This class records statistics on instrumentation based profiling.
@@ -252,7 +254,8 @@ public:
/// have different helper functions.
CharUnits Alignment;
- BlockByrefHelpers(CharUnits alignment) : Alignment(alignment) {}
+ BlockByrefHelpers(CharUnits alignment)
+ : CopyHelper(nullptr), DisposeHelper(nullptr), Alignment(alignment) {}
BlockByrefHelpers(const BlockByrefHelpers &) = default;
virtual ~BlockByrefHelpers();
@@ -359,6 +362,10 @@ private:
llvm::SmallVector<std::pair<llvm::GlobalValue *, llvm::Constant *>, 8>
GlobalValReplacements;
+ /// Variables for which we've emitted globals containing their constant
+ /// values along with the corresponding globals, for opportunistic reuse.
+ llvm::DenseMap<const VarDecl*, llvm::GlobalVariable*> InitializerConstants;
+
/// Set of global decls for which we already diagnosed mangled name conflict.
/// Required to not issue a warning (on a mangling conflict) multiple times
/// for the same decl.
@@ -452,7 +459,9 @@ private:
SmallVector<GlobalInitData, 8> PrioritizedCXXGlobalInits;
/// Global destructor functions and arguments that need to run on termination.
- std::vector<std::pair<llvm::WeakTrackingVH, llvm::Constant *>> CXXGlobalDtors;
+ std::vector<
+ std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH, llvm::Constant *>>
+ CXXGlobalDtors;
/// The complete set of modules that has been imported.
llvm::SetVector<clang::Module *> ImportedModules;
@@ -461,9 +470,12 @@ private:
/// have been emitted.
llvm::SmallPtrSet<clang::Module *, 16> EmittedModuleInitializers;
- /// A vector of metadata strings.
+ /// A vector of metadata strings for linker options.
SmallVector<llvm::MDNode *, 16> LinkerOptionsMetadata;
+ /// A vector of metadata strings for dependent libraries for ELF.
+ SmallVector<llvm::MDNode *, 16> ELFDependentLibraries;
+
/// @name Cache for Objective-C runtime types
/// @{
@@ -501,8 +513,8 @@ private:
llvm::Constant *NSConcreteGlobalBlock = nullptr;
llvm::Constant *NSConcreteStackBlock = nullptr;
- llvm::Constant *BlockObjectAssign = nullptr;
- llvm::Constant *BlockObjectDispose = nullptr;
+ llvm::FunctionCallee BlockObjectAssign = nullptr;
+ llvm::FunctionCallee BlockObjectDispose = nullptr;
llvm::Type *BlockDescriptorType = nullptr;
llvm::Type *GenericBlockLiteralType = nullptr;
@@ -512,10 +524,10 @@ private:
} Block;
/// void @llvm.lifetime.start(i64 %size, i8* nocapture <ptr>)
- llvm::Constant *LifetimeStartFn = nullptr;
+ llvm::Function *LifetimeStartFn = nullptr;
/// void @llvm.lifetime.end(i64 %size, i8* nocapture <ptr>)
- llvm::Constant *LifetimeEndFn = nullptr;
+ llvm::Function *LifetimeEndFn = nullptr;
GlobalDecl initializedGlobalDecl;
@@ -586,7 +598,7 @@ public:
// Version checking function, used to implement ObjC's @available:
// i32 @__isOSVersionAtLeast(i32, i32, i32)
- llvm::Constant *IsOSVersionAtLeastFn = nullptr;
+ llvm::FunctionCallee IsOSVersionAtLeastFn = nullptr;
InstrProfStats &getPGOStats() { return PGOStats; }
llvm::IndexedInstrProfReader *getPGOReader() const { return PGOReader.get(); }
@@ -615,6 +627,9 @@ public:
StaticLocalDeclGuardMap[D] = C;
}
+ Address createUnnamedGlobalFrom(const VarDecl &D, llvm::Constant *Constant,
+ CharUnits Align);
+
bool lookupRepresentativeDecl(StringRef MangledName,
GlobalDecl &Result) const;
@@ -751,9 +766,6 @@ public:
/// Set the visibility for the given LLVM GlobalValue.
void setGlobalVisibility(llvm::GlobalValue *GV, const NamedDecl *D) const;
- void setGlobalVisibilityAndLocal(llvm::GlobalValue *GV,
- const NamedDecl *D) const;
-
void setDSOLocal(llvm::GlobalValue *GV) const;
void setDLLImportDLLExport(llvm::GlobalValue *GV, GlobalDecl D) const;
@@ -763,6 +775,8 @@ public:
void setGVProperties(llvm::GlobalValue *GV, GlobalDecl GD) const;
void setGVProperties(llvm::GlobalValue *GV, const NamedDecl *D) const;
+ void setGVPropertiesAux(llvm::GlobalValue *GV, const NamedDecl *D) const;
+
/// Set the TLS mode for the given LLVM GlobalValue for the thread-local
/// variable declaration D.
void setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const;
@@ -950,16 +964,24 @@ public:
// Produce code for this constructor/destructor. This method doesn't try
// to apply any ABI rules about which other constructors/destructors
// are needed or if they are aliases of each other.
- llvm::Function *codegenCXXStructor(const CXXMethodDecl *MD,
- StructorType Type);
+ llvm::Function *codegenCXXStructor(GlobalDecl GD);
/// Return the address of the constructor/destructor of the given type.
llvm::Constant *
- getAddrOfCXXStructor(const CXXMethodDecl *MD, StructorType Type,
- const CGFunctionInfo *FnInfo = nullptr,
+ getAddrOfCXXStructor(GlobalDecl GD, const CGFunctionInfo *FnInfo = nullptr,
llvm::FunctionType *FnType = nullptr,
bool DontDefer = false,
- ForDefinition_t IsForDefinition = NotForDefinition);
+ ForDefinition_t IsForDefinition = NotForDefinition) {
+ return cast<llvm::Constant>(getAddrAndTypeOfCXXStructor(GD, FnInfo, FnType,
+ DontDefer,
+ IsForDefinition)
+ .getCallee());
+ }
+
+ llvm::FunctionCallee getAddrAndTypeOfCXXStructor(
+ GlobalDecl GD, const CGFunctionInfo *FnInfo = nullptr,
+ llvm::FunctionType *FnType = nullptr, bool DontDefer = false,
+ ForDefinition_t IsForDefinition = NotForDefinition);
/// Given a builtin id for a function like "__builtin_fabsf", return a
/// Function* for "fabsf".
@@ -999,20 +1021,18 @@ public:
void addCompilerUsedGlobal(llvm::GlobalValue *GV);
/// Add a destructor and object to add to the C++ global destructor function.
- void AddCXXDtorEntry(llvm::Constant *DtorFn, llvm::Constant *Object) {
- CXXGlobalDtors.emplace_back(DtorFn, Object);
+ void AddCXXDtorEntry(llvm::FunctionCallee DtorFn, llvm::Constant *Object) {
+ CXXGlobalDtors.emplace_back(DtorFn.getFunctionType(), DtorFn.getCallee(),
+ Object);
}
- /// Create a new runtime function with the specified type and name.
- llvm::Constant *
+ /// Create or return a runtime function declaration with the specified type
+ /// and name.
+ llvm::FunctionCallee
CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name,
llvm::AttributeList ExtraAttrs = llvm::AttributeList(),
bool Local = false);
- /// Create a new compiler builtin function with the specified type and name.
- llvm::Constant *
- CreateBuiltinFunction(llvm::FunctionType *Ty, StringRef Name,
- llvm::AttributeList ExtraAttrs = llvm::AttributeList());
/// Create a new runtime global variable with the specified type and name.
llvm::Constant *CreateRuntimeVariable(llvm::Type *Ty,
StringRef Name);
@@ -1022,13 +1042,13 @@ public:
llvm::Constant *getNSConcreteGlobalBlock();
llvm::Constant *getNSConcreteStackBlock();
- llvm::Constant *getBlockObjectAssign();
- llvm::Constant *getBlockObjectDispose();
+ llvm::FunctionCallee getBlockObjectAssign();
+ llvm::FunctionCallee getBlockObjectDispose();
///@}
- llvm::Constant *getLLVMLifetimeStartFn();
- llvm::Constant *getLLVMLifetimeEndFn();
+ llvm::Function *getLLVMLifetimeStartFn();
+ llvm::Function *getLLVMLifetimeEndFn();
// Make sure that this type is translated.
void UpdateCompletedType(const TagDecl *TD);
@@ -1142,11 +1162,9 @@ public:
/// Appends a detect mismatch command to the linker options.
void AddDetectMismatch(StringRef Name, StringRef Value);
- /// Appends a dependent lib to the "llvm.linker.options" metadata
- /// value.
+ /// Appends a dependent lib to the appropriate metadata value.
void AddDependentLib(StringRef Lib);
- void AddELFLibDirective(StringRef Lib);
llvm::GlobalVariable::LinkageTypes getFunctionLinkage(GlobalDecl GD);
@@ -1244,6 +1262,10 @@ public:
void EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
CodeGenFunction *CGF = nullptr);
+ /// Emit code for the declare mapper construct.
+ void EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D,
+ CodeGenFunction *CGF = nullptr);
+
/// Emit code for the requires directive.
/// \param D Requires declaration
void EmitOMPRequiresDecl(const OMPRequiresDecl *D);
@@ -1294,13 +1316,27 @@ public:
getMostBaseClasses(const CXXRecordDecl *RD);
/// Get the declaration of std::terminate for the platform.
- llvm::Constant *getTerminateFn();
+ llvm::FunctionCallee getTerminateFn();
llvm::SanitizerStatReport &getSanStats();
llvm::Value *
createOpenCLIntToSamplerConversion(const Expr *E, CodeGenFunction &CGF);
+ /// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
+ /// information in the program executable. The argument information stored
+ /// includes the argument name, its type, the address and access qualifiers
+ /// used. This helper can be used to generate metadata for source code kernel
+ /// functions as well as implicitly generated kernels. If a kernel is generated
+ /// implicitly, a null value has to be passed to the last two parameters;
+ /// otherwise all parameters must have valid non-null values.
+ /// \param FN is a pointer to IR function being generated.
+ /// \param FD is a pointer to the function declaration, if any.
+ /// \param CGF is a pointer to CodeGenFunction that generates this function.
+ void GenOpenCLArgMetadata(llvm::Function *FN,
+ const FunctionDecl *FD = nullptr,
+ CodeGenFunction *CGF = nullptr);
+
/// Get target specific null pointer.
/// \param T is the LLVM type of the null pointer.
/// \param QT is the clang QualType of the null pointer.
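
The CodeGenModule.h changes above replace bare llvm::Constant * handles for runtime helpers with llvm::FunctionCallee, which carries the declared llvm::FunctionType together with the callee value. A minimal standalone sketch of that API from upstream LLVM of this era (the demo module, the "puts"/"caller" names, and the main() driver are illustrative and not part of this import):

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  IRBuilder<> B(Ctx);

  // Like CreateRuntimeFunction above, Module::getOrInsertFunction now returns
  // a FunctionCallee: the declared FunctionType paired with the callee value.
  FunctionType *PutsTy =
      FunctionType::get(B.getInt32Ty(), {B.getInt8PtrTy()}, /*isVarArg=*/false);
  FunctionCallee Puts = M.getOrInsertFunction("puts", PutsTy);

  Function *Caller =
      Function::Create(FunctionType::get(B.getVoidTy(), /*isVarArg=*/false),
                       Function::ExternalLinkage, "caller", &M);
  BasicBlock *Entry = BasicBlock::Create(Ctx, "entry", Caller);
  B.SetInsertPoint(Entry);

  // CreateCall accepts the FunctionCallee directly, so the callee's type no
  // longer has to be rediscovered from an opaque llvm::Constant *.
  B.CreateCall(Puts, {Constant::getNullValue(B.getInt8PtrTy())});
  B.CreateRetVoid();

  M.print(outs(), nullptr);
  return 0;
}
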
diff --git a/lib/CodeGen/CodeGenPGO.cpp b/lib/CodeGen/CodeGenPGO.cpp
index 776060743a63..d10a321dc3d7 100644
--- a/lib/CodeGen/CodeGenPGO.cpp
+++ b/lib/CodeGen/CodeGenPGO.cpp
@@ -1,9 +1,8 @@
//===--- CodeGenPGO.cpp - PGO Instrumentation for LLVM CodeGen --*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -772,14 +771,14 @@ void CodeGenPGO::assignRegionCounters(GlobalDecl GD, llvm::Function *Fn) {
// If so, instrument only the base variant; the others are implemented by
// delegation to the base one and would otherwise be counted twice.
if (CGM.getTarget().getCXXABI().hasConstructorVariants()) {
- if (isa<CXXDestructorDecl>(D) && GD.getDtorType() != Dtor_Base)
- return;
-
if (const auto *CCD = dyn_cast<CXXConstructorDecl>(D))
if (GD.getCtorType() != Ctor_Base &&
CodeGenFunction::IsConstructorDelegationValid(CCD))
return;
}
+ if (isa<CXXDestructorDecl>(D) && GD.getDtorType() != Dtor_Base)
+ return;
+
CGM.ClearUnusedCoverageMapping(D);
setFuncName(Fn);
diff --git a/lib/CodeGen/CodeGenPGO.h b/lib/CodeGen/CodeGenPGO.h
index 120ab651a4a8..2e740f789243 100644
--- a/lib/CodeGen/CodeGenPGO.h
+++ b/lib/CodeGen/CodeGenPGO.h
@@ -1,9 +1,8 @@
//===--- CodeGenPGO.h - PGO Instrumentation for LLVM CodeGen ----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CodeGen/CodeGenTBAA.cpp b/lib/CodeGen/CodeGenTBAA.cpp
index 27d39716d22f..09de9591de7e 100644
--- a/lib/CodeGen/CodeGenTBAA.cpp
+++ b/lib/CodeGen/CodeGenTBAA.cpp
@@ -1,9 +1,8 @@
-//===--- CodeGenTypes.cpp - TBAA information for LLVM CodeGen -------------===//
+//===-- CodeGenTBAA.cpp - TBAA information for LLVM CodeGen ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -258,6 +257,8 @@ CodeGenTBAA::CollectFields(uint64_t BaseOffset,
unsigned idx = 0;
for (RecordDecl::field_iterator i = RD->field_begin(),
e = RD->field_end(); i != e; ++i, ++idx) {
+ if ((*i)->isZeroSize(Context) || (*i)->isUnnamedBitfield())
+ continue;
uint64_t Offset = BaseOffset +
Layout.getFieldOffset(idx) / Context.getCharWidth();
QualType FieldQTy = i->getType();
@@ -298,6 +299,8 @@ llvm::MDNode *CodeGenTBAA::getBaseTypeInfoHelper(const Type *Ty) {
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
SmallVector<llvm::MDBuilder::TBAAStructField, 4> Fields;
for (FieldDecl *Field : RD->fields()) {
+ if (Field->isZeroSize(Context) || Field->isUnnamedBitfield())
+ continue;
QualType FieldQTy = Field->getType();
llvm::MDNode *TypeNode = isValidBaseType(FieldQTy) ?
getBaseTypeInfo(FieldQTy) : getTypeInfo(FieldQTy);
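
For the CodeGenTBAA.cpp change above, the fields that stop receiving struct-path TBAA entries are exactly those that occupy no storage. A small illustrative C++ source file, not part of this import, assuming a C++2a-capable clang so [[no_unique_address]] makes the member zero-size:

struct Empty {};

struct S {
  int a;
  [[no_unique_address]] Empty e; // zero-size field: no TBAA struct entry
  int : 0;                       // unnamed bit-field: no TBAA struct entry
  int b;                         // still described at its real offset
};

// Copying an S is enough to make the struct-path metadata observable in IR.
S roundTrip(S In) { return In; }
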
diff --git a/lib/CodeGen/CodeGenTBAA.h b/lib/CodeGen/CodeGenTBAA.h
index 86ba407c05c6..e8e006f41616 100644
--- a/lib/CodeGen/CodeGenTBAA.h
+++ b/lib/CodeGen/CodeGenTBAA.h
@@ -1,9 +1,8 @@
//===--- CodeGenTBAA.h - TBAA information for LLVM CodeGen ------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CodeGen/CodeGenTypeCache.h b/lib/CodeGen/CodeGenTypeCache.h
index 901aed6c00b2..ed4b773afd13 100644
--- a/lib/CodeGen/CodeGenTypeCache.h
+++ b/lib/CodeGen/CodeGenTypeCache.h
@@ -1,9 +1,8 @@
//===--- CodeGenTypeCache.h - Commonly used LLVM types and info -*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index 2acf1ac16180..79b29b3d916f 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -1,9 +1,8 @@
//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -309,8 +308,7 @@ static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
llvm_unreachable("Unknown float format!");
}
-llvm::Type *CodeGenTypes::ConvertFunctionType(QualType QFT,
- const FunctionDecl *FD) {
+llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
assert(QFT.isCanonical());
const Type *Ty = QFT.getTypePtr();
const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
@@ -348,7 +346,7 @@ llvm::Type *CodeGenTypes::ConvertFunctionType(QualType QFT,
const CGFunctionInfo *FI;
if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
FI = &arrangeFreeFunctionType(
- CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)), FD);
+ CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
} else {
const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
FI = &arrangeFreeFunctionType(
@@ -597,7 +595,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
}
case Type::FunctionNoProto:
case Type::FunctionProto:
- ResultType = ConvertFunctionType(T);
+ ResultType = ConvertFunctionTypeInternal(T);
break;
case Type::ObjCObject:
ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
@@ -637,7 +635,9 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
case Type::BlockPointer: {
const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
- llvm::Type *PointeeType = ConvertTypeForMem(FTy);
+ llvm::Type *PointeeType = CGM.getLangOpts().OpenCL
+ ? CGM.getGenericBlockLiteralType()
+ : ConvertTypeForMem(FTy);
unsigned AS = Context.getTargetAddressSpace(FTy);
ResultType = llvm::PointerType::get(PointeeType, AS);
break;
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index 8e344e91b8cd..03102329507e 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -1,9 +1,8 @@
//===--- CodeGenTypes.h - Type translation for LLVM CodeGen -----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -55,65 +54,6 @@ class CGRecordLayout;
class CodeGenModule;
class RequiredArgs;
-enum class StructorType {
- Complete, // constructor or destructor
- Base, // constructor or destructor
- Deleting // destructor only
-};
-
-inline CXXCtorType toCXXCtorType(StructorType T) {
- switch (T) {
- case StructorType::Complete:
- return Ctor_Complete;
- case StructorType::Base:
- return Ctor_Base;
- case StructorType::Deleting:
- llvm_unreachable("cannot have a deleting ctor");
- }
- llvm_unreachable("not a StructorType");
-}
-
-inline StructorType getFromCtorType(CXXCtorType T) {
- switch (T) {
- case Ctor_Complete:
- return StructorType::Complete;
- case Ctor_Base:
- return StructorType::Base;
- case Ctor_Comdat:
- llvm_unreachable("not expecting a COMDAT");
- case Ctor_CopyingClosure:
- case Ctor_DefaultClosure:
- llvm_unreachable("not expecting a closure");
- }
- llvm_unreachable("not a CXXCtorType");
-}
-
-inline CXXDtorType toCXXDtorType(StructorType T) {
- switch (T) {
- case StructorType::Complete:
- return Dtor_Complete;
- case StructorType::Base:
- return Dtor_Base;
- case StructorType::Deleting:
- return Dtor_Deleting;
- }
- llvm_unreachable("not a StructorType");
-}
-
-inline StructorType getFromDtorType(CXXDtorType T) {
- switch (T) {
- case Dtor_Deleting:
- return StructorType::Deleting;
- case Dtor_Complete:
- return StructorType::Complete;
- case Dtor_Base:
- return StructorType::Base;
- case Dtor_Comdat:
- llvm_unreachable("not expecting a COMDAT");
- }
- llvm_unreachable("not a CXXDtorType");
-}
-
/// This class organizes the cross-module state that is used while lowering
/// AST types to LLVM types.
class CodeGenTypes {
@@ -163,6 +103,9 @@ class CodeGenTypes {
llvm::SmallSet<const Type *, 8> RecordsWithOpaqueMemberPointers;
+ /// Helper for ConvertType.
+ llvm::Type *ConvertFunctionTypeInternal(QualType FT);
+
public:
CodeGenTypes(CodeGenModule &cgm);
~CodeGenTypes();
@@ -180,17 +123,13 @@ public:
/// Convert clang calling convention to LLVM calling convention.
unsigned ClangCallConvToLLVMCallConv(CallingConv CC);
+ /// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
+ /// qualification.
+ CanQualType DeriveThisType(const CXXRecordDecl *RD, const CXXMethodDecl *MD);
+
/// ConvertType - Convert type T into a llvm::Type.
llvm::Type *ConvertType(QualType T);
- /// Converts the GlobalDecl into an llvm::Type. This should be used
- /// when we know the target of the function we want to convert. This is
- /// because some functions (explicitly, those with pass_object_size
- /// parameters) may not have the same signature as their type portrays, and
- /// can only be called directly.
- llvm::Type *ConvertFunctionType(QualType FT,
- const FunctionDecl *FD = nullptr);
-
/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
@@ -263,8 +202,7 @@ public:
const CGFunctionInfo &arrangeFreeFunctionCall(const CallArgList &Args,
const FunctionType *Ty,
bool ChainCall);
- const CGFunctionInfo &arrangeFreeFunctionType(CanQual<FunctionProtoType> Ty,
- const FunctionDecl *FD);
+ const CGFunctionInfo &arrangeFreeFunctionType(CanQual<FunctionProtoType> Ty);
const CGFunctionInfo &arrangeFreeFunctionType(CanQual<FunctionNoProtoType> Ty);
/// A nullary function is a freestanding function of type 'void ()'.
@@ -299,8 +237,7 @@ public:
/// C++ methods have some special rules and also have implicit parameters.
const CGFunctionInfo &arrangeCXXMethodDeclaration(const CXXMethodDecl *MD);
- const CGFunctionInfo &arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
- StructorType Type);
+ const CGFunctionInfo &arrangeCXXStructorDeclaration(GlobalDecl GD);
const CGFunctionInfo &arrangeCXXConstructorCall(const CallArgList &Args,
const CXXConstructorDecl *D,
CXXCtorType CtorKind,
diff --git a/lib/CodeGen/ConstantEmitter.h b/lib/CodeGen/ConstantEmitter.h
index 7ad8e5d37cd1..59a19730f4eb 100644
--- a/lib/CodeGen/ConstantEmitter.h
+++ b/lib/CodeGen/ConstantEmitter.h
@@ -1,9 +1,8 @@
//===--- ConstantEmitter.h - IR constant emission ---------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CodeGen/ConstantInitBuilder.cpp b/lib/CodeGen/ConstantInitBuilder.cpp
index 59e66b88fb01..40b1607b5626 100644
--- a/lib/CodeGen/ConstantInitBuilder.cpp
+++ b/lib/CodeGen/ConstantInitBuilder.cpp
@@ -1,9 +1,8 @@
//===--- ConstantInitBuilder.cpp - Global initializer builder -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CodeGen/CoverageMappingGen.cpp b/lib/CodeGen/CoverageMappingGen.cpp
index 35962c73d9a8..6d18027f16a8 100644
--- a/lib/CodeGen/CoverageMappingGen.cpp
+++ b/lib/CodeGen/CoverageMappingGen.cpp
@@ -1,9 +1,8 @@
//===--- CoverageMappingGen.cpp - Coverage mapping generation ---*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -244,7 +243,7 @@ public:
++Depth;
FileLocs.push_back(std::make_pair(Loc, Depth));
}
- std::stable_sort(FileLocs.begin(), FileLocs.end(), llvm::less_second());
+ llvm::stable_sort(FileLocs, llvm::less_second());
for (const auto &FL : FileLocs) {
SourceLocation Loc = FL.first;
@@ -1282,7 +1281,7 @@ std::string getCoverageSection(const CodeGenModule &CGM) {
std::string normalizeFilename(StringRef Filename) {
llvm::SmallString<256> Path(Filename);
llvm::sys::fs::make_absolute(Path);
- llvm::sys::path::remove_dots(Path, /*remove_dot_dots=*/true);
+ llvm::sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
return Path.str().str();
}
@@ -1389,10 +1388,19 @@ void CoverageMappingModuleGen::emit() {
std::string FilenamesAndCoverageMappings;
llvm::raw_string_ostream OS(FilenamesAndCoverageMappings);
CoverageFilenamesSectionWriter(FilenameRefs).write(OS);
- std::string RawCoverageMappings =
- llvm::join(CoverageMappings.begin(), CoverageMappings.end(), "");
- OS << RawCoverageMappings;
- size_t CoverageMappingSize = RawCoverageMappings.size();
+
+ // Stream the content of CoverageMappings to OS while keeping
+ // memory consumption under control.
+ size_t CoverageMappingSize = 0;
+ for (auto &S : CoverageMappings) {
+ CoverageMappingSize += S.size();
+ OS << S;
+ S.clear();
+ S.shrink_to_fit();
+ }
+ CoverageMappings.clear();
+ CoverageMappings.shrink_to_fit();
+
size_t FilenamesSize = OS.str().size() - CoverageMappingSize;
// Append extra zeroes if necessary to ensure that the size of the filenames
// and coverage mappings is a multiple of 8.
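
The CoverageMappingGen.cpp hunk above trades a single llvm::join of all raw coverage mappings for streaming each mapping into the output and releasing its storage immediately, which caps peak memory at roughly one mapping instead of the whole concatenation. A plain-C++ sketch of the same pattern, with standard-library types standing in for the LLVM ones (not part of this import):

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> CoverageMappings = {"map-a", "map-b", "map-c"};
  std::ostringstream OS;

  size_t CoverageMappingSize = 0;
  for (auto &S : CoverageMappings) {
    CoverageMappingSize += S.size();
    OS << S;            // append to the combined buffer
    S.clear();          // drop the per-function copy right away
    S.shrink_to_fit();
  }
  CoverageMappings.clear();
  CoverageMappings.shrink_to_fit();

  std::cout << "mapping bytes: " << CoverageMappingSize << "\n"
            << "combined: " << OS.str() << "\n";
  return 0;
}
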
diff --git a/lib/CodeGen/CoverageMappingGen.h b/lib/CodeGen/CoverageMappingGen.h
index c62db096952a..3bf51f590479 100644
--- a/lib/CodeGen/CoverageMappingGen.h
+++ b/lib/CodeGen/CoverageMappingGen.h
@@ -1,9 +1,8 @@
//===---- CoverageMappingGen.h - Coverage mapping generation ----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CodeGen/EHScopeStack.h b/lib/CodeGen/EHScopeStack.h
index c7bdeac58a1a..3b0db35d982b 100644
--- a/lib/CodeGen/EHScopeStack.h
+++ b/lib/CodeGen/EHScopeStack.h
@@ -1,9 +1,8 @@
//===-- EHScopeStack.h - Stack for cleanup IR generation --------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index b53304528c3d..3b2413d960d6 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -1,9 +1,8 @@
//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -29,7 +28,6 @@
#include "clang/AST/Mangle.h"
#include "clang/AST/Type.h"
#include "clang/AST/StmtCXX.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instructions.h"
@@ -64,13 +62,9 @@ public:
bool classifyReturnType(CGFunctionInfo &FI) const override;
- bool passClassIndirect(const CXXRecordDecl *RD) const {
- return !canCopyArgument(RD);
- }
-
RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
// If C++ prohibits us from making a copy, pass by address.
- if (passClassIndirect(RD))
+ if (!RD->canPassInRegisters())
return RAA_Indirect;
return RAA_Default;
}
@@ -160,19 +154,6 @@ public:
Address Ptr, QualType ElementType,
const CXXDestructorDecl *Dtor) override;
- /// Itanium says that an _Unwind_Exception has to be "double-word"
- /// aligned (and thus the end of it is also so-aligned), meaning 16
- /// bytes. Of course, that was written for the actual Itanium,
- /// which is a 64-bit platform. Classically, the ABI doesn't really
- /// specify the alignment on other platforms, but in practice
- /// libUnwind declares the struct with __attribute__((aligned)), so
- /// we assume that alignment here. (It's generally 16 bytes, but
- /// some targets overwrite it.)
- CharUnits getAlignmentOfExnObject() {
- auto align = CGM.getContext().getTargetDefaultAlignForAttributeAligned();
- return CGM.getContext().toCharUnitsFromBits(align);
- }
-
void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
@@ -218,7 +199,7 @@ public:
void EmitCXXConstructors(const CXXConstructorDecl *D) override;
AddedStructorArgs
- buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
+ buildStructorSignature(GlobalDecl GD,
SmallVectorImpl<CanQualType> &ArgTys) override;
bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
@@ -243,7 +224,8 @@ public:
void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
- bool Delegating, Address This) override;
+ bool Delegating, Address This,
+ QualType ThisTy) override;
void emitVTableDefinitions(CodeGenVTables &CGVT,
const CXXRecordDecl *RD) override;
@@ -280,9 +262,8 @@ public:
llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *Dtor,
- CXXDtorType DtorType,
- Address This,
- const CXXMemberCallExpr *CE) override;
+ CXXDtorType DtorType, Address This,
+ DeleteOrMemberCallExpr E) override;
void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
@@ -330,7 +311,8 @@ public:
llvm::GlobalVariable *DeclPtr,
bool PerformInit) override;
void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
- llvm::Constant *dtor, llvm::Constant *addr) override;
+ llvm::FunctionCallee dtor,
+ llvm::Constant *addr) override;
llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
llvm::Value *Val);
@@ -377,7 +359,7 @@ public:
llvm::GlobalValue::LinkageTypes Linkage) const;
friend class ItaniumRTTIBuilder;
- void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override;
+ void emitCXXStructor(GlobalDecl GD) override;
std::pair<llvm::Value *, const CXXRecordDecl *>
LoadVTablePtr(CodeGenFunction &CGF, Address This,
@@ -1094,7 +1076,7 @@ bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
return false;
// If C++ prohibits us from making a copy, return by address.
- if (passClassIndirect(RD)) {
+ if (!RD->canPassInRegisters()) {
auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
return true;
@@ -1146,7 +1128,7 @@ void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
// FIXME: Provide a source location here even though there's no
// CXXMemberCallExpr for dtor call.
CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
- EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, /*CE=*/nullptr);
+ EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);
if (UseGlobalDelete)
CGF.PopCleanupBlock();
@@ -1156,9 +1138,9 @@ void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
// void __cxa_rethrow();
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
- llvm::Constant *Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
+ llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
if (isNoReturn)
CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
@@ -1166,22 +1148,22 @@ void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
CGF.EmitRuntimeCallOrInvoke(Fn);
}
-static llvm::Constant *getAllocateExceptionFn(CodeGenModule &CGM) {
+static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
// void *__cxa_allocate_exception(size_t thrown_size);
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
}
-static llvm::Constant *getThrowFn(CodeGenModule &CGM) {
+static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
// void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
// void (*dest) (void *));
llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGM.VoidTy, Args, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
}
@@ -1192,11 +1174,11 @@ void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
- llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(CGM);
+ llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
- CharUnits ExnAlign = getAlignmentOfExnObject();
+ CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));
// Now throw the exception.
@@ -1210,7 +1192,7 @@ void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
if (!Record->hasTrivialDestructor()) {
CXXDestructorDecl *DtorD = Record->getDestructor();
- Dtor = CGM.getAddrOfCXXStructor(DtorD, StructorType::Complete);
+ Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
}
}
@@ -1220,7 +1202,7 @@ void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
}
-static llvm::Constant *getItaniumDynamicCastFn(CodeGenFunction &CGF) {
+static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
// void *__dynamic_cast(const void *sub,
// const abi::__class_type_info *src,
// const abi::__class_type_info *dst,
@@ -1243,7 +1225,7 @@ static llvm::Constant *getItaniumDynamicCastFn(CodeGenFunction &CGF) {
return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
}
-static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
+static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
// void __cxa_bad_cast();
llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
@@ -1301,7 +1283,7 @@ static CharUnits computeOffsetHint(ASTContext &Context,
return Offset;
}
-static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
+static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
// void __cxa_bad_typeid();
llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
@@ -1314,8 +1296,9 @@ bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
}
void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
- llvm::Value *Fn = getBadTypeidFn(CGF);
- CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
+ llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
+ llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
+ Call->setDoesNotReturn();
CGF.Builder.CreateUnreachable();
}
@@ -1411,8 +1394,9 @@ llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
}
bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
- llvm::Value *Fn = getBadCastFn(CGF);
- CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
+ llvm::FunctionCallee Fn = getBadCastFn(CGF);
+ llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
+ Call->setDoesNotReturn();
CGF.Builder.CreateUnreachable();
return true;
}
@@ -1457,7 +1441,7 @@ void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
}
CGCXXABI::AddedStructorArgs
-ItaniumCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
+ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
SmallVectorImpl<CanQualType> &ArgTys) {
ASTContext &Context = getContext();
@@ -1465,7 +1449,9 @@ ItaniumCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
// These are Clang types, so we don't need to worry about sret yet.
// Check if we need to add a VTT parameter (which has type void **).
- if (T == StructorType::Base && MD->getParent()->getNumVBases() != 0) {
+ if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
+ : GD.getDtorType() == Dtor_Base) &&
+ cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
ArgTys.insert(ArgTys.begin() + 1,
Context.getPointerType(Context.VoidPtrTy));
return AddedStructorArgs::prefix(1);
@@ -1553,7 +1539,8 @@ CGCXXABI::AddedStructorArgs ItaniumCXXABI::addImplicitConstructorArgs(
void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
- bool Delegating, Address This) {
+ bool Delegating, Address This,
+ QualType ThisTy) {
GlobalDecl GD(DD, Type);
llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
@@ -1563,12 +1550,10 @@ void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
Type != Dtor_Base && DD->isVirtual())
Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
else
- Callee = CGCallee::forDirect(
- CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type)), GD);
+ Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
- CGF.EmitCXXMemberOrOperatorCall(DD, Callee, ReturnValueSlot(),
- This.getPointer(), VTT, VTTTy,
- nullptr, nullptr);
+ CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
+ nullptr);
}
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
@@ -1756,19 +1741,27 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
- Address This, const CXXMemberCallExpr *CE) {
+ Address This, DeleteOrMemberCallExpr E) {
+ auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
+ auto *D = E.dyn_cast<const CXXDeleteExpr *>();
+ assert((CE != nullptr) ^ (D != nullptr));
assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
- const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
- Dtor, getFromDtorType(DtorType));
+ GlobalDecl GD(Dtor, DtorType);
+ const CGFunctionInfo *FInfo =
+ &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
- CGCallee Callee =
- CGCallee::forVirtual(CE, GlobalDecl(Dtor, DtorType), This, Ty);
+ CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
+
+ QualType ThisTy;
+ if (CE)
+ ThisTy = CE->getImplicitObjectArgument()->getType()->getPointeeType();
+ else
+ ThisTy = D->getDestroyedType();
- CGF.EmitCXXMemberOrOperatorCall(Dtor, Callee, ReturnValueSlot(),
- This.getPointer(), /*ImplicitParam=*/nullptr,
- QualType(), CE, nullptr);
+ CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
+ QualType(), nullptr);
return nullptr;
}
@@ -1958,7 +1951,7 @@ Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
- llvm::Constant *F =
+ llvm::FunctionCallee F =
CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
}
@@ -1989,7 +1982,7 @@ llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
// the metadata may be lost.
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
- llvm::Constant *F =
+ llvm::FunctionCallee F =
CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
}
@@ -2024,7 +2017,7 @@ Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
CGF.Builder.CreateStore(elementSize, cookie);
// The second element is the element count.
- cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1, CGF.getSizeSize());
+ cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
CGF.Builder.CreateStore(numElements, cookie);
// Finally, compute a pointer to the actual data buffer by skipping
@@ -2047,8 +2040,8 @@ llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
/*********************** Static local initialization **************************/
-static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
- llvm::PointerType *GuardPtrTy) {
+static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
+ llvm::PointerType *GuardPtrTy) {
// int __cxa_guard_acquire(__guard *guard_object);
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
@@ -2060,8 +2053,8 @@ static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
llvm::Attribute::NoUnwind));
}
-static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
- llvm::PointerType *GuardPtrTy) {
+static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
+ llvm::PointerType *GuardPtrTy) {
// void __cxa_guard_release(__guard *guard_object);
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
@@ -2072,8 +2065,8 @@ static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
llvm::Attribute::NoUnwind));
}
-static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
- llvm::PointerType *GuardPtrTy) {
+static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
+ llvm::PointerType *GuardPtrTy) {
// void __cxa_guard_abort(__guard *guard_object);
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
@@ -2286,9 +2279,10 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
/// Register a global destructor using __cxa_atexit.
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
- llvm::Constant *dtor,
- llvm::Constant *addr,
- bool TLS) {
+ llvm::FunctionCallee dtor,
+ llvm::Constant *addr, bool TLS) {
+ assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
+ "__cxa_atexit is disabled");
const char *Name = "__cxa_atexit";
if (TLS) {
const llvm::Triple &T = CGF.getTarget().getTriple();
@@ -2301,15 +2295,10 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
llvm::Type *dtorTy =
llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
- // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
- llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
- llvm::FunctionType *atexitTy =
- llvm::FunctionType::get(CGF.IntTy, paramTys, false);
-
- // Fetch the actual function.
- llvm::Constant *atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
- if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
- fn->setDoesNotThrow();
+ // Preserve address space of addr.
+ auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
+ auto AddrInt8PtrTy =
+ AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy;
// Create a variable that binds the atexit to this shared object.
llvm::Constant *handle =
@@ -2317,6 +2306,16 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
+ llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()};
+ llvm::FunctionType *atexitTy =
+ llvm::FunctionType::get(CGF.IntTy, paramTys, false);
+
+ // Fetch the actual function.
+ llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
+ if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
+ fn->setDoesNotThrow();
+
if (!addr)
// addr is null when we are trying to register a dtor annotated with
// __attribute__((destructor)) in a constructor function. Using null here is
@@ -2324,11 +2323,10 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
// function.
addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);
- llvm::Value *args[] = {
- llvm::ConstantExpr::getBitCast(dtor, dtorTy),
- llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
- handle
- };
+ llvm::Value *args[] = {llvm::ConstantExpr::getBitCast(
+ cast<llvm::Constant>(dtor.getCallee()), dtorTy),
+ llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy),
+ handle};
CGF.EmitNounwindRuntimeCall(atexit, args);
}
@@ -2377,20 +2375,19 @@ void CodeGenModule::registerGlobalDtorsWithAtExit() {
}
/// Register a global destructor as best as we know how.
-void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
- const VarDecl &D,
- llvm::Constant *dtor,
+void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::FunctionCallee dtor,
llvm::Constant *addr) {
if (D.isNoDestroy(CGM.getContext()))
return;
- // Use __cxa_atexit if available.
- if (CGM.getCodeGenOpts().CXAAtExit)
+ // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
+ // or __cxa_atexit, depending on whether this VarDecl is thread-local storage
+ // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
+ // We can always use __cxa_thread_atexit.
+ if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
- if (D.getTLSKind())
- CGM.ErrorUnsupported(&D, "non-trivial TLS destruction");
-
// In Apple kexts, we want to add a global destructor entry.
// FIXME: shouldn't this be guarded by some variable?
if (CGM.getLangOpts().AppleKext) {
@@ -2416,7 +2413,7 @@ static bool isThreadWrapperReplaceable(const VarDecl *VD,
static llvm::GlobalValue::LinkageTypes
getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
llvm::GlobalValue::LinkageTypes VarLinkage =
- CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false);
+ CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
// For internal linkage variables, we don't need an external or weak wrapper.
if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
@@ -2463,10 +2460,12 @@ ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);
// Always resolve references to the wrapper at link time.
- if (!Wrapper->hasLocalLinkage() && !(isThreadWrapperReplaceable(VD, CGM) &&
- !llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) &&
- !llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage())))
- Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ if (!Wrapper->hasLocalLinkage())
+ if (!isThreadWrapperReplaceable(VD, CGM) ||
+ llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
+ llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
+ VD->getVisibility() == HiddenVisibility)
+ Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
if (isThreadWrapperReplaceable(VD, CGM)) {
Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
@@ -2541,6 +2540,8 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
}
+ llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);
+
// If we have a definition for the variable, emit the initialization
// function as an alias to the global Init function (if any). Otherwise,
// produce a declaration of the initialization function.
@@ -2559,8 +2560,7 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
// This function will not exist if the TU defining the thread_local
// variable in question does not need any dynamic initialization for
// its thread_local variables.
- llvm::FunctionType *FnTy = llvm::FunctionType::get(CGM.VoidTy, false);
- Init = llvm::Function::Create(FnTy,
+ Init = llvm::Function::Create(InitFnTy,
llvm::GlobalVariable::ExternalWeakLinkage,
InitFnName.str(), &CGM.getModule());
const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
@@ -2578,7 +2578,7 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
CGBuilderTy Builder(CGM, Entry);
if (InitIsInitFunc) {
if (Init) {
- llvm::CallInst *CallVal = Builder.CreateCall(Init);
+ llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
if (isThreadWrapperReplaceable(VD, CGM)) {
CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
llvm::Function *Fn =
@@ -2594,7 +2594,7 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
Builder.CreateCondBr(Have, InitBB, ExitBB);
Builder.SetInsertPoint(InitBB);
- Builder.CreateCall(Init);
+ Builder.CreateCall(InitFnTy, Init);
Builder.CreateBr(ExitBB);
Builder.SetInsertPoint(ExitBB);
@@ -2791,7 +2791,7 @@ ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
// RTTI, check if emitting vtables opportunistically need any adjustment.
GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
- /*Constant=*/true,
+ /*isConstant=*/true,
llvm::GlobalValue::ExternalLinkage, nullptr,
Name);
const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
@@ -2960,7 +2960,7 @@ static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
// Don't import the RTTI but emit it locally.
- if (CGM.getTriple().isWindowsGNUEnvironment() && IsDLLImport)
+ if (CGM.getTriple().isWindowsGNUEnvironment())
return false;
if (CGM.getVTables().isVTableExternal(RD))
@@ -3396,7 +3396,7 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(M, Init->getType(),
- /*Constant=*/true, Linkage, Init, Name);
+ /*isConstant=*/true, Linkage, Init, Name);
// If there's already an old global variable, replace it with the new one.
if (OldGV) {
@@ -3438,6 +3438,9 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
TypeName->setDLLStorageClass(DLLStorageClass);
GV->setDLLStorageClass(DLLStorageClass);
+ TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
+ GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
+
return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
}
@@ -3846,31 +3849,28 @@ static void emitConstructorDestructorAlias(CodeGenModule &CGM,
CGM.SetCommonAttributes(AliasDecl, Alias);
}
-void ItaniumCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
- StructorType Type) {
+void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
+ auto *MD = cast<CXXMethodDecl>(GD.getDecl());
auto *CD = dyn_cast<CXXConstructorDecl>(MD);
const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
StructorCodegen CGType = getCodegenToUse(CGM, MD);
- if (Type == StructorType::Complete) {
- GlobalDecl CompleteDecl;
+ if (CD ? GD.getCtorType() == Ctor_Complete
+ : GD.getDtorType() == Dtor_Complete) {
GlobalDecl BaseDecl;
- if (CD) {
- CompleteDecl = GlobalDecl(CD, Ctor_Complete);
- BaseDecl = GlobalDecl(CD, Ctor_Base);
- } else {
- CompleteDecl = GlobalDecl(DD, Dtor_Complete);
- BaseDecl = GlobalDecl(DD, Dtor_Base);
- }
+ if (CD)
+ BaseDecl = GD.getWithCtorType(Ctor_Base);
+ else
+ BaseDecl = GD.getWithDtorType(Dtor_Base);
if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
- emitConstructorDestructorAlias(CGM, CompleteDecl, BaseDecl);
+ emitConstructorDestructorAlias(CGM, GD, BaseDecl);
return;
}
if (CGType == StructorCodegen::RAUW) {
- StringRef MangledName = CGM.getMangledName(CompleteDecl);
+ StringRef MangledName = CGM.getMangledName(GD);
auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
CGM.addReplacement(MangledName, Aliasee);
return;
@@ -3881,7 +3881,8 @@ void ItaniumCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
// base class if there is exactly one non-virtual base class with a
// non-trivial destructor, there are no fields with a non-trivial
// destructor, and the body of the destructor is trivial.
- if (DD && Type == StructorType::Base && CGType != StructorCodegen::COMDAT &&
+ if (DD && GD.getDtorType() == Dtor_Base &&
+ CGType != StructorCodegen::COMDAT &&
!CGM.TryEmitBaseDestructorAsAlias(DD))
return;
@@ -3897,7 +3898,7 @@ void ItaniumCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
// In such cases we should try to emit the deleting dtor as an alias to the
// selected 'operator delete'.
- llvm::Function *Fn = CGM.codegenCXXStructor(MD, Type);
+ llvm::Function *Fn = CGM.codegenCXXStructor(GD);
if (CGType == StructorCodegen::COMDAT) {
SmallString<256> Buffer;
@@ -3913,26 +3914,26 @@ void ItaniumCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
}
}
-static llvm::Constant *getBeginCatchFn(CodeGenModule &CGM) {
+static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
// void *__cxa_begin_catch(void*);
llvm::FunctionType *FTy = llvm::FunctionType::get(
- CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
+ CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
}
-static llvm::Constant *getEndCatchFn(CodeGenModule &CGM) {
+static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
// void __cxa_end_catch();
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
}
-static llvm::Constant *getGetExceptionPtrFn(CodeGenModule &CGM) {
+static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
// void *__cxa_get_exception_ptr(void*);
llvm::FunctionType *FTy = llvm::FunctionType::get(
- CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
+ CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
}
@@ -4204,14 +4205,14 @@ void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
/// Get or define the following function:
/// void @__clang_call_terminate(i8* %exn) nounwind noreturn
/// This code is used only in C++.
-static llvm::Constant *getClangCallTerminateFn(CodeGenModule &CGM) {
+static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
llvm::FunctionType *fnTy =
- llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
- llvm::Constant *fnRef = CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false);
+ llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
-
- llvm::Function *fn = dyn_cast<llvm::Function>(fnRef);
- if (fn && fn->empty()) {
+ llvm::Function *fn =
+ cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
+ if (fn->empty()) {
fn->setDoesNotThrow();
fn->setDoesNotReturn();
@@ -4229,7 +4230,7 @@ static llvm::Constant *getClangCallTerminateFn(CodeGenModule &CGM) {
// Set up the function.
llvm::BasicBlock *entry =
- llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
+ llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
CGBuilderTy builder(CGM, entry);
// Pull the exception pointer out of the parameter list.
@@ -4249,7 +4250,6 @@ static llvm::Constant *getClangCallTerminateFn(CodeGenModule &CGM) {
// std::terminate cannot return.
builder.CreateUnreachable();
}
-
return fnRef;
}
diff --git a/lib/CodeGen/MacroPPCallbacks.cpp b/lib/CodeGen/MacroPPCallbacks.cpp
index 013ca15e2391..92800e738b62 100644
--- a/lib/CodeGen/MacroPPCallbacks.cpp
+++ b/lib/CodeGen/MacroPPCallbacks.cpp
@@ -1,9 +1,8 @@
//===--- MacroPPCallbacks.cpp ---------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CodeGen/MacroPPCallbacks.h b/lib/CodeGen/MacroPPCallbacks.h
index b87a4005d481..32906a000269 100644
--- a/lib/CodeGen/MacroPPCallbacks.h
+++ b/lib/CodeGen/MacroPPCallbacks.h
@@ -1,9 +1,8 @@
//===--- MacroPPCallbacks.h -------------------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CodeGen/MicrosoftCXXABI.cpp b/lib/CodeGen/MicrosoftCXXABI.cpp
index 5545bc6647e6..fa34414de5da 100644
--- a/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -1,9 +1,8 @@
//===--- MicrosoftCXXABI.cpp - Emit LLVM Code from ASTs for a Module ------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -27,7 +26,6 @@
#include "clang/AST/VTableBuilder.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSet.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
@@ -207,7 +205,7 @@ public:
// delegate to or alias the base destructor.
AddedStructorArgs
- buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
+ buildStructorSignature(GlobalDecl GD,
SmallVectorImpl<CanQualType> &ArgTys) override;
/// Non-base dtors should be emitted as delegating thunks in this ABI.
@@ -260,7 +258,8 @@ public:
void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
- bool Delegating, Address This) override;
+ bool Delegating, Address This,
+ QualType ThisTy) override;
void emitVTableTypeMetadata(const VPtrInfo &Info, const CXXRecordDecl *RD,
llvm::GlobalVariable *VTable);
@@ -298,9 +297,8 @@ public:
llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *Dtor,
- CXXDtorType DtorType,
- Address This,
- const CXXMemberCallExpr *CE) override;
+ CXXDtorType DtorType, Address This,
+ DeleteOrMemberCallExpr E) override;
void adjustCallArgsForDestructorThunk(CodeGenFunction &CGF, GlobalDecl GD,
CallArgList &CallArgs) override {
@@ -354,7 +352,7 @@ public:
? llvm::GlobalValue::LinkOnceODRLinkage
: llvm::GlobalValue::InternalLinkage;
auto *VDispMap = new llvm::GlobalVariable(
- CGM.getModule(), VDispMapTy, /*Constant=*/true, Linkage,
+ CGM.getModule(), VDispMapTy, /*isConstant=*/true, Linkage,
/*Initializer=*/Init, MangledName);
return VDispMap;
}
@@ -396,7 +394,8 @@ public:
llvm::GlobalVariable *DeclPtr,
bool PerformInit) override;
void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
- llvm::Constant *Dtor, llvm::Constant *Addr) override;
+ llvm::FunctionCallee Dtor,
+ llvm::Constant *Addr) override;
// ==== Notes on array cookies =========
//
@@ -437,7 +436,7 @@ public:
friend struct MSRTTIBuilder;
bool isImageRelative() const {
- return CGM.getTarget().getPointerWidth(/*AddressSpace=*/0) == 64;
+ return CGM.getTarget().getPointerWidth(/*AddrSpace=*/0) == 64;
}
// 5 routines for constructing the llvm types for MS RTTI structs.
@@ -674,7 +673,7 @@ public:
llvm::Value *MemPtr,
const MemberPointerType *MPT) override;
- void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override;
+ void emitCXXStructor(GlobalDecl GD) override;
llvm::StructType *getCatchableTypeType() {
if (CatchableTypeType)
@@ -726,18 +725,20 @@ public:
return ThrowInfoType;
}
- llvm::Constant *getThrowFn() {
+ llvm::FunctionCallee getThrowFn() {
// _CxxThrowException is passed an exception object and a ThrowInfo object
// which describes the exception.
llvm::Type *Args[] = {CGM.Int8PtrTy, getThrowInfoType()->getPointerTo()};
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGM.VoidTy, Args, /*IsVarArgs=*/false);
- auto *Fn = cast<llvm::Function>(
- CGM.CreateRuntimeFunction(FTy, "_CxxThrowException"));
+ llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
+ llvm::FunctionCallee Throw =
+ CGM.CreateRuntimeFunction(FTy, "_CxxThrowException");
// _CxxThrowException is stdcall on 32-bit x86 platforms.
- if (CGM.getTarget().getTriple().getArch() == llvm::Triple::x86)
- Fn->setCallingConv(llvm::CallingConv::X86_StdCall);
- return Fn;
+ if (CGM.getTarget().getTriple().getArch() == llvm::Triple::x86) {
+ if (auto *Fn = dyn_cast<llvm::Function>(Throw.getCallee()))
+ Fn->setCallingConv(llvm::CallingConv::X86_StdCall);
+ }
+ return Throw;
}
llvm::Function *getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
@@ -810,7 +811,7 @@ MicrosoftCXXABI::getRecordArgABI(const CXXRecordDecl *RD) const {
// Use the simple Itanium rules for now.
// FIXME: This is incompatible with MSVC for arguments with a dtor and no
// copy ctor.
- return !canCopyArgument(RD) ? RAA_Indirect : RAA_Default;
+ return !RD->canPassInRegisters() ? RAA_Indirect : RAA_Default;
case llvm::Triple::x86:
// All record arguments are passed in memory on x86. Decide whether to
@@ -819,7 +820,7 @@ MicrosoftCXXABI::getRecordArgABI(const CXXRecordDecl *RD) const {
// If C++ prohibits us from making a copy, construct the arguments directly
// into argument memory.
- if (!canCopyArgument(RD))
+ if (!RD->canPassInRegisters())
return RAA_DirectInMemory;
// Otherwise, construct the argument into a temporary and copy the bytes
@@ -828,7 +829,7 @@ MicrosoftCXXABI::getRecordArgABI(const CXXRecordDecl *RD) const {
case llvm::Triple::x86_64:
case llvm::Triple::aarch64:
- return !canCopyArgument(RD) ? RAA_Indirect : RAA_Default;
+ return !RD->canPassInRegisters() ? RAA_Indirect : RAA_Default;
}
llvm_unreachable("invalid enum");
@@ -843,8 +844,7 @@ void MicrosoftCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
// CXXMemberCallExpr for dtor call.
bool UseGlobalDelete = DE->isGlobalDelete();
CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
- llvm::Value *MDThis =
- EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, /*CE=*/nullptr);
+ llvm::Value *MDThis = EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);
if (UseGlobalDelete)
CGF.EmitDeleteCall(DE->getOperatorDelete(), MDThis, ElementType);
}
@@ -853,7 +853,7 @@ void MicrosoftCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
llvm::Value *Args[] = {
llvm::ConstantPointerNull::get(CGM.Int8PtrTy),
llvm::ConstantPointerNull::get(getThrowInfoType()->getPointerTo())};
- auto *Fn = getThrowFn();
+ llvm::FunctionCallee Fn = getThrowFn();
if (isNoReturn)
CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, Args);
else
@@ -927,20 +927,20 @@ bool MicrosoftCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
!getContext().getASTRecordLayout(SrcDecl).hasExtendableVFPtr();
}
-static llvm::CallSite emitRTtypeidCall(CodeGenFunction &CGF,
- llvm::Value *Argument) {
+static llvm::CallBase *emitRTtypeidCall(CodeGenFunction &CGF,
+ llvm::Value *Argument) {
llvm::Type *ArgTypes[] = {CGF.Int8PtrTy};
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGF.Int8PtrTy, ArgTypes, false);
llvm::Value *Args[] = {Argument};
- llvm::Constant *Fn = CGF.CGM.CreateRuntimeFunction(FTy, "__RTtypeid");
+ llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(FTy, "__RTtypeid");
return CGF.EmitRuntimeCallOrInvoke(Fn, Args);
}
void MicrosoftCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
- llvm::CallSite Call =
+ llvm::CallBase *Call =
emitRTtypeidCall(CGF, llvm::Constant::getNullValue(CGM.VoidPtrTy));
- Call.setDoesNotReturn();
+ Call->setDoesNotReturn();
CGF.Builder.CreateUnreachable();
}
@@ -950,7 +950,7 @@ llvm::Value *MicrosoftCXXABI::EmitTypeid(CodeGenFunction &CGF,
llvm::Type *StdTypeInfoPtrTy) {
std::tie(ThisPtr, std::ignore, std::ignore) =
performBaseAdjustment(CGF, ThisPtr, SrcRecordTy);
- auto Typeid = emitRTtypeidCall(CGF, ThisPtr.getPointer()).getInstruction();
+ llvm::CallBase *Typeid = emitRTtypeidCall(CGF, ThisPtr.getPointer());
return CGF.Builder.CreateBitCast(Typeid, StdTypeInfoPtrTy);
}
@@ -985,13 +985,13 @@ llvm::Value *MicrosoftCXXABI::EmitDynamicCastCall(
// BOOL isReference)
llvm::Type *ArgTypes[] = {CGF.Int8PtrTy, CGF.Int32Ty, CGF.Int8PtrTy,
CGF.Int8PtrTy, CGF.Int32Ty};
- llvm::Constant *Function = CGF.CGM.CreateRuntimeFunction(
+ llvm::FunctionCallee Function = CGF.CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGF.Int8PtrTy, ArgTypes, false),
"__RTDynamicCast");
llvm::Value *Args[] = {
ThisPtr, Offset, SrcRTTI, DestRTTI,
llvm::ConstantInt::get(CGF.Int32Ty, DestTy->isReferenceType())};
- ThisPtr = CGF.EmitRuntimeCallOrInvoke(Function, Args).getInstruction();
+ ThisPtr = CGF.EmitRuntimeCallOrInvoke(Function, Args);
return CGF.Builder.CreateBitCast(ThisPtr, DestLTy);
}
@@ -1005,7 +1005,7 @@ MicrosoftCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
// PVOID __RTCastToVoid(
// PVOID inptr)
llvm::Type *ArgTypes[] = {CGF.Int8PtrTy};
- llvm::Constant *Function = CGF.CGM.CreateRuntimeFunction(
+ llvm::FunctionCallee Function = CGF.CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGF.Int8PtrTy, ArgTypes, false),
"__RTCastToVoid");
llvm::Value *Args[] = {Value.getPointer()};
@@ -1050,33 +1050,55 @@ bool MicrosoftCXXABI::hasMostDerivedReturn(GlobalDecl GD) const {
return isDeletingDtor(GD);
}
+static bool IsSizeGreaterThan128(const CXXRecordDecl *RD) {
+ return RD->getASTContext().getTypeSize(RD->getTypeForDecl()) > 128;
+}
+
+static bool hasMicrosoftABIRestrictions(const CXXRecordDecl *RD) {
+ // For AArch64, we use the C++14 definition of an aggregate, so we also
+ // check for:
+  //   No private or protected non-static data members.
+ // No base classes
+ // No virtual functions
+ // Additionally, we need to ensure that there is a trivial copy assignment
+ // operator, a trivial destructor and no user-provided constructors.
+ if (RD->hasProtectedFields() || RD->hasPrivateFields())
+ return true;
+ if (RD->getNumBases() > 0)
+ return true;
+ if (RD->isPolymorphic())
+ return true;
+ if (RD->hasNonTrivialCopyAssignment())
+ return true;
+ for (const CXXConstructorDecl *Ctor : RD->ctors())
+ if (Ctor->isUserProvided())
+ return true;
+ if (RD->hasNonTrivialDestructor())
+ return true;
+ return false;
+}
+
bool MicrosoftCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
if (!RD)
return false;
- CharUnits Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
- if (FI.isInstanceMethod()) {
- // If it's an instance method, aggregates are always returned indirectly via
- // the second parameter.
- FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
- FI.getReturnInfo().setSRetAfterThis(FI.isInstanceMethod());
+ bool isAArch64 = CGM.getTarget().getTriple().isAArch64();
+ bool isSimple = !isAArch64 || !hasMicrosoftABIRestrictions(RD);
+ bool isIndirectReturn =
+ isAArch64 ? (!RD->canPassInRegisters() ||
+ IsSizeGreaterThan128(RD))
+ : !RD->isPOD();
+ bool isInstanceMethod = FI.isInstanceMethod();
- // aarch64-windows requires that instance methods use X1 for the return
- // address. So for aarch64-windows we do not mark the
- // return as SRet.
- FI.getReturnInfo().setSuppressSRet(CGM.getTarget().getTriple().getArch() ==
- llvm::Triple::aarch64);
- return true;
- } else if (!RD->isPOD()) {
- // If it's a free function, non-POD types are returned indirectly.
+ if (isIndirectReturn || !isSimple || isInstanceMethod) {
+ CharUnits Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ FI.getReturnInfo().setSRetAfterThis(isInstanceMethod);
+
+ FI.getReturnInfo().setInReg(isAArch64 &&
+ !(isSimple && IsSizeGreaterThan128(RD)));
- // aarch64-windows requires that non-POD, non-instance returns use X0 for
- // the return address. So for aarch64-windows we do not mark the return as
- // SRet.
- FI.getReturnInfo().setSuppressSRet(CGM.getTarget().getTriple().getArch() ==
- llvm::Triple::aarch64);
return true;
}
@@ -1233,16 +1255,17 @@ void MicrosoftCXXABI::EmitVBPtrStores(CodeGenFunction &CGF,
}
CGCXXABI::AddedStructorArgs
-MicrosoftCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
+MicrosoftCXXABI::buildStructorSignature(GlobalDecl GD,
SmallVectorImpl<CanQualType> &ArgTys) {
AddedStructorArgs Added;
// TODO: 'for base' flag
- if (T == StructorType::Deleting) {
+ if (isa<CXXDestructorDecl>(GD.getDecl()) &&
+ GD.getDtorType() == Dtor_Deleting) {
// The scalar deleting destructor takes an implicit int parameter.
ArgTys.push_back(getContext().IntTy);
++Added.Suffix;
}
- auto *CD = dyn_cast<CXXConstructorDecl>(MD);
+ auto *CD = dyn_cast<CXXConstructorDecl>(GD.getDecl());
if (!CD)
return Added;
@@ -1289,7 +1312,7 @@ llvm::GlobalValue::LinkageTypes MicrosoftCXXABI::getCXXDestructorLinkage(
// The base destructor most closely tracks the user-declared constructor, so
// we delegate back to the normal declarator case.
return CGM.getLLVMLinkageForDeclarator(Dtor, Linkage,
- /*isConstantVariable=*/false);
+ /*IsConstantVariable=*/false);
case Dtor_Complete:
// The complete destructor is like an inline function, but it may be
// imported and therefore must be exported as well. This requires changing
@@ -1545,16 +1568,16 @@ CGCXXABI::AddedStructorArgs MicrosoftCXXABI::addImplicitConstructorArgs(
void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
- bool Delegating, Address This) {
+ bool Delegating, Address This,
+ QualType ThisTy) {
// Use the base destructor variant in place of the complete destructor variant
// if the class has no virtual bases. This effectively implements some of the
// -mconstructor-aliases optimization, but as part of the MS C++ ABI.
if (Type == Dtor_Complete && DD->getParent()->getNumVBases() == 0)
Type = Dtor_Base;
- CGCallee Callee =
- CGCallee::forDirect(CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type)),
- GlobalDecl(DD, Type));
+ GlobalDecl GD(DD, Type);
+ CGCallee Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
if (DD->isVirtual()) {
assert(Type != CXXDtorType::Dtor_Deleting &&
@@ -1568,10 +1591,9 @@ void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
BaseDtorEndBB = EmitDtorCompleteObjectHandler(CGF);
}
- CGF.EmitCXXDestructorCall(DD, Callee, This.getPointer(),
+ CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy,
/*ImplicitParam=*/nullptr,
- /*ImplicitParamTy=*/QualType(), nullptr,
- getFromDtorType(Type));
+ /*ImplicitParamTy=*/QualType(), nullptr);
if (BaseDtorEndBB) {
// Complete object handler should continue to be the remaining
CGF.Builder.CreateBr(BaseDtorEndBB);
@@ -1878,15 +1900,18 @@ CGCallee MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
llvm::Value *MicrosoftCXXABI::EmitVirtualDestructorCall(
CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
- Address This, const CXXMemberCallExpr *CE) {
+ Address This, DeleteOrMemberCallExpr E) {
+ auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
+ auto *D = E.dyn_cast<const CXXDeleteExpr *>();
+ assert((CE != nullptr) ^ (D != nullptr));
assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
// We have only one destructor in the vftable but can get both behaviors
// by passing an implicit int parameter.
GlobalDecl GD(Dtor, Dtor_Deleting);
- const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
- Dtor, StructorType::Deleting);
+ const CGFunctionInfo *FInfo =
+ &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
@@ -1895,10 +1920,15 @@ llvm::Value *MicrosoftCXXABI::EmitVirtualDestructorCall(
llvm::IntegerType::getInt32Ty(CGF.getLLVMContext()),
DtorType == Dtor_Deleting);
+ QualType ThisTy;
+ if (CE)
+ ThisTy = CE->getImplicitObjectArgument()->getType()->getPointeeType();
+ else
+ ThisTy = D->getDestroyedType();
+
This = adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
- RValue RV =
- CGF.EmitCXXDestructorCall(Dtor, Callee, This.getPointer(), ImplicitParam,
- Context.IntTy, CE, StructorType::Deleting);
+ RValue RV = CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy,
+ ImplicitParam, Context.IntTy, CE);
return RV.getScalarVal();
}
@@ -1996,7 +2026,7 @@ MicrosoftCXXABI::EmitVirtualMemPtrThunk(const CXXMethodDecl *MD,
llvm::Value *Callee =
CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
- CGF.EmitMustTailThunk(MD, getThisValue(CGF), Callee);
+ CGF.EmitMustTailThunk(MD, getThisValue(CGF), {ThunkTy, Callee});
return ThunkFn;
}
@@ -2222,25 +2252,26 @@ Address MicrosoftCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
}
static void emitGlobalDtorWithTLRegDtor(CodeGenFunction &CGF, const VarDecl &VD,
- llvm::Constant *Dtor,
+ llvm::FunctionCallee Dtor,
llvm::Constant *Addr) {
// Create a function which calls the destructor.
llvm::Constant *DtorStub = CGF.createAtExitStub(VD, Dtor, Addr);
// extern "C" int __tlregdtor(void (*f)(void));
llvm::FunctionType *TLRegDtorTy = llvm::FunctionType::get(
- CGF.IntTy, DtorStub->getType(), /*IsVarArg=*/false);
+ CGF.IntTy, DtorStub->getType(), /*isVarArg=*/false);
- llvm::Constant *TLRegDtor = CGF.CGM.CreateRuntimeFunction(
+ llvm::FunctionCallee TLRegDtor = CGF.CGM.CreateRuntimeFunction(
TLRegDtorTy, "__tlregdtor", llvm::AttributeList(), /*Local=*/true);
- if (llvm::Function *TLRegDtorFn = dyn_cast<llvm::Function>(TLRegDtor))
+ if (llvm::Function *TLRegDtorFn =
+ dyn_cast<llvm::Function>(TLRegDtor.getCallee()))
TLRegDtorFn->setDoesNotThrow();
CGF.EmitNounwindRuntimeCall(TLRegDtor, DtorStub);
}
void MicrosoftCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
- llvm::Constant *Dtor,
+ llvm::FunctionCallee Dtor,
llvm::Constant *Addr) {
if (D.isNoDestroy(CGM.getContext()))
return;
@@ -2269,7 +2300,7 @@ void MicrosoftCXXABI::EmitThreadLocalInitFuncs(
// pointers at start-up time and, eventually, at thread-creation time.
auto AddToXDU = [&CGM](llvm::Function *InitFunc) {
llvm::GlobalVariable *InitFuncPtr = new llvm::GlobalVariable(
- CGM.getModule(), InitFunc->getType(), /*IsConstant=*/true,
+ CGM.getModule(), InitFunc->getType(), /*isConstant=*/true,
llvm::GlobalVariable::InternalLinkage, InitFunc,
Twine(InitFunc->getName(), "$initializer$"));
InitFuncPtr->setSection(".CRT$XDU");
@@ -2318,14 +2349,14 @@ static ConstantAddress getInitThreadEpochPtr(CodeGenModule &CGM) {
return ConstantAddress(GV, Align);
auto *GV = new llvm::GlobalVariable(
CGM.getModule(), CGM.IntTy,
- /*Constant=*/false, llvm::GlobalVariable::ExternalLinkage,
+ /*isConstant=*/false, llvm::GlobalVariable::ExternalLinkage,
/*Initializer=*/nullptr, VarName,
/*InsertBefore=*/nullptr, llvm::GlobalVariable::GeneralDynamicTLSModel);
GV->setAlignment(Align.getQuantity());
return ConstantAddress(GV, Align);
}
-static llvm::Constant *getInitThreadHeaderFn(CodeGenModule &CGM) {
+static llvm::FunctionCallee getInitThreadHeaderFn(CodeGenModule &CGM) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
CGM.IntTy->getPointerTo(), /*isVarArg=*/false);
@@ -2337,7 +2368,7 @@ static llvm::Constant *getInitThreadHeaderFn(CodeGenModule &CGM) {
/*Local=*/true);
}
-static llvm::Constant *getInitThreadFooterFn(CodeGenModule &CGM) {
+static llvm::FunctionCallee getInitThreadFooterFn(CodeGenModule &CGM) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
CGM.IntTy->getPointerTo(), /*isVarArg=*/false);
@@ -2349,7 +2380,7 @@ static llvm::Constant *getInitThreadFooterFn(CodeGenModule &CGM) {
/*Local=*/true);
}
-static llvm::Constant *getInitThreadAbortFn(CodeGenModule &CGM) {
+static llvm::FunctionCallee getInitThreadAbortFn(CodeGenModule &CGM) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
CGM.IntTy->getPointerTo(), /*isVarArg=*/false);
@@ -3378,7 +3409,7 @@ static llvm::GlobalVariable *getTypeInfoVTable(CodeGenModule &CGM) {
if (auto VTable = CGM.getModule().getNamedGlobal(MangledName))
return VTable;
return new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
- /*Constant=*/true,
+ /*isConstant=*/true,
llvm::GlobalVariable::ExternalLinkage,
/*Initializer=*/nullptr, MangledName);
}
@@ -3558,7 +3589,7 @@ llvm::GlobalVariable *MSRTTIBuilder::getClassHierarchyDescriptor() {
// Forward-declare the class hierarchy descriptor
auto Type = ABI.getClassHierarchyDescriptorType();
- auto CHD = new llvm::GlobalVariable(Module, Type, /*Constant=*/true, Linkage,
+ auto CHD = new llvm::GlobalVariable(Module, Type, /*isConstant=*/true, Linkage,
/*Initializer=*/nullptr,
MangledName);
if (CHD->isWeakForLinker())
@@ -3597,7 +3628,7 @@ MSRTTIBuilder::getBaseClassArray(SmallVectorImpl<MSRTTIClass> &Classes) {
auto *ArrType = llvm::ArrayType::get(PtrType, Classes.size() + 1);
auto *BCA =
new llvm::GlobalVariable(Module, ArrType,
- /*Constant=*/true, Linkage,
+ /*isConstant=*/true, Linkage,
/*Initializer=*/nullptr, MangledName);
if (BCA->isWeakForLinker())
BCA->setComdat(CGM.getModule().getOrInsertComdat(BCA->getName()));
@@ -3639,7 +3670,7 @@ MSRTTIBuilder::getBaseClassDescriptor(const MSRTTIClass &Class) {
// Forward-declare the base class descriptor.
auto Type = ABI.getBaseClassDescriptorType();
auto BCD =
- new llvm::GlobalVariable(Module, Type, /*Constant=*/true, Linkage,
+ new llvm::GlobalVariable(Module, Type, /*isConstant=*/true, Linkage,
/*Initializer=*/nullptr, MangledName);
if (BCD->isWeakForLinker())
BCD->setComdat(CGM.getModule().getOrInsertComdat(BCD->getName()));
@@ -3685,7 +3716,7 @@ MSRTTIBuilder::getCompleteObjectLocator(const VPtrInfo &Info) {
// Forward-declare the complete object locator.
llvm::StructType *Type = ABI.getCompleteObjectLocatorType();
- auto COL = new llvm::GlobalVariable(Module, Type, /*Constant=*/true, Linkage,
+ auto COL = new llvm::GlobalVariable(Module, Type, /*isConstant=*/true, Linkage,
/*Initializer=*/nullptr, MangledName);
// Initialize the CompleteObjectLocator.
@@ -3800,7 +3831,7 @@ llvm::Constant *MicrosoftCXXABI::getAddrOfRTTIDescriptor(QualType Type) {
llvm::StructType *TypeDescriptorType =
getTypeDescriptorType(TypeInfoString);
auto *Var = new llvm::GlobalVariable(
- CGM.getModule(), TypeDescriptorType, /*Constant=*/false,
+ CGM.getModule(), TypeDescriptorType, /*isConstant=*/false,
getLinkageForRTTI(Type),
llvm::ConstantStruct::get(TypeDescriptorType, Fields),
MangledName);
@@ -3816,44 +3847,36 @@ MicrosoftCXXABI::getMSCompleteObjectLocator(const CXXRecordDecl *RD,
return MSRTTIBuilder(*this, RD).getCompleteObjectLocator(Info);
}
-static void emitCXXConstructor(CodeGenModule &CGM,
- const CXXConstructorDecl *ctor,
- StructorType ctorType) {
- // There are no constructor variants, always emit the complete destructor.
- llvm::Function *Fn = CGM.codegenCXXStructor(ctor, StructorType::Complete);
- CGM.maybeSetTrivialComdat(*ctor, *Fn);
-}
+void MicrosoftCXXABI::emitCXXStructor(GlobalDecl GD) {
+ if (auto *ctor = dyn_cast<CXXConstructorDecl>(GD.getDecl())) {
+    // There are no constructor variants; always emit the complete constructor.
+ llvm::Function *Fn =
+ CGM.codegenCXXStructor(GD.getWithCtorType(Ctor_Complete));
+ CGM.maybeSetTrivialComdat(*ctor, *Fn);
+ return;
+ }
+
+ auto *dtor = cast<CXXDestructorDecl>(GD.getDecl());
-static void emitCXXDestructor(CodeGenModule &CGM, const CXXDestructorDecl *dtor,
- StructorType dtorType) {
// Emit the base destructor if the base and complete (vbase) destructors are
// equivalent. This effectively implements -mconstructor-aliases as part of
// the ABI.
- if (dtorType == StructorType::Complete &&
+ if (GD.getDtorType() == Dtor_Complete &&
dtor->getParent()->getNumVBases() == 0)
- dtorType = StructorType::Base;
+ GD = GD.getWithDtorType(Dtor_Base);
// The base destructor is equivalent to the base destructor of its
// base class if there is exactly one non-virtual base class with a
// non-trivial destructor, there are no fields with a non-trivial
// destructor, and the body of the destructor is trivial.
- if (dtorType == StructorType::Base && !CGM.TryEmitBaseDestructorAsAlias(dtor))
+ if (GD.getDtorType() == Dtor_Base && !CGM.TryEmitBaseDestructorAsAlias(dtor))
return;
- llvm::Function *Fn = CGM.codegenCXXStructor(dtor, dtorType);
+ llvm::Function *Fn = CGM.codegenCXXStructor(GD);
if (Fn->isWeakForLinker())
Fn->setComdat(CGM.getModule().getOrInsertComdat(Fn->getName()));
}
-void MicrosoftCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
- StructorType Type) {
- if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
- emitCXXConstructor(CGM, CD, Type);
- return;
- }
- emitCXXDestructor(CGM, cast<CXXDestructorDecl>(MD), Type);
-}
-
llvm::Function *
MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
CXXCtorType CT) {
@@ -3955,7 +3978,7 @@ MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
/*Delegating=*/false, Args);
// Call the destructor with our arguments.
llvm::Constant *CalleePtr =
- CGM.getAddrOfCXXStructor(CD, StructorType::Complete);
+ CGM.getAddrOfCXXStructor(GlobalDecl(CD, Ctor_Complete));
CGCallee Callee =
CGCallee::forDirect(CalleePtr, GlobalDecl(CD, Ctor_Complete));
const CGFunctionInfo &CalleeInfo = CGM.getTypes().arrangeCXXConstructorCall(
@@ -4006,7 +4029,7 @@ llvm::Constant *MicrosoftCXXABI::getCatchableType(QualType T,
if (CT == Ctor_CopyingClosure)
CopyCtor = getAddrOfCXXCtorClosure(CD, Ctor_CopyingClosure);
else
- CopyCtor = CGM.getAddrOfCXXStructor(CD, StructorType::Complete);
+ CopyCtor = CGM.getAddrOfCXXStructor(GlobalDecl(CD, Ctor_Complete));
CopyCtor = llvm::ConstantExpr::getBitCast(CopyCtor, CGM.Int8PtrTy);
} else {
@@ -4047,7 +4070,7 @@ llvm::Constant *MicrosoftCXXABI::getCatchableType(QualType T,
};
llvm::StructType *CTType = getCatchableTypeType();
auto *GV = new llvm::GlobalVariable(
- CGM.getModule(), CTType, /*Constant=*/true, getLinkageForRTTI(T),
+ CGM.getModule(), CTType, /*isConstant=*/true, getLinkageForRTTI(T),
llvm::ConstantStruct::get(CTType, Fields), MangledName);
GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
GV->setSection(".xdata");
@@ -4165,7 +4188,7 @@ llvm::GlobalVariable *MicrosoftCXXABI::getCatchableTypeArray(QualType T) {
getMangleContext().mangleCXXCatchableTypeArray(T, NumEntries, Out);
}
CTA = new llvm::GlobalVariable(
- CGM.getModule(), CTAType, /*Constant=*/true, getLinkageForRTTI(T),
+ CGM.getModule(), CTAType, /*isConstant=*/true, getLinkageForRTTI(T),
llvm::ConstantStruct::get(CTAType, Fields), MangledName);
CTA->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
CTA->setSection(".xdata");
@@ -4219,7 +4242,7 @@ llvm::GlobalVariable *MicrosoftCXXABI::getThrowInfo(QualType T) {
if (CXXDestructorDecl *DtorD = RD->getDestructor())
if (!DtorD->isTrivial())
CleanupFn = llvm::ConstantExpr::getBitCast(
- CGM.getAddrOfCXXStructor(DtorD, StructorType::Complete),
+ CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete)),
CGM.Int8PtrTy);
// This is unused as far as we can tell, initialize it to null.
llvm::Constant *ForwardCompat =
@@ -4234,7 +4257,7 @@ llvm::GlobalVariable *MicrosoftCXXABI::getThrowInfo(QualType T) {
PointerToCatchableTypes // CatchableTypeArray
};
auto *GV = new llvm::GlobalVariable(
- CGM.getModule(), TIType, /*Constant=*/true, getLinkageForRTTI(T),
+ CGM.getModule(), TIType, /*isConstant=*/true, getLinkageForRTTI(T),
llvm::ConstantStruct::get(TIType, Fields), StringRef(MangledName));
GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
GV->setSection(".xdata");
diff --git a/lib/CodeGen/ModuleBuilder.cpp b/lib/CodeGen/ModuleBuilder.cpp
index c0a37698e762..3b4e06045a37 100644
--- a/lib/CodeGen/ModuleBuilder.cpp
+++ b/lib/CodeGen/ModuleBuilder.cpp
@@ -1,9 +1,8 @@
//===--- ModuleBuilder.cpp - Emit LLVM Code from ASTs ---------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CodeGen/ObjectFilePCHContainerOperations.cpp b/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
index 6f00c836f93d..15a2ab99fdac 100644
--- a/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
+++ b/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
@@ -1,9 +1,8 @@
//===--- ObjectFilePCHContainerOperations.cpp -----------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -22,7 +21,7 @@
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Bitcode/BitstreamReader.h"
+#include "llvm/Bitstream/BitstreamReader.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
@@ -338,8 +337,14 @@ ObjectFilePCHContainerReader::ExtractPCH(llvm::MemoryBufferRef Buffer) const {
StringRef Name;
Section.getName(Name);
if ((!IsCOFF && Name == "__clangast") || (IsCOFF && Name == "clangast")) {
- Section.getContents(PCH);
- return PCH;
+ if (Expected<StringRef> E = Section.getContents())
+ return *E;
+ else {
+ handleAllErrors(E.takeError(), [&](const llvm::ErrorInfoBase &EIB) {
+ EIB.log(llvm::errs());
+ });
+ return "";
+ }
}
}
}
diff --git a/lib/CodeGen/PatternInit.cpp b/lib/CodeGen/PatternInit.cpp
new file mode 100644
index 000000000000..3410c7f21533
--- /dev/null
+++ b/lib/CodeGen/PatternInit.cpp
@@ -0,0 +1,85 @@
+//===--- PatternInit.cpp - Pattern Initialization -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "PatternInit.h"
+#include "CodeGenModule.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Type.h"
+
+llvm::Constant *clang::CodeGen::initializationPatternFor(CodeGenModule &CGM,
+ llvm::Type *Ty) {
+ // The following value is a guaranteed unmappable pointer value and has a
+ // repeated byte-pattern which makes it easier to synthesize. We use it for
+ // pointers as well as integers so that aggregates are likely to be
+ // initialized with this repeated value.
+ // For 32-bit platforms it's a bit trickier because, across systems, only the
+ // zero page can reasonably be expected to be unmapped. We use max 0xFFFFFFFF
+ // assuming that memory access will overlap into zero page.
+ const uint64_t IntValue =
+ CGM.getContext().getTargetInfo().getMaxPointerWidth() < 64
+ ? 0xFFFFFFFFFFFFFFFFull
+ : 0xAAAAAAAAAAAAAAAAull;
+ // Floating-point values are initialized as NaNs because they propagate. Using
+ // a repeated byte pattern means that it will be easier to initialize
+ // all-floating-point aggregates and arrays with memset. Further, aggregates
+ // which mix integral and a few floats might also initialize with memset
+ // followed by a handful of stores for the floats. Using fairly unique NaNs
+ // also means they'll be easier to distinguish in a crash.
+ constexpr bool NegativeNaN = true;
+ constexpr uint64_t NaNPayload = 0xFFFFFFFFFFFFFFFFull;
+ if (Ty->isIntOrIntVectorTy()) {
+ unsigned BitWidth = cast<llvm::IntegerType>(
+ Ty->isVectorTy() ? Ty->getVectorElementType() : Ty)
+ ->getBitWidth();
+ if (BitWidth <= 64)
+ return llvm::ConstantInt::get(Ty, IntValue);
+ return llvm::ConstantInt::get(
+ Ty, llvm::APInt::getSplat(BitWidth, llvm::APInt(64, IntValue)));
+ }
+ if (Ty->isPtrOrPtrVectorTy()) {
+ auto *PtrTy = cast<llvm::PointerType>(
+ Ty->isVectorTy() ? Ty->getVectorElementType() : Ty);
+ unsigned PtrWidth = CGM.getContext().getTargetInfo().getPointerWidth(
+ PtrTy->getAddressSpace());
+ if (PtrWidth > 64)
+ llvm_unreachable("pattern initialization of unsupported pointer width");
+ llvm::Type *IntTy = llvm::IntegerType::get(CGM.getLLVMContext(), PtrWidth);
+ auto *Int = llvm::ConstantInt::get(IntTy, IntValue);
+ return llvm::ConstantExpr::getIntToPtr(Int, PtrTy);
+ }
+ if (Ty->isFPOrFPVectorTy()) {
+ unsigned BitWidth = llvm::APFloat::semanticsSizeInBits(
+ (Ty->isVectorTy() ? Ty->getVectorElementType() : Ty)
+ ->getFltSemantics());
+ llvm::APInt Payload(64, NaNPayload);
+ if (BitWidth >= 64)
+ Payload = llvm::APInt::getSplat(BitWidth, Payload);
+ return llvm::ConstantFP::getQNaN(Ty, NegativeNaN, &Payload);
+ }
+ if (Ty->isArrayTy()) {
+ // Note: this doesn't touch tail padding (at the end of an object, before
+ // the next array object). It is instead handled by replaceUndef.
+ auto *ArrTy = cast<llvm::ArrayType>(Ty);
+ llvm::SmallVector<llvm::Constant *, 8> Element(
+ ArrTy->getNumElements(),
+ initializationPatternFor(CGM, ArrTy->getElementType()));
+ return llvm::ConstantArray::get(ArrTy, Element);
+ }
+
+ // Note: this doesn't touch struct padding. It will initialize as much union
+ // padding as is required for the largest type in the union. Padding is
+ // instead handled by replaceUndef. Stores to structs with volatile members
+ // don't have a volatile qualifier when initialized according to C++. This is
+ // fine because stack-based volatiles don't really have volatile semantics
+  // anyway, and the initialization shouldn't be observable.
+ auto *StructTy = cast<llvm::StructType>(Ty);
+ llvm::SmallVector<llvm::Constant *, 8> Struct(StructTy->getNumElements());
+ for (unsigned El = 0; El != Struct.size(); ++El)
+ Struct[El] = initializationPatternFor(CGM, StructTy->getElementType(El));
+ return llvm::ConstantStruct::get(StructTy, Struct);
+}
diff --git a/lib/CodeGen/PatternInit.h b/lib/CodeGen/PatternInit.h
new file mode 100644
index 000000000000..f117dde9ac66
--- /dev/null
+++ b/lib/CodeGen/PatternInit.h
@@ -0,0 +1,27 @@
+//===- PatternInit - Pattern initialization ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_PATTERNINIT_H
+#define LLVM_CLANG_LIB_CODEGEN_PATTERNINIT_H
+
+namespace llvm {
+class Constant;
+class Type;
+} // namespace llvm
+
+namespace clang {
+namespace CodeGen {
+
+class CodeGenModule;
+
+llvm::Constant *initializationPatternFor(CodeGenModule &, llvm::Type *);
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/SanitizerMetadata.cpp b/lib/CodeGen/SanitizerMetadata.cpp
index 23cf9e490828..ebc9cd5529bc 100644
--- a/lib/CodeGen/SanitizerMetadata.cpp
+++ b/lib/CodeGen/SanitizerMetadata.cpp
@@ -1,9 +1,8 @@
//===--- SanitizerMetadata.cpp - Blacklist for sanitizers -----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -21,14 +20,17 @@ using namespace CodeGen;
SanitizerMetadata::SanitizerMetadata(CodeGenModule &CGM) : CGM(CGM) {}
+static bool isAsanHwasanOrMemTag(const SanitizerSet& SS) {
+ return SS.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress |
+ SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress |
+ SanitizerKind::MemTag);
+}
+
void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
SourceLocation Loc, StringRef Name,
QualType Ty, bool IsDynInit,
bool IsBlacklisted) {
- if (!CGM.getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address |
- SanitizerKind::KernelAddress |
- SanitizerKind::HWAddress |
- SanitizerKind::KernelHWAddress))
+ if (!isAsanHwasanOrMemTag(CGM.getLangOpts().Sanitize))
return;
IsDynInit &= !CGM.isInSanitizerBlacklist(GV, Loc, Ty, "init");
IsBlacklisted |= CGM.isInSanitizerBlacklist(GV, Loc, Ty);
@@ -59,10 +61,7 @@ void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
const VarDecl &D, bool IsDynInit) {
- if (!CGM.getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address |
- SanitizerKind::KernelAddress |
- SanitizerKind::HWAddress |
- SanitizerKind::KernelHWAddress))
+ if (!isAsanHwasanOrMemTag(CGM.getLangOpts().Sanitize))
return;
std::string QualName;
llvm::raw_string_ostream OS(QualName);
@@ -79,10 +78,7 @@ void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
void SanitizerMetadata::disableSanitizerForGlobal(llvm::GlobalVariable *GV) {
// For now, just make sure the global is not modified by the ASan
// instrumentation.
- if (CGM.getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address |
- SanitizerKind::KernelAddress |
- SanitizerKind::HWAddress |
- SanitizerKind::KernelHWAddress))
+ if (isAsanHwasanOrMemTag(CGM.getLangOpts().Sanitize))
reportGlobalToASan(GV, SourceLocation(), "", QualType(), false, true);
}
diff --git a/lib/CodeGen/SanitizerMetadata.h b/lib/CodeGen/SanitizerMetadata.h
index 166f0e6c9b58..7ffac4360d9c 100644
--- a/lib/CodeGen/SanitizerMetadata.h
+++ b/lib/CodeGen/SanitizerMetadata.h
@@ -1,9 +1,8 @@
//===--- SanitizerMetadata.h - Metadata for sanitizers ----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CodeGen/SwiftCallingConv.cpp b/lib/CodeGen/SwiftCallingConv.cpp
index 75a0fa5ce189..8bce93b71c0c 100644
--- a/lib/CodeGen/SwiftCallingConv.cpp
+++ b/lib/CodeGen/SwiftCallingConv.cpp
@@ -1,9 +1,8 @@
//===--- SwiftCallingConv.cpp - Lowering for the Swift calling convention -===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 89ec73670a73..5da988fb8a3c 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -1,9 +1,8 @@
//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -310,10 +309,9 @@ static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
// Advance the pointer past the argument, then store that back.
CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
- llvm::Value *NextPtr =
- CGF.Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), FullDirectSize,
- "argp.next");
- CGF.Builder.CreateStore(NextPtr, VAListAddr);
+ Address NextPtr =
+ CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next");
+ CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
// If the argument is smaller than a slot, and this is a big-endian
// target, the argument will be right-adjusted in its slot.
@@ -451,7 +449,9 @@ llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
// space, an address space conversion may end up as a bitcast.
if (auto *C = dyn_cast<llvm::Constant>(Src))
return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy);
- return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DestTy);
+ // Try to preserve the source's name to make IR more readable.
+ return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : "");
}
llvm::Constant *
@@ -464,8 +464,11 @@ TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src,
}
llvm::SyncScope::ID
-TargetCodeGenInfo::getLLVMSyncScopeID(SyncScope S, llvm::LLVMContext &C) const {
- return C.getOrInsertSyncScopeID(""); /* default sync scope */
+TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
+ SyncScope Scope,
+ llvm::AtomicOrdering Ordering,
+ llvm::LLVMContext &Ctx) const {
+ return Ctx.getOrInsertSyncScopeID(""); /* default sync scope */
}
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
@@ -761,6 +764,22 @@ public:
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override {
+ TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
+ if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+ if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+ llvm::AttrBuilder B;
+ B.addAttribute("wasm-import-module", Attr->getImportModule());
+ Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
+ }
+ if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+ llvm::AttrBuilder B;
+ B.addAttribute("wasm-import-name", Attr->getImportName());
+ Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
+ }
+ }
+
if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
llvm::Function *Fn = cast<llvm::Function>(GV);
if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
@@ -814,7 +833,7 @@ ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const {
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect=*/ false,
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*IsIndirect=*/ false,
getContext().getTypeInfoInChars(Ty),
CharUnits::fromQuantity(4),
/*AllowHigherAlign=*/ true);
@@ -2205,8 +2224,8 @@ public:
/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public SwiftABIInfo {
public:
- WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT)
- : SwiftABIInfo(CGT),
+ WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
+ : SwiftABIInfo(CGT), AVXLevel(AVXLevel),
IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
void computeInfo(CGFunctionInfo &FI) const override;
@@ -2242,7 +2261,9 @@ private:
void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs,
bool IsVectorCall, bool IsRegCall) const;
- bool IsMingw64;
+ X86AVXABILevel AVXLevel;
+
+ bool IsMingw64;
};
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
@@ -2254,6 +2275,12 @@ public:
return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
}
+ /// Disable tail call on x86-64. The epilogue code before the tail jump blocks
+ /// the autoreleaseRV/retainRV optimization.
+ bool shouldSuppressTailCallsOfRetainAutoreleasedReturnValue() const override {
+ return true;
+ }
+
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
return 7;
}
@@ -2325,22 +2352,6 @@ public:
}
};
-class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo {
-public:
- PS4TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
- : X86_64TargetCodeGenInfo(CGT, AVXLevel) {}
-
- void getDependentLibraryOption(llvm::StringRef Lib,
- llvm::SmallString<24> &Opt) const override {
- Opt = "\01";
- // If the argument contains a space, enclose it in quotes.
- if (Lib.find(" ") != StringRef::npos)
- Opt += "\"" + Lib.str() + "\"";
- else
- Opt += Lib;
- }
-};
-
static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
// If the argument does not end in .lib, automatically add the suffix.
// If the argument contains a space, enclose it in quotes.
@@ -2402,7 +2413,7 @@ class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
X86AVXABILevel AVXLevel)
- : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
+ : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT, AVXLevel)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override;
@@ -3555,7 +3566,7 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
// using __attribute__((ms_abi)). In such case to correctly emit Win64
// compatible code delegate this call to WinX86_64ABIInfo::computeInfo.
if (CallingConv == llvm::CallingConv::Win64) {
- WinX86_64ABIInfo Win64ABIInfo(CGT);
+ WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
Win64ABIInfo.computeInfo(FI);
return;
}
@@ -3627,8 +3638,8 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
Address VAListAddr, QualType Ty) {
- Address overflow_arg_area_p = CGF.Builder.CreateStructGEP(
- VAListAddr, 2, CharUnits::fromQuantity(8), "overflow_arg_area_p");
+ Address overflow_arg_area_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
llvm::Value *overflow_arg_area =
CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
@@ -3699,18 +3710,14 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
if (neededInt) {
- gp_offset_p =
- CGF.Builder.CreateStructGEP(VAListAddr, 0, CharUnits::Zero(),
- "gp_offset_p");
+ gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
}
if (neededSSE) {
- fp_offset_p =
- CGF.Builder.CreateStructGEP(VAListAddr, 1, CharUnits::fromQuantity(4),
- "fp_offset_p");
+ fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
llvm::Value *FitsInFP =
llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
@@ -3739,8 +3746,7 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// loads than necessary. Can we clean this up?
llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
- CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(16)),
- "reg_save_area");
+ CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area");
Address RegAddr = Address::invalid();
if (neededInt && neededSSE) {
@@ -3766,16 +3772,13 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
llvm::Value *V = CGF.Builder.CreateAlignedLoad(
TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo)));
- CGF.Builder.CreateStore(V,
- CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
// Copy the second element.
V = CGF.Builder.CreateAlignedLoad(
TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi)));
- CharUnits Offset = CharUnits::fromQuantity(
- getDataLayout().getStructLayout(ST)->getElementOffset(1));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1, Offset));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
} else if (neededInt) {
@@ -3822,12 +3825,10 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
RegAddrLo, ST->getStructElementType(0)));
- CGF.Builder.CreateStore(V,
- CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
RegAddrHi, ST->getStructElementType(1)));
- CGF.Builder.CreateStore(V,
- CGF.Builder.CreateStructGEP(Tmp, 1, CharUnits::fromQuantity(8)));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
}
@@ -4019,9 +4020,17 @@ void WinX86_64ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI,
}
void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
- bool IsVectorCall =
- FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall;
- bool IsRegCall = FI.getCallingConvention() == llvm::CallingConv::X86_RegCall;
+ const unsigned CC = FI.getCallingConvention();
+ bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
+ bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;
+
+ // If __attribute__((sysv_abi)) is in use, use the SysV argument
+ // classification rules.
+ if (CC == llvm::CallingConv::X86_64_SysV) {
+ X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
+ SysVABIInfo.computeInfo(FI);
+ return;
+ }
unsigned FreeSSERegs = 0;
if (IsVectorCall) {
@@ -4169,9 +4178,9 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
// The calling convention either uses 1-2 GPRs or 1 FPR.
Address NumRegsAddr = Address::invalid();
if (isInt || IsSoftFloatABI) {
- NumRegsAddr = Builder.CreateStructGEP(VAList, 0, CharUnits::Zero(), "gpr");
+ NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr");
} else {
- NumRegsAddr = Builder.CreateStructGEP(VAList, 1, CharUnits::One(), "fpr");
+ NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr");
}
llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
@@ -4199,8 +4208,7 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
{
CGF.EmitBlock(UsingRegs);
- Address RegSaveAreaPtr =
- Builder.CreateStructGEP(VAList, 4, CharUnits::fromQuantity(8));
+ Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
CharUnits::fromQuantity(8));
assert(RegAddr.getElementType() == CGF.Int8Ty);
@@ -4248,8 +4256,7 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
Size = CGF.getPointerSize();
}
- Address OverflowAreaAddr =
- Builder.CreateStructGEP(VAList, 3, CharUnits::fromQuantity(4));
+ Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
OverflowAreaAlign);
// Round up address of argument to alignment
@@ -5283,31 +5290,24 @@ Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
- auto TyInfo = getContext().getTypeInfoInChars(Ty);
- CharUnits TyAlign = TyInfo.second;
+ CharUnits TySize = getContext().getTypeSizeInChars(Ty);
+ CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);
Address reg_offs_p = Address::invalid();
llvm::Value *reg_offs = nullptr;
int reg_top_index;
- CharUnits reg_top_offset;
- int RegSize = IsIndirect ? 8 : TyInfo.first.getQuantity();
+ int RegSize = IsIndirect ? 8 : TySize.getQuantity();
if (!IsFPR) {
// 3 is the field number of __gr_offs
- reg_offs_p =
- CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
- "gr_offs_p");
+ reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
reg_top_index = 1; // field number for __gr_top
- reg_top_offset = CharUnits::fromQuantity(8);
RegSize = llvm::alignTo(RegSize, 8);
} else {
// 4 is the field number of __vr_offs.
- reg_offs_p =
- CGF.Builder.CreateStructGEP(VAListAddr, 4, CharUnits::fromQuantity(28),
- "vr_offs_p");
+ reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
reg_top_index = 2; // field number for __vr_top
- reg_top_offset = CharUnits::fromQuantity(16);
RegSize = 16 * NumRegs;
}
@@ -5369,8 +5369,8 @@ Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
CGF.EmitBlock(InRegBlock);
llvm::Value *reg_top = nullptr;
- Address reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index,
- reg_top_offset, "reg_top_p");
+ Address reg_top_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
CharUnits::fromQuantity(IsFPR ? 16 : 8));
@@ -5410,8 +5410,7 @@ Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);
- Address StoreAddr =
- CGF.Builder.CreateConstArrayGEP(Tmp, i, BaseTyInfo.first);
+ Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);
llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
CGF.Builder.CreateStore(Elem, StoreAddr);
@@ -5425,8 +5424,8 @@ Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
CharUnits SlotSize = BaseAddr.getAlignment();
if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
(IsHFA || !isAggregateTypeForABI(Ty)) &&
- TyInfo.first < SlotSize) {
- CharUnits Offset = SlotSize - TyInfo.first;
+ TySize < SlotSize) {
+ CharUnits Offset = SlotSize - TySize;
BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
}
@@ -5440,8 +5439,7 @@ Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
//=======================================
CGF.EmitBlock(OnStackBlock);
- Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0,
- CharUnits::Zero(), "stack_p");
+ Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
// Again, stack arguments may need realignment. In this case both integer and
@@ -5469,7 +5467,7 @@ Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
if (IsIndirect)
StackSize = StackSlotSize;
else
- StackSize = TyInfo.first.alignTo(StackSlotSize);
+ StackSize = TySize.alignTo(StackSlotSize);
llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
llvm::Value *NewStack =
@@ -5479,8 +5477,8 @@ Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
CGF.Builder.CreateStore(NewStack, stack_p);
if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
- TyInfo.first < StackSlotSize) {
- CharUnits Offset = StackSlotSize - TyInfo.first;
+ TySize < StackSlotSize) {
+ CharUnits Offset = StackSlotSize - TySize;
OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
}
@@ -5498,7 +5496,7 @@ Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
if (IsIndirect)
return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
- TyInfo.second);
+ TyAlign);
return ResAddr;
}
@@ -5598,17 +5596,22 @@ public:
ABIKind getABIKind() const { return Kind; }
private:
- ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
- ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic) const;
+ ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
+ unsigned functionCallConv) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
+ unsigned functionCallConv) const;
ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
uint64_t Members) const;
ABIArgInfo coerceIllegalVector(QualType Ty) const;
bool isIllegalVectorType(QualType Ty) const;
+ bool containsAnyFP16Vectors(QualType Ty) const;
bool isHomogeneousAggregateBaseType(QualType Ty) const override;
bool isHomogeneousAggregateSmallEnough(const Type *Ty,
uint64_t Members) const override;
+ bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;
+
void computeInfo(CGFunctionInfo &FI) const override;
Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
@@ -5729,11 +5732,13 @@ void WindowsARMTargetCodeGenInfo::setTargetAttributes(
void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
if (!::classifyReturnType(getCXXABI(), FI, *this))
- FI.getReturnInfo() =
- classifyReturnType(FI.getReturnType(), FI.isVariadic());
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(),
+ FI.getCallingConvention());
for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type, FI.isVariadic());
+ I.info = classifyArgumentType(I.type, FI.isVariadic(),
+ FI.getCallingConvention());
+
// Always honor user-specified calling convention.
if (FI.getCallingConvention() != llvm::CallingConv::C)
@@ -5799,9 +5804,7 @@ ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
// Base can be a floating-point or a vector.
if (const VectorType *VT = Base->getAs<VectorType>()) {
// FP16 vectors should be converted to integer vectors
- if (!getTarget().hasLegalHalfType() &&
- (VT->getElementType()->isFloat16Type() ||
- VT->getElementType()->isHalfType())) {
+ if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
uint64_t Size = getContext().getTypeSize(VT);
llvm::Type *NewVecTy = llvm::VectorType::get(
llvm::Type::getInt32Ty(getVMContext()), Size / 32);
@@ -5812,8 +5815,8 @@ ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
}
-ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
- bool isVariadic) const {
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
+ unsigned functionCallConv) const {
// 6.1.2.1 The following argument types are VFP CPRCs:
// A single-precision floating-point type (including promoted
// half-precision types); A double-precision floating-point type;
@@ -5821,7 +5824,9 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
// with a Base Type of a single- or double-precision floating-point type,
// 64-bit containerized vectors or 128-bit containerized vectors with one
// to four Elements.
- bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
+ // Variadic functions should always marshal to the base standard.
+ bool IsAAPCS_VFP =
+ !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false);
Ty = useFirstFieldIfTransparentUnion(Ty);
@@ -5834,7 +5839,7 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
// half type natively, and does not need to interwork with AAPCS code.
if ((Ty->isFloat16Type() || Ty->isHalfType()) &&
!getContext().getLangOpts().NativeHalfArgsAndReturns) {
- llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
+ llvm::Type *ResType = IsAAPCS_VFP ?
llvm::Type::getFloatTy(getVMContext()) :
llvm::Type::getInt32Ty(getVMContext());
return ABIArgInfo::getDirect(ResType);
@@ -5858,7 +5863,7 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
if (isEmptyRecord(getContext(), Ty, true))
return ABIArgInfo::getIgnore();
- if (IsEffectivelyAAPCS_VFP) {
+ if (IsAAPCS_VFP) {
// Homogeneous Aggregates need to be expanded when we can fit the aggregate
// into VFP registers.
const Type *Base = nullptr;
@@ -6015,10 +6020,12 @@ static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
return true;
}
-ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
- bool isVariadic) const {
- bool IsEffectivelyAAPCS_VFP =
- (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic;
+ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
+ unsigned functionCallConv) const {
+
+ // Variadic functions should always marshal to the base standard.
+ bool IsAAPCS_VFP =
+ !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true);
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
@@ -6039,7 +6046,7 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
// half type natively, and does not need to interwork with AAPCS code.
if ((RetTy->isFloat16Type() || RetTy->isHalfType()) &&
!getContext().getLangOpts().NativeHalfArgsAndReturns) {
- llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
+ llvm::Type *ResType = IsAAPCS_VFP ?
llvm::Type::getFloatTy(getVMContext()) :
llvm::Type::getInt32Ty(getVMContext());
return ABIArgInfo::getDirect(ResType);
@@ -6088,7 +6095,7 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
return ABIArgInfo::getIgnore();
// Check for homogeneous aggregates with AAPCS-VFP.
- if (IsEffectivelyAAPCS_VFP) {
+ if (IsAAPCS_VFP) {
const Type *Base = nullptr;
uint64_t Members = 0;
if (isHomogeneousAggregate(RetTy, Base, Members))
@@ -6158,6 +6165,37 @@ bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
return false;
}
+/// Return true if a type contains any 16-bit floating-point vectors.
+bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
+ if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
+ uint64_t NElements = AT->getSize().getZExtValue();
+ if (NElements == 0)
+ return false;
+ return containsAnyFP16Vectors(AT->getElementType());
+ } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) {
+ return containsAnyFP16Vectors(B.getType());
+ }))
+ return true;
+
+ if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) {
+ return FD && containsAnyFP16Vectors(FD->getType());
+ }))
+ return true;
+
+ return false;
+ } else {
+ if (const VectorType *VT = Ty->getAs<VectorType>())
+ return (VT->getElementType()->isFloat16Type() ||
+ VT->getElementType()->isHalfType());
+ return false;
+ }
+}
+
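For illustration only (not from this patch), a hypothetical record for which the
containsAnyFP16Vectors helper above returns true, because a nested field is a
vector of 16-bit floats:

    // A 4-element vector of __fp16 (8 bytes); ARM targets accept vector_size
    // on half-precision elements.
    typedef __fp16 half4 __attribute__((vector_size(8)));
    struct Inner { half4 v; };
    struct Outer { int tag; struct Inner payload[2]; };  // -> true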
bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
llvm::Type *eltTy,
unsigned numElts) const {
@@ -6193,6 +6231,16 @@ bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
return Members <= 4;
}
+bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
+ bool acceptHalf) const {
+ // Give precedence to user-specified calling conventions.
+ if (callConvention != llvm::CallingConv::C)
+ return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
+ else
+ return (getABIKind() == AAPCS_VFP) ||
+ (acceptHalf && (getABIKind() == AAPCS16_VFP));
+}
+
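For illustration only (not from this patch), a declaration with a made-up name
showing how an explicit calling convention now reaches the VFP path through
isEffectivelyAAPCS_VFP even when the target's default ABI kind is plain AAPCS:

    // The standard pcs attribute lowers to llvm::CallingConv::ARM_AAPCS_VFP,
    // which takes precedence over getABIKind() in the check above.
    __attribute__((pcs("aapcs-vfp"))) double blend(double x, double y);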
Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const {
CharUnits SlotSize = CharUnits::fromQuantity(4);
@@ -6204,19 +6252,19 @@ Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
return Addr;
}
- auto TyInfo = getContext().getTypeInfoInChars(Ty);
- CharUnits TyAlignForABI = TyInfo.second;
+ CharUnits TySize = getContext().getTypeSizeInChars(Ty);
+ CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty);
// Use indirect if size of the illegal vector is bigger than 16 bytes.
bool IsIndirect = false;
const Type *Base = nullptr;
uint64_t Members = 0;
- if (TyInfo.first > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
+ if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
IsIndirect = true;
// ARMv7k passes structs bigger than 16 bytes indirectly, in space
// allocated by the caller.
- } else if (TyInfo.first > CharUnits::fromQuantity(16) &&
+ } else if (TySize > CharUnits::fromQuantity(16) &&
getABIKind() == ARMABIInfo::AAPCS16_VFP &&
!isHomogeneousAggregate(Ty, Base, Members)) {
IsIndirect = true;
@@ -6236,8 +6284,8 @@ Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
} else {
TyAlignForABI = CharUnits::fromQuantity(4);
}
- TyInfo.second = TyAlignForABI;
+ std::pair<CharUnits, CharUnits> TyInfo = { TySize, TyAlignForABI };
return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
SlotSize, /*AllowHigherAlign*/ true);
}
@@ -6275,10 +6323,58 @@ private:
static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
};
+/// Checks if the type is unsupported directly by the current target.
+static bool isUnsupportedType(ASTContext &Context, QualType T) {
+ if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type())
+ return true;
+ if (!Context.getTargetInfo().hasFloat128Type() &&
+ (T->isFloat128Type() ||
+ (T->isRealFloatingType() && Context.getTypeSize(T) == 128)))
+ return true;
+ if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() &&
+ Context.getTypeSize(T) > 64)
+ return true;
+ if (const auto *AT = T->getAsArrayTypeUnsafe())
+ return isUnsupportedType(Context, AT->getElementType());
+ const auto *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl();
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ for (const CXXBaseSpecifier &I : CXXRD->bases())
+ if (isUnsupportedType(Context, I.getType()))
+ return true;
+
+ for (const FieldDecl *I : RD->fields())
+ if (isUnsupportedType(Context, I->getType()))
+ return true;
+ return false;
+}
+
+/// Coerce the given type into an array whose element size does not exceed
+/// \p MaxSize bits.
+static ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, ASTContext &Context,
+ llvm::LLVMContext &LLVMContext,
+ unsigned MaxSize) {
+ // Alignment and Size are measured in bits.
+ const uint64_t Size = Context.getTypeSize(Ty);
+ const uint64_t Alignment = Context.getTypeAlign(Ty);
+ const unsigned Div = std::min<unsigned>(MaxSize, Alignment);
+ llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Div);
+ const uint64_t NumElements = (Size + Div - 1) / Div;
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
+}
+
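Worked example for the coercion above (illustrative, not from the patch): a
128-bit integer type with 64-bit (or larger) alignment and MaxSize = 64 yields
Div = 64 and NumElements = (128 + 63) / 64 = 2, so the value is coerced to the
LLVM type [2 x i64].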
ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
+ if (getContext().getLangOpts().OpenMP &&
+ getContext().getLangOpts().OpenMPIsDevice &&
+ isUnsupportedType(getContext(), RetTy))
+ return coerceToIntArrayWithLimit(RetTy, getContext(), getVMContext(), 64);
+
// note: this is different from default ABI
if (!RetTy->isScalarType())
return ABIArgInfo::getDirect();
@@ -6584,8 +6680,7 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// Vector arguments are always passed in the high bits of a
// single (8 byte) or double (16 byte) stack slot.
Address OverflowArgAreaPtr =
- CGF.Builder.CreateStructGEP(VAListAddr, 2, CharUnits::fromQuantity(16),
- "overflow_arg_area_ptr");
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
Address OverflowArgArea =
Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
TyInfo.second);
@@ -6617,9 +6712,8 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
RegPadding = Padding; // values are passed in the low bits of a GPR
}
- Address RegCountPtr = CGF.Builder.CreateStructGEP(
- VAListAddr, RegCountField, RegCountField * CharUnits::fromQuantity(8),
- "reg_count_ptr");
+ Address RegCountPtr =
+ CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
@@ -6642,8 +6736,7 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
llvm::Value *RegOffset =
CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
Address RegSaveAreaPtr =
- CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
- "reg_save_area_ptr");
+ CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
llvm::Value *RegSaveArea =
CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset,
@@ -6663,8 +6756,8 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
CGF.EmitBlock(InMemBlock);
// Work out the address of a stack argument.
- Address OverflowArgAreaPtr = CGF.Builder.CreateStructGEP(
- VAListAddr, 2, CharUnits::fromQuantity(16), "overflow_arg_area_ptr");
+ Address OverflowArgAreaPtr =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
Address OverflowArgArea =
Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
PaddedSize);
@@ -6774,21 +6867,19 @@ void MSP430TargetCodeGenInfo::setTargetAttributes(
if (GV->isDeclaration())
return;
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
- if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
- // Handle 'interrupt' attribute:
- llvm::Function *F = cast<llvm::Function>(GV);
+ const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>();
+ if (!InterruptAttr)
+ return;
- // Step 1: Set ISR calling convention.
- F->setCallingConv(llvm::CallingConv::MSP430_INTR);
+ // Handle 'interrupt' attribute:
+ llvm::Function *F = cast<llvm::Function>(GV);
- // Step 2: Add attributes goodness.
- F->addFnAttr(llvm::Attribute::NoInline);
+ // Step 1: Set ISR calling convention.
+ F->setCallingConv(llvm::CallingConv::MSP430_INTR);
- // Step 3: Emit ISR vector alias.
- unsigned Num = attr->getNumber() / 2;
- llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
- "__isr_" + Twine(Num), F);
- }
+ // Step 2: Add attributes goodness.
+ F->addFnAttr(llvm::Attribute::NoInline);
+ F->addFnAttr("interrupt", llvm::utostr(InterruptAttr->getNumber()));
}
}
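For illustration only (not from this patch), a hypothetical MSP430 handler; with
the rewrite above the vector number is recorded as an "interrupt" function
attribute instead of an __isr_N alias:

    // getNumber() returns 2 here, so the function ends up with
    // F->addFnAttr("interrupt", "2") plus noinline and the MSP430_INTR CC.
    __attribute__((interrupt(2))) void port1_isr(void) { /* ... */ }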
@@ -7764,8 +7855,10 @@ public:
}
LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
const VarDecl *D) const override;
- llvm::SyncScope::ID getLLVMSyncScopeID(SyncScope S,
- llvm::LLVMContext &C) const override;
+ llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
+ SyncScope Scope,
+ llvm::AtomicOrdering Ordering,
+ llvm::LLVMContext &Ctx) const override;
llvm::Function *
createEnqueuedBlockKernel(CodeGenFunction &CGF,
llvm::Function *BlockInvokeFunc,
@@ -7775,8 +7868,36 @@ public:
};
}
+static bool requiresAMDGPUProtectedVisibility(const Decl *D,
+ llvm::GlobalValue *GV) {
+ if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
+ return false;
+
+ return D->hasAttr<OpenCLKernelAttr>() ||
+ (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) ||
+ (isa<VarDecl>(D) &&
+ (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
+ D->hasAttr<HIPPinnedShadowAttr>()));
+}
+
+static bool requiresAMDGPUDefaultVisibility(const Decl *D,
+ llvm::GlobalValue *GV) {
+ if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
+ return false;
+
+ return isa<VarDecl>(D) && D->hasAttr<HIPPinnedShadowAttr>();
+}
+
void AMDGPUTargetCodeGenInfo::setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
+ if (requiresAMDGPUDefaultVisibility(D, GV)) {
+ GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+ GV->setDSOLocal(false);
+ } else if (requiresAMDGPUProtectedVisibility(D, GV)) {
+ GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
+ GV->setDSOLocal(true);
+ }
+
if (GV->isDeclaration())
return;
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
@@ -7788,14 +7909,23 @@ void AMDGPUTargetCodeGenInfo::setTargetAttributes(
const auto *ReqdWGS = M.getLangOpts().OpenCL ?
FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
- if (M.getLangOpts().OpenCL && FD->hasAttr<OpenCLKernelAttr>() &&
+ if (((M.getLangOpts().OpenCL && FD->hasAttr<OpenCLKernelAttr>()) ||
+ (M.getLangOpts().HIP && FD->hasAttr<CUDAGlobalAttr>())) &&
(M.getTriple().getOS() == llvm::Triple::AMDHSA))
- F->addFnAttr("amdgpu-implicitarg-num-bytes", "48");
+ F->addFnAttr("amdgpu-implicitarg-num-bytes", "56");
const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
if (ReqdWGS || FlatWGS) {
- unsigned Min = FlatWGS ? FlatWGS->getMin() : 0;
- unsigned Max = FlatWGS ? FlatWGS->getMax() : 0;
+ unsigned Min = 0;
+ unsigned Max = 0;
+ if (FlatWGS) {
+ Min = FlatWGS->getMin()
+ ->EvaluateKnownConstInt(M.getContext())
+ .getExtValue();
+ Max = FlatWGS->getMax()
+ ->EvaluateKnownConstInt(M.getContext())
+ .getExtValue();
+ }
if (ReqdWGS && Min == 0 && Max == 0)
Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();
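For illustration only (not from this patch), a hypothetical OpenCL kernel: since
getMin()/getMax() now return expressions evaluated via EvaluateKnownConstInt,
the attribute arguments may be arbitrary integer constant expressions rather
than plain literals:

    #define WAVE 64
    __attribute__((amdgpu_flat_work_group_size(WAVE, 4 * WAVE)))
    __kernel void scale(__global float *p) { p[0] *= 2.0f; }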
@@ -7809,8 +7939,12 @@ void AMDGPUTargetCodeGenInfo::setTargetAttributes(
}
if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) {
- unsigned Min = Attr->getMin();
- unsigned Max = Attr->getMax();
+ unsigned Min =
+ Attr->getMin()->EvaluateKnownConstInt(M.getContext()).getExtValue();
+ unsigned Max = Attr->getMax() ? Attr->getMax()
+ ->EvaluateKnownConstInt(M.getContext())
+ .getExtValue()
+ : 0;
if (Min != 0) {
assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");
@@ -7884,10 +8018,12 @@ AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
}
llvm::SyncScope::ID
-AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(SyncScope S,
- llvm::LLVMContext &C) const {
- StringRef Name;
- switch (S) {
+AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
+ SyncScope Scope,
+ llvm::AtomicOrdering Ordering,
+ llvm::LLVMContext &Ctx) const {
+ std::string Name;
+ switch (Scope) {
case SyncScope::OpenCLWorkGroup:
Name = "workgroup";
break;
@@ -7898,9 +8034,17 @@ AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(SyncScope S,
Name = "";
break;
case SyncScope::OpenCLSubGroup:
- Name = "subgroup";
+ Name = "wavefront";
}
- return C.getOrInsertSyncScopeID(Name);
+
+ if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
+ if (!Name.empty())
+ Name = Twine(Twine(Name) + Twine("-")).str();
+
+ Name = Twine(Twine(Name) + Twine("one-as")).str();
+ }
+
+ return Ctx.getOrInsertSyncScopeID(Name);
}
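Worked example for the mapping above (illustrative, not from the patch): a
workgroup-scope atomic with acquire ordering now lowers to the syncscope
"workgroup-one-as", an atomic with the default all-devices scope and acquire
ordering lowers to "one-as", while any sequentially consistent atomic keeps the
plain scope name (e.g. "workgroup").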
bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
@@ -8198,9 +8342,8 @@ Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
}
// Update VAList.
- llvm::Value *NextPtr =
- Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), Stride, "ap.next");
- Builder.CreateStore(NextPtr, VAListAddr);
+ Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next");
+ Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
}
@@ -8553,9 +8696,8 @@ Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// Increment the VAList.
if (!ArgSize.isZero()) {
- llvm::Value *APN =
- Builder.CreateConstInBoundsByteGEP(AP.getPointer(), ArgSize);
- Builder.CreateStore(APN, VAListAddr);
+ Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize);
+ Builder.CreateStore(APN.getPointer(), VAListAddr);
}
return Val;
@@ -9392,8 +9534,6 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
switch (Triple.getOS()) {
case llvm::Triple::Win32:
return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
- case llvm::Triple::PS4:
- return SetCGInfo(new PS4TargetCodeGenInfo(Types, AVXLevel));
default:
return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
}
diff --git a/lib/CodeGen/TargetInfo.h b/lib/CodeGen/TargetInfo.h
index b530260ea48f..e1e90e73cb58 100644
--- a/lib/CodeGen/TargetInfo.h
+++ b/lib/CodeGen/TargetInfo.h
@@ -1,9 +1,8 @@
//===---- TargetInfo.h - Encapsulate target details -------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -157,6 +156,12 @@ public:
return "";
}
+ /// Determine whether a call to objc_retainAutoreleasedReturnValue should be
+ /// marked as 'notail'.
+ virtual bool shouldSuppressTailCallsOfRetainAutoreleasedReturnValue() const {
+ return false;
+ }
+
/// Return a constant used by UBSan as a signature to identify functions
/// possessing type information, or 0 if the platform is unsupported.
virtual llvm::Constant *
@@ -262,9 +267,16 @@ public:
LangAS SrcAddr, LangAS DestAddr,
llvm::Type *DestTy) const;
+ /// Get address space of pointer parameter for __cxa_atexit.
+ virtual LangAS getAddrSpaceOfCxaAtexitPtrParam() const {
+ return LangAS::Default;
+ }
+
/// Get the syncscope used in LLVM IR.
- virtual llvm::SyncScope::ID getLLVMSyncScopeID(SyncScope S,
- llvm::LLVMContext &C) const;
+ virtual llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
+ SyncScope Scope,
+ llvm::AtomicOrdering Ordering,
+ llvm::LLVMContext &Ctx) const;
/// Interface class for filling custom fields of a block literal for OpenCL.
class TargetOpenCLBlockHelper {
diff --git a/lib/CodeGen/VarBypassDetector.cpp b/lib/CodeGen/VarBypassDetector.cpp
index 859cdd4282cc..f3a172e91c4f 100644
--- a/lib/CodeGen/VarBypassDetector.cpp
+++ b/lib/CodeGen/VarBypassDetector.cpp
@@ -1,9 +1,8 @@
//===--- VarBypassDetector.h - Bypass jumps detector --------------*- C++ -*-=//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/CodeGen/VarBypassDetector.h b/lib/CodeGen/VarBypassDetector.h
index 47fe13cfacd6..8a2e388eae3f 100644
--- a/lib/CodeGen/VarBypassDetector.h
+++ b/lib/CodeGen/VarBypassDetector.h
@@ -1,9 +1,8 @@
//===--- VarBypassDetector.cpp - Bypass jumps detector ------------*- C++ -*-=//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/CrossTU/CrossTranslationUnit.cpp b/lib/CrossTU/CrossTranslationUnit.cpp
index 7c97beb498a5..977fd4b8dd30 100644
--- a/lib/CrossTU/CrossTranslationUnit.cpp
+++ b/lib/CrossTU/CrossTranslationUnit.cpp
@@ -1,9 +1,8 @@
//===--- CrossTranslationUnit.cpp - -----------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -41,8 +40,15 @@ STATISTIC(
STATISTIC(NumGetCTUSuccess,
"The # of getCTUDefinition successfully returned the "
"requested function's body");
+STATISTIC(NumUnsupportedNodeFound, "The # of imports when the ASTImporter "
+ "encountered an unsupported AST Node");
+STATISTIC(NumNameConflicts, "The # of imports when the ASTImporter "
+ "encountered an ODR error");
STATISTIC(NumTripleMismatch, "The # of triple mismatches");
STATISTIC(NumLangMismatch, "The # of language mismatches");
+STATISTIC(NumLangDialectMismatch, "The # of language dialect mismatches");
+STATISTIC(NumASTLoadThresholdReached,
+ "The # of ASTs not loaded because of threshold");
// Same as Triple's equality operator, but we check a field only if that is
// known in both instances.
@@ -100,6 +106,10 @@ public:
return "Triple mismatch";
case index_error_code::lang_mismatch:
return "Language mismatch";
+ case index_error_code::lang_dialect_mismatch:
+ return "Language dialect mismatch";
+ case index_error_code::load_threshold_reached:
+ return "Load threshold reached";
}
llvm_unreachable("Unrecognized index_error_code.");
}
@@ -156,55 +166,79 @@ createCrossTUIndexString(const llvm::StringMap<std::string> &Index) {
return Result.str();
}
+bool containsConst(const VarDecl *VD, const ASTContext &ACtx) {
+ CanQualType CT = ACtx.getCanonicalType(VD->getType());
+ if (!CT.isConstQualified()) {
+ const RecordType *RTy = CT->getAs<RecordType>();
+ if (!RTy || !RTy->hasConstFields())
+ return false;
+ }
+ return true;
+}
+
+static bool hasBodyOrInit(const FunctionDecl *D, const FunctionDecl *&DefD) {
+ return D->hasBody(DefD);
+}
+static bool hasBodyOrInit(const VarDecl *D, const VarDecl *&DefD) {
+ return D->getAnyInitializer(DefD);
+}
+template <typename T> static bool hasBodyOrInit(const T *D) {
+ const T *Unused;
+ return hasBodyOrInit(D, Unused);
+}
+
CrossTranslationUnitContext::CrossTranslationUnitContext(CompilerInstance &CI)
- : CI(CI), Context(CI.getASTContext()) {}
+ : CI(CI), Context(CI.getASTContext()),
+ CTULoadThreshold(CI.getAnalyzerOpts()->CTUImportThreshold) {}
CrossTranslationUnitContext::~CrossTranslationUnitContext() {}
std::string CrossTranslationUnitContext::getLookupName(const NamedDecl *ND) {
SmallString<128> DeclUSR;
- bool Ret = index::generateUSRForDecl(ND, DeclUSR); (void)Ret;
+ bool Ret = index::generateUSRForDecl(ND, DeclUSR);
+ (void)Ret;
assert(!Ret && "Unable to generate USR");
return DeclUSR.str();
}
-/// Recursively visits the function decls of a DeclContext, and looks up a
-/// function based on USRs.
-const FunctionDecl *
-CrossTranslationUnitContext::findFunctionInDeclContext(const DeclContext *DC,
- StringRef LookupFnName) {
+/// Recursively visits the decls of a DeclContext, and returns one with the
+/// given USR.
+template <typename T>
+const T *
+CrossTranslationUnitContext::findDefInDeclContext(const DeclContext *DC,
+ StringRef LookupName) {
assert(DC && "Declaration Context must not be null");
for (const Decl *D : DC->decls()) {
const auto *SubDC = dyn_cast<DeclContext>(D);
if (SubDC)
- if (const auto *FD = findFunctionInDeclContext(SubDC, LookupFnName))
- return FD;
+ if (const auto *ND = findDefInDeclContext<T>(SubDC, LookupName))
+ return ND;
- const auto *ND = dyn_cast<FunctionDecl>(D);
- const FunctionDecl *ResultDecl;
- if (!ND || !ND->hasBody(ResultDecl))
+ const auto *ND = dyn_cast<T>(D);
+ const T *ResultDecl;
+ if (!ND || !hasBodyOrInit(ND, ResultDecl))
continue;
- if (getLookupName(ResultDecl) != LookupFnName)
+ if (getLookupName(ResultDecl) != LookupName)
continue;
return ResultDecl;
}
return nullptr;
}
-llvm::Expected<const FunctionDecl *>
-CrossTranslationUnitContext::getCrossTUDefinition(const FunctionDecl *FD,
- StringRef CrossTUDir,
- StringRef IndexName,
- bool DisplayCTUProgress) {
- assert(FD && "FD is missing, bad call to this function!");
- assert(!FD->hasBody() && "FD has a definition in current translation unit!");
+template <typename T>
+llvm::Expected<const T *> CrossTranslationUnitContext::getCrossTUDefinitionImpl(
+ const T *D, StringRef CrossTUDir, StringRef IndexName,
+ bool DisplayCTUProgress) {
+ assert(D && "D is missing, bad call to this function!");
+ assert(!hasBodyOrInit(D) &&
+ "D has a body or init in current translation unit!");
++NumGetCTUCalled;
- const std::string LookupFnName = getLookupName(FD);
- if (LookupFnName.empty())
+ const std::string LookupName = getLookupName(D);
+ if (LookupName.empty())
return llvm::make_error<IndexError>(
index_error_code::failed_to_generate_usr);
- llvm::Expected<ASTUnit *> ASTUnitOrError =
- loadExternalAST(LookupFnName, CrossTUDir, IndexName, DisplayCTUProgress);
+ llvm::Expected<ASTUnit *> ASTUnitOrError = loadExternalAST(
+ LookupName, CrossTUDir, IndexName, DisplayCTUProgress);
if (!ASTUnitOrError)
return ASTUnitOrError.takeError();
ASTUnit *Unit = *ASTUnitOrError;
@@ -229,6 +263,7 @@ CrossTranslationUnitContext::getCrossTUDefinition(const FunctionDecl *FD,
const auto &LangTo = Context.getLangOpts();
const auto &LangFrom = Unit->getASTContext().getLangOpts();
+
  // FIXME: Currently we do not support CTU across C++ and C and across
// different dialects of C++.
if (LangTo.CPlusPlus != LangFrom.CPlusPlus) {
@@ -236,13 +271,52 @@ CrossTranslationUnitContext::getCrossTUDefinition(const FunctionDecl *FD,
return llvm::make_error<IndexError>(index_error_code::lang_mismatch);
}
+  // If the C++ dialects are different then return with an error.
+ //
+ // Consider this STL code:
+ // template<typename _Alloc>
+ // struct __alloc_traits
+ // #if __cplusplus >= 201103L
+ // : std::allocator_traits<_Alloc>
+ // #endif
+ // { // ...
+ // };
+  // This class template would create ODR errors when merging the two units,
+  // since in one translation unit the class template has a base class, while
+  // in the other unit it has none.
+ if (LangTo.CPlusPlus11 != LangFrom.CPlusPlus11 ||
+ LangTo.CPlusPlus14 != LangFrom.CPlusPlus14 ||
+ LangTo.CPlusPlus17 != LangFrom.CPlusPlus17 ||
+ LangTo.CPlusPlus2a != LangFrom.CPlusPlus2a) {
+ ++NumLangDialectMismatch;
+ return llvm::make_error<IndexError>(
+ index_error_code::lang_dialect_mismatch);
+ }
+
TranslationUnitDecl *TU = Unit->getASTContext().getTranslationUnitDecl();
- if (const FunctionDecl *ResultDecl =
- findFunctionInDeclContext(TU, LookupFnName))
+ if (const T *ResultDecl = findDefInDeclContext<T>(TU, LookupName))
return importDefinition(ResultDecl);
return llvm::make_error<IndexError>(index_error_code::failed_import);
}
+llvm::Expected<const FunctionDecl *>
+CrossTranslationUnitContext::getCrossTUDefinition(const FunctionDecl *FD,
+ StringRef CrossTUDir,
+ StringRef IndexName,
+ bool DisplayCTUProgress) {
+ return getCrossTUDefinitionImpl(FD, CrossTUDir, IndexName,
+ DisplayCTUProgress);
+}
+
+llvm::Expected<const VarDecl *>
+CrossTranslationUnitContext::getCrossTUDefinition(const VarDecl *VD,
+ StringRef CrossTUDir,
+ StringRef IndexName,
+ bool DisplayCTUProgress) {
+ return getCrossTUDefinitionImpl(VD, CrossTUDir, IndexName,
+ DisplayCTUProgress);
+}
+
void CrossTranslationUnitContext::emitCrossTUDiagnostics(const IndexError &IE) {
switch (IE.getCode()) {
case index_error_code::missing_index_file:
@@ -269,14 +343,21 @@ void CrossTranslationUnitContext::emitCrossTUDiagnostics(const IndexError &IE) {
llvm::Expected<ASTUnit *> CrossTranslationUnitContext::loadExternalAST(
StringRef LookupName, StringRef CrossTUDir, StringRef IndexName,
bool DisplayCTUProgress) {
- // FIXME: The current implementation only supports loading functions with
+ // FIXME: The current implementation only supports loading decls with
// a lookup name from a single translation unit. If multiple
- // translation units contains functions with the same lookup name an
+  // translation units contain decls with the same lookup name an
// error will be returned.
+
+ if (NumASTLoaded >= CTULoadThreshold) {
+ ++NumASTLoadThresholdReached;
+ return llvm::make_error<IndexError>(
+ index_error_code::load_threshold_reached);
+ }
+
ASTUnit *Unit = nullptr;
- auto FnUnitCacheEntry = FunctionASTUnitMap.find(LookupName);
- if (FnUnitCacheEntry == FunctionASTUnitMap.end()) {
- if (FunctionFileMap.empty()) {
+ auto NameUnitCacheEntry = NameASTUnitMap.find(LookupName);
+ if (NameUnitCacheEntry == NameASTUnitMap.end()) {
+ if (NameFileMap.empty()) {
SmallString<256> IndexFile = CrossTUDir;
if (llvm::sys::path::is_absolute(IndexName))
IndexFile = IndexName;
@@ -285,13 +366,13 @@ llvm::Expected<ASTUnit *> CrossTranslationUnitContext::loadExternalAST(
llvm::Expected<llvm::StringMap<std::string>> IndexOrErr =
parseCrossTUIndex(IndexFile, CrossTUDir);
if (IndexOrErr)
- FunctionFileMap = *IndexOrErr;
+ NameFileMap = *IndexOrErr;
else
return IndexOrErr.takeError();
}
- auto It = FunctionFileMap.find(LookupName);
- if (It == FunctionFileMap.end()) {
+ auto It = NameFileMap.find(LookupName);
+ if (It == NameFileMap.end()) {
++NumNotInOtherTU;
return llvm::make_error<IndexError>(index_error_code::missing_definition);
}
@@ -310,6 +391,7 @@ llvm::Expected<ASTUnit *> CrossTranslationUnitContext::loadExternalAST(
ASTUnit::LoadEverything, Diags, CI.getFileSystemOpts()));
Unit = LoadedUnit.get();
FileASTUnitMap[ASTFileName] = std::move(LoadedUnit);
+ ++NumASTLoaded;
if (DisplayCTUProgress) {
llvm::errs() << "CTU loaded AST file: "
<< ASTFileName << "\n";
@@ -317,9 +399,9 @@ llvm::Expected<ASTUnit *> CrossTranslationUnitContext::loadExternalAST(
} else {
Unit = ASTCacheEntry->second.get();
}
- FunctionASTUnitMap[LookupName] = Unit;
+ NameASTUnitMap[LookupName] = Unit;
} else {
- Unit = FnUnitCacheEntry->second;
+ Unit = NameUnitCacheEntry->second;
}
if (!Unit)
return llvm::make_error<IndexError>(
@@ -327,25 +409,51 @@ llvm::Expected<ASTUnit *> CrossTranslationUnitContext::loadExternalAST(
return Unit;
}
-llvm::Expected<const FunctionDecl *>
-CrossTranslationUnitContext::importDefinition(const FunctionDecl *FD) {
- assert(FD->hasBody() && "Functions to be imported should have body.");
+template <typename T>
+llvm::Expected<const T *>
+CrossTranslationUnitContext::importDefinitionImpl(const T *D) {
+ assert(hasBodyOrInit(D) && "Decls to be imported should have body or init.");
- ASTImporter &Importer = getOrCreateASTImporter(FD->getASTContext());
- auto *ToDecl =
- cast_or_null<FunctionDecl>(Importer.Import(const_cast<FunctionDecl *>(FD)));
- if (!ToDecl)
+ ASTImporter &Importer = getOrCreateASTImporter(D->getASTContext());
+ auto ToDeclOrError = Importer.Import(D);
+ if (!ToDeclOrError) {
+ handleAllErrors(ToDeclOrError.takeError(),
+ [&](const ImportError &IE) {
+ switch (IE.Error) {
+ case ImportError::NameConflict:
+ ++NumNameConflicts;
+ break;
+ case ImportError::UnsupportedConstruct:
+ ++NumUnsupportedNodeFound;
+ break;
+ case ImportError::Unknown:
+ llvm_unreachable("Unknown import error happened.");
+ break;
+ }
+ });
return llvm::make_error<IndexError>(index_error_code::failed_import);
- assert(ToDecl->hasBody());
- assert(FD->hasBody() && "Functions already imported should have body.");
+ }
+ auto *ToDecl = cast<T>(*ToDeclOrError);
+ assert(hasBodyOrInit(ToDecl) && "Imported Decl should have body or init.");
++NumGetCTUSuccess;
+
return ToDecl;
}
-void CrossTranslationUnitContext::lazyInitLookupTable(
+llvm::Expected<const FunctionDecl *>
+CrossTranslationUnitContext::importDefinition(const FunctionDecl *FD) {
+ return importDefinitionImpl(FD);
+}
+
+llvm::Expected<const VarDecl *>
+CrossTranslationUnitContext::importDefinition(const VarDecl *VD) {
+ return importDefinitionImpl(VD);
+}
+
+void CrossTranslationUnitContext::lazyInitImporterSharedSt(
TranslationUnitDecl *ToTU) {
- if (!LookupTable)
- LookupTable = llvm::make_unique<ASTImporterLookupTable>(*ToTU);
+ if (!ImporterSharedSt)
+ ImporterSharedSt = std::make_shared<ASTImporterSharedState>(*ToTU);
}
ASTImporter &
@@ -353,10 +461,10 @@ CrossTranslationUnitContext::getOrCreateASTImporter(ASTContext &From) {
auto I = ASTUnitImporterMap.find(From.getTranslationUnitDecl());
if (I != ASTUnitImporterMap.end())
return *I->second;
- lazyInitLookupTable(Context.getTranslationUnitDecl());
+ lazyInitImporterSharedSt(Context.getTranslationUnitDecl());
ASTImporter *NewImporter = new ASTImporter(
Context, Context.getSourceManager().getFileManager(), From,
- From.getSourceManager().getFileManager(), false, LookupTable.get());
+ From.getSourceManager().getFileManager(), false, ImporterSharedSt);
ASTUnitImporterMap[From.getTranslationUnitDecl()].reset(NewImporter);
return *NewImporter;
}
diff --git a/lib/DirectoryWatcher/DirectoryScanner.cpp b/lib/DirectoryWatcher/DirectoryScanner.cpp
new file mode 100644
index 000000000000..ecfec52f459e
--- /dev/null
+++ b/lib/DirectoryWatcher/DirectoryScanner.cpp
@@ -0,0 +1,54 @@
+//===- DirectoryScanner.cpp - Utility functions for DirectoryWatcher ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "DirectoryScanner.h"
+
+#include "llvm/Support/Path.h"
+
+namespace clang {
+
+using namespace llvm;
+
+Optional<sys::fs::file_status> getFileStatus(StringRef Path) {
+ sys::fs::file_status Status;
+ std::error_code EC = status(Path, Status);
+ if (EC)
+ return None;
+ return Status;
+}
+
+std::vector<std::string> scanDirectory(StringRef Path) {
+ using namespace llvm::sys;
+ std::vector<std::string> Result;
+
+ std::error_code EC;
+ for (auto It = fs::directory_iterator(Path, EC),
+ End = fs::directory_iterator();
+ !EC && It != End; It.increment(EC)) {
+ auto status = getFileStatus(It->path());
+ if (!status.hasValue())
+ continue;
+ Result.emplace_back(sys::path::filename(It->path()));
+ }
+
+ return Result;
+}
+
+std::vector<DirectoryWatcher::Event>
+getAsFileEvents(const std::vector<std::string> &Scan) {
+ std::vector<DirectoryWatcher::Event> Events;
+ Events.reserve(Scan.size());
+
+ for (const auto &File : Scan) {
+ Events.emplace_back(DirectoryWatcher::Event{
+ DirectoryWatcher::Event::EventKind::Modified, File});
+ }
+ return Events;
+}
+
+} // namespace clang \ No newline at end of file
diff --git a/lib/DirectoryWatcher/DirectoryScanner.h b/lib/DirectoryWatcher/DirectoryScanner.h
new file mode 100644
index 000000000000..55731225e251
--- /dev/null
+++ b/lib/DirectoryWatcher/DirectoryScanner.h
@@ -0,0 +1,29 @@
+//===- DirectoryScanner.h - Utility functions for DirectoryWatcher --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/DirectoryWatcher/DirectoryWatcher.h"
+#include "llvm/Support/FileSystem.h"
+#include <string>
+#include <vector>
+
+namespace clang {
+
+/// Gets names (filenames) of items in directory at \p Path.
+/// \returns empty vector if \p Path is not a directory, doesn't exist or can't
+/// be read from.
+std::vector<std::string> scanDirectory(llvm::StringRef Path);
+
+/// Create an event with EventKind::Modified for every element in \p Scan.
+std::vector<DirectoryWatcher::Event>
+getAsFileEvents(const std::vector<std::string> &Scan);
+
+/// Gets status of file (or directory) at \p Path.
+/// \returns llvm::None if \p Path doesn't exist or can't get the status.
+llvm::Optional<llvm::sys::fs::file_status> getFileStatus(llvm::StringRef Path);
+
+} // namespace clang \ No newline at end of file
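For illustration only (not from this patch), how the two helpers above are meant
to compose (the directory path is made up; code assumes the clang namespace):

    // Snapshot a directory and report every existing entry as a Modified event.
    std::vector<DirectoryWatcher::Event> Initial =
        getAsFileEvents(scanDirectory("/tmp/module-cache"));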
diff --git a/lib/DirectoryWatcher/default/DirectoryWatcher-not-implemented.cpp b/lib/DirectoryWatcher/default/DirectoryWatcher-not-implemented.cpp
new file mode 100644
index 000000000000..e330ff06f504
--- /dev/null
+++ b/lib/DirectoryWatcher/default/DirectoryWatcher-not-implemented.cpp
@@ -0,0 +1,19 @@
+//===- DirectoryWatcher-not-implemented.cpp -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/DirectoryWatcher/DirectoryWatcher.h"
+
+using namespace llvm;
+using namespace clang;
+
+std::unique_ptr<DirectoryWatcher> clang::DirectoryWatcher::create(
+ StringRef Path,
+ std::function<void(llvm::ArrayRef<DirectoryWatcher::Event>, bool)> Receiver,
+ bool WaitForInitialSync) {
+ return nullptr;
+} \ No newline at end of file
diff --git a/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp b/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp
new file mode 100644
index 000000000000..6d7d69da4db5
--- /dev/null
+++ b/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp
@@ -0,0 +1,353 @@
+//===- DirectoryWatcher-linux.cpp - Linux-platform directory watching -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "DirectoryScanner.h"
+#include "clang/DirectoryWatcher/DirectoryWatcher.h"
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/ScopeExit.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Errno.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/Path.h"
+#include <atomic>
+#include <condition_variable>
+#include <mutex>
+#include <queue>
+#include <string>
+#include <thread>
+#include <vector>
+
+#include <fcntl.h>
+#include <linux/version.h>
+#include <sys/epoll.h>
+#include <sys/inotify.h>
+#include <unistd.h>
+
+namespace {
+
+using namespace llvm;
+using namespace clang;
+
+/// Pipe for inter-thread synchronization - for epoll-ing on multiple
+/// conditions. It is meant for uni-directional 1:1 signalling - specifically:
+/// no multiple consumers, no data passing. Thread waiting for signal should
+/// poll the FDRead. Signalling thread should call signal() which writes single
+/// character to FDRead.
+struct SemaphorePipe {
+ // Expects two file-descriptors opened as a pipe in the canonical POSIX
+ // order: pipefd[0] refers to the read end of the pipe. pipefd[1] refers to
+ // the write end of the pipe.
+ SemaphorePipe(int pipefd[2])
+ : FDRead(pipefd[0]), FDWrite(pipefd[1]), OwnsFDs(true) {}
+ SemaphorePipe(const SemaphorePipe &) = delete;
+ void operator=(const SemaphorePipe &) = delete;
+ SemaphorePipe(SemaphorePipe &&other)
+ : FDRead(other.FDRead), FDWrite(other.FDWrite),
+ OwnsFDs(other.OwnsFDs) // Someone could have moved from the other
+ // instance before.
+ {
+ other.OwnsFDs = false;
+ };
+
+ void signal() {
+#ifndef NDEBUG
+ ssize_t Result =
+#endif
+ llvm::sys::RetryAfterSignal(-1, write, FDWrite, "A", 1);
+ assert(Result != -1);
+ }
+ ~SemaphorePipe() {
+ if (OwnsFDs) {
+ close(FDWrite);
+ close(FDRead);
+ }
+ }
+ const int FDRead;
+ const int FDWrite;
+ bool OwnsFDs;
+
+ static llvm::Optional<SemaphorePipe> create() {
+ int InotifyPollingStopperFDs[2];
+ if (pipe2(InotifyPollingStopperFDs, O_CLOEXEC) == -1)
+ return llvm::None;
+ return SemaphorePipe(InotifyPollingStopperFDs);
+ }
+};
+
+/// Mutex-protected queue of Events.
+class EventQueue {
+ std::mutex Mtx;
+ std::condition_variable NonEmpty;
+ std::queue<DirectoryWatcher::Event> Events;
+
+public:
+ void push_back(const DirectoryWatcher::Event::EventKind K,
+ StringRef Filename) {
+ {
+ std::unique_lock<std::mutex> L(Mtx);
+ Events.emplace(K, Filename);
+ }
+ NonEmpty.notify_one();
+ }
+
+  // Blocks on the caller thread and uses condition_variable to wait until
+  // there's an event to return.
+ DirectoryWatcher::Event pop_front_blocking() {
+ std::unique_lock<std::mutex> L(Mtx);
+ while (true) {
+ // Since we might have missed all the prior notifications on NonEmpty we
+ // have to check the queue first (under lock).
+ if (!Events.empty()) {
+ DirectoryWatcher::Event Front = Events.front();
+ Events.pop();
+ return Front;
+ }
+ NonEmpty.wait(L, [this]() { return !Events.empty(); });
+ }
+ }
+};
+
+class DirectoryWatcherLinux : public clang::DirectoryWatcher {
+public:
+ DirectoryWatcherLinux(
+ llvm::StringRef WatchedDirPath,
+ std::function<void(llvm::ArrayRef<Event>, bool)> Receiver,
+ bool WaitForInitialSync, int InotifyFD, int InotifyWD,
+ SemaphorePipe &&InotifyPollingStopSignal);
+
+ ~DirectoryWatcherLinux() override {
+ StopWork();
+ InotifyPollingThread.join();
+ EventsReceivingThread.join();
+ inotify_rm_watch(InotifyFD, InotifyWD);
+ llvm::sys::RetryAfterSignal(-1, close, InotifyFD);
+ }
+
+private:
+ const std::string WatchedDirPath;
+ // inotify file descriptor
+ int InotifyFD = -1;
+ // inotify watch descriptor
+ int InotifyWD = -1;
+
+ EventQueue Queue;
+
+ // Make sure lifetime of Receiver fully contains lifetime of
+ // EventsReceivingThread.
+ std::function<void(llvm::ArrayRef<Event>, bool)> Receiver;
+
+ // Consumes inotify events and pushes directory watcher events to the Queue.
+ void InotifyPollingLoop();
+ std::thread InotifyPollingThread;
+ // Using pipe so we can epoll two file descriptors at once - inotify and
+ // stopping condition.
+ SemaphorePipe InotifyPollingStopSignal;
+
+ // Does the initial scan of the directory - directly calling Receiver,
+ // bypassing the Queue. Both InitialScan and EventReceivingLoop use Receiver
+ // which isn't necessarily thread-safe.
+ void InitialScan();
+
+ // Processing events from the Queue.
+  // In case the client doesn't want to do the initial scan synchronously
+ // (WaitForInitialSync=false in ctor) we do the initial scan at the beginning
+ // of this thread.
+ std::thread EventsReceivingThread;
+ // Push event of WatcherGotInvalidated kind to the Queue to stop the loop.
+ // Both InitialScan and EventReceivingLoop use Receiver which isn't
+ // necessarily thread-safe.
+ void EventReceivingLoop();
+
+ // Stops all the async work. Reentrant.
+ void StopWork() {
+ Queue.push_back(DirectoryWatcher::Event::EventKind::WatcherGotInvalidated,
+ "");
+ InotifyPollingStopSignal.signal();
+ }
+};
+
+void DirectoryWatcherLinux::InotifyPollingLoop() {
+ // We want to be able to read ~30 events at once even in the worst case
+ // (obscenely long filenames).
+ constexpr size_t EventBufferLength =
+ 30 * (sizeof(struct inotify_event) + NAME_MAX + 1);
+ // http://man7.org/linux/man-pages/man7/inotify.7.html
+ // Some systems cannot read integer variables if they are not
+ // properly aligned. On other systems, incorrect alignment may
+ // decrease performance. Hence, the buffer used for reading from
+ // the inotify file descriptor should have the same alignment as
+ // struct inotify_event.
+
+ auto ManagedBuffer =
+ llvm::make_unique<llvm::AlignedCharArray<alignof(struct inotify_event),
+ EventBufferLength>>();
+ char *const Buf = ManagedBuffer->buffer;
+
+ const int EpollFD = epoll_create1(EPOLL_CLOEXEC);
+ if (EpollFD == -1) {
+ StopWork();
+ return;
+ }
+ auto EpollFDGuard = llvm::make_scope_exit([EpollFD]() { close(EpollFD); });
+
+ struct epoll_event EventSpec;
+ EventSpec.events = EPOLLIN;
+ EventSpec.data.fd = InotifyFD;
+ if (epoll_ctl(EpollFD, EPOLL_CTL_ADD, InotifyFD, &EventSpec) == -1) {
+ StopWork();
+ return;
+ }
+
+ EventSpec.data.fd = InotifyPollingStopSignal.FDRead;
+ if (epoll_ctl(EpollFD, EPOLL_CTL_ADD, InotifyPollingStopSignal.FDRead,
+ &EventSpec) == -1) {
+ StopWork();
+ return;
+ }
+
+ std::array<struct epoll_event, 2> EpollEventBuffer;
+
+ while (true) {
+ const int EpollWaitResult = llvm::sys::RetryAfterSignal(
+ -1, epoll_wait, EpollFD, EpollEventBuffer.data(),
+ EpollEventBuffer.size(), /*timeout=*/-1 /*== infinity*/);
+ if (EpollWaitResult == -1) {
+ StopWork();
+ return;
+ }
+
+ // Multiple epoll_events can be received for a single file descriptor per
+ // epoll_wait call.
+ for (int i = 0; i < EpollWaitResult; ++i) {
+ if (EpollEventBuffer[i].data.fd == InotifyPollingStopSignal.FDRead) {
+ StopWork();
+ return;
+ }
+ }
+
+    // epoll_wait() always returns either an error or >0 events. Since there was no
+ // event for stopping, it must be an inotify event ready for reading.
+ ssize_t NumRead = llvm::sys::RetryAfterSignal(-1, read, InotifyFD, Buf,
+ EventBufferLength);
+ for (char *P = Buf; P < Buf + NumRead;) {
+ if (P + sizeof(struct inotify_event) > Buf + NumRead) {
+ StopWork();
+ llvm_unreachable("an incomplete inotify_event was read");
+ return;
+ }
+
+ struct inotify_event *Event = reinterpret_cast<struct inotify_event *>(P);
+ P += sizeof(struct inotify_event) + Event->len;
+
+ if (Event->mask & (IN_CREATE | IN_MODIFY | IN_MOVED_TO | IN_DELETE) &&
+ Event->len <= 0) {
+ StopWork();
+ llvm_unreachable("expected a filename from inotify");
+ return;
+ }
+
+ if (Event->mask & (IN_CREATE | IN_MOVED_TO | IN_MODIFY)) {
+ Queue.push_back(DirectoryWatcher::Event::EventKind::Modified,
+ Event->name);
+ } else if (Event->mask & (IN_DELETE | IN_MOVED_FROM)) {
+ Queue.push_back(DirectoryWatcher::Event::EventKind::Removed,
+ Event->name);
+ } else if (Event->mask & (IN_DELETE_SELF | IN_MOVE_SELF)) {
+ Queue.push_back(DirectoryWatcher::Event::EventKind::WatchedDirRemoved,
+ "");
+ StopWork();
+ return;
+ } else if (Event->mask & IN_IGNORED) {
+ StopWork();
+ return;
+ } else {
+ StopWork();
+ llvm_unreachable("Unknown event type.");
+ return;
+ }
+ }
+ }
+}
+
+void DirectoryWatcherLinux::InitialScan() {
+ this->Receiver(getAsFileEvents(scanDirectory(WatchedDirPath)),
+ /*IsInitial=*/true);
+}
+
+void DirectoryWatcherLinux::EventReceivingLoop() {
+ while (true) {
+ DirectoryWatcher::Event Event = this->Queue.pop_front_blocking();
+ this->Receiver(Event, false);
+ if (Event.Kind ==
+ DirectoryWatcher::Event::EventKind::WatcherGotInvalidated) {
+ StopWork();
+ return;
+ }
+ }
+}
+
+DirectoryWatcherLinux::DirectoryWatcherLinux(
+ StringRef WatchedDirPath,
+ std::function<void(llvm::ArrayRef<Event>, bool)> Receiver,
+ bool WaitForInitialSync, int InotifyFD, int InotifyWD,
+ SemaphorePipe &&InotifyPollingStopSignal)
+ : WatchedDirPath(WatchedDirPath), InotifyFD(InotifyFD),
+ InotifyWD(InotifyWD), Receiver(Receiver),
+ InotifyPollingStopSignal(std::move(InotifyPollingStopSignal)) {
+
+ InotifyPollingThread = std::thread([this]() { InotifyPollingLoop(); });
+ // We have no guarantees about thread safety of the Receiver which is being
+  // used in both InitialScan and EventReceivingLoop, so we only ever run them
+  // sequentially, never concurrently.
+ if (WaitForInitialSync) {
+ InitialScan();
+ EventsReceivingThread = std::thread([this]() { EventReceivingLoop(); });
+ } else {
+ EventsReceivingThread = std::thread([this]() {
+ // FIXME: We might want to terminate an async initial scan early in case
+ // of a failure in EventsReceivingThread.
+ InitialScan();
+ EventReceivingLoop();
+ });
+ }
+}
+
+} // namespace
+
+std::unique_ptr<DirectoryWatcher> clang::DirectoryWatcher::create(
+ StringRef Path,
+ std::function<void(llvm::ArrayRef<DirectoryWatcher::Event>, bool)> Receiver,
+ bool WaitForInitialSync) {
+ if (Path.empty())
+ return nullptr;
+
+ const int InotifyFD = inotify_init1(IN_CLOEXEC);
+ if (InotifyFD == -1)
+ return nullptr;
+
+ const int InotifyWD = inotify_add_watch(
+ InotifyFD, Path.str().c_str(),
+ IN_CREATE | IN_DELETE | IN_DELETE_SELF | IN_MODIFY |
+ IN_MOVED_FROM | IN_MOVE_SELF | IN_MOVED_TO | IN_ONLYDIR | IN_IGNORED
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
+ | IN_EXCL_UNLINK
+#endif
+ );
+ if (InotifyWD == -1)
+ return nullptr;
+
+ auto InotifyPollingStopper = SemaphorePipe::create();
+
+ if (!InotifyPollingStopper)
+ return nullptr;
+
+ return llvm::make_unique<DirectoryWatcherLinux>(
+ Path, Receiver, WaitForInitialSync, InotifyFD, InotifyWD,
+ std::move(*InotifyPollingStopper));
+} \ No newline at end of file
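For illustration only (not from this patch), creating a watcher through the
factory above; the directory path is made up and the receiver simply counts the
batches it is handed:

    auto Watcher = clang::DirectoryWatcher::create(
        "/tmp/module-cache",
        [](llvm::ArrayRef<clang::DirectoryWatcher::Event> Events,
           bool IsInitial) {
          llvm::errs() << (IsInitial ? "initial scan: " : "update: ")
                       << Events.size() << " event(s)\n";
        },
        /*WaitForInitialSync=*/true);
    // A null result means the platform watcher could not be set up.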
diff --git a/lib/DirectoryWatcher/mac/DirectoryWatcher-mac.cpp b/lib/DirectoryWatcher/mac/DirectoryWatcher-mac.cpp
new file mode 100644
index 000000000000..3df79ac48a4a
--- /dev/null
+++ b/lib/DirectoryWatcher/mac/DirectoryWatcher-mac.cpp
@@ -0,0 +1,233 @@
+//===- DirectoryWatcher-mac.cpp - Mac-platform directory watching ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "DirectoryScanner.h"
+#include "clang/DirectoryWatcher/DirectoryWatcher.h"
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Path.h"
+#include <CoreServices/CoreServices.h>
+
+using namespace llvm;
+using namespace clang;
+
+static FSEventStreamRef createFSEventStream(
+ StringRef Path,
+ std::function<void(llvm::ArrayRef<DirectoryWatcher::Event>, bool)>,
+ dispatch_queue_t);
+static void stopFSEventStream(FSEventStreamRef);
+
+namespace {
+
+class DirectoryWatcherMac : public clang::DirectoryWatcher {
+public:
+ DirectoryWatcherMac(
+ FSEventStreamRef EventStream,
+ std::function<void(llvm::ArrayRef<DirectoryWatcher::Event>, bool)>
+ Receiver,
+ llvm::StringRef WatchedDirPath)
+ : EventStream(EventStream), Receiver(Receiver),
+ WatchedDirPath(WatchedDirPath) {}
+
+ ~DirectoryWatcherMac() override {
+ stopFSEventStream(EventStream);
+ EventStream = nullptr;
+ // Now it's safe to use Receiver as the only other concurrent use would have
+ // been in EventStream processing.
+ Receiver(DirectoryWatcher::Event(
+ DirectoryWatcher::Event::EventKind::WatcherGotInvalidated, ""),
+ false);
+ }
+
+private:
+ FSEventStreamRef EventStream;
+ std::function<void(llvm::ArrayRef<Event>, bool)> Receiver;
+ const std::string WatchedDirPath;
+};
+
+struct EventStreamContextData {
+ std::string WatchedPath;
+ std::function<void(llvm::ArrayRef<DirectoryWatcher::Event>, bool)> Receiver;
+
+ EventStreamContextData(
+ std::string &&WatchedPath,
+ std::function<void(llvm::ArrayRef<DirectoryWatcher::Event>, bool)>
+ Receiver)
+ : WatchedPath(std::move(WatchedPath)), Receiver(Receiver) {}
+
+ // Needed for FSEvents
+ static void dispose(const void *ctx) {
+ delete static_cast<const EventStreamContextData *>(ctx);
+ }
+};
+} // namespace
+
+constexpr const FSEventStreamEventFlags StreamInvalidatingFlags =
+ kFSEventStreamEventFlagUserDropped | kFSEventStreamEventFlagKernelDropped |
+ kFSEventStreamEventFlagMustScanSubDirs;
+
+constexpr const FSEventStreamEventFlags ModifyingFileEvents =
+ kFSEventStreamEventFlagItemCreated | kFSEventStreamEventFlagItemRenamed |
+ kFSEventStreamEventFlagItemModified;
+
+static void eventStreamCallback(ConstFSEventStreamRef Stream,
+ void *ClientCallBackInfo, size_t NumEvents,
+ void *EventPaths,
+ const FSEventStreamEventFlags EventFlags[],
+ const FSEventStreamEventId EventIds[]) {
+ auto *ctx = static_cast<EventStreamContextData *>(ClientCallBackInfo);
+
+ std::vector<DirectoryWatcher::Event> Events;
+ for (size_t i = 0; i < NumEvents; ++i) {
+ StringRef Path = ((const char **)EventPaths)[i];
+ const FSEventStreamEventFlags Flags = EventFlags[i];
+
+ if (Flags & StreamInvalidatingFlags) {
+ Events.emplace_back(DirectoryWatcher::Event{
+ DirectoryWatcher::Event::EventKind::WatcherGotInvalidated, ""});
+ break;
+ } else if (!(Flags & kFSEventStreamEventFlagItemIsFile)) {
+ // Subdirectories aren't supported - if some directory got removed it
+ // must've been the watched directory itself.
+ if ((Flags & kFSEventStreamEventFlagItemRemoved) &&
+ Path == ctx->WatchedPath) {
+ Events.emplace_back(DirectoryWatcher::Event{
+ DirectoryWatcher::Event::EventKind::WatchedDirRemoved, ""});
+ Events.emplace_back(DirectoryWatcher::Event{
+ DirectoryWatcher::Event::EventKind::WatcherGotInvalidated, ""});
+ break;
+ }
+ // No support for subdirectories - just ignore everything.
+ continue;
+ } else if (Flags & kFSEventStreamEventFlagItemRemoved) {
+ Events.emplace_back(DirectoryWatcher::Event::EventKind::Removed,
+ llvm::sys::path::filename(Path));
+ continue;
+ } else if (Flags & ModifyingFileEvents) {
+ if (!getFileStatus(Path).hasValue()) {
+ Events.emplace_back(DirectoryWatcher::Event::EventKind::Removed,
+ llvm::sys::path::filename(Path));
+ } else {
+ Events.emplace_back(DirectoryWatcher::Event::EventKind::Modified,
+ llvm::sys::path::filename(Path));
+ }
+ continue;
+ }
+
+ // default
+ Events.emplace_back(DirectoryWatcher::Event{
+ DirectoryWatcher::Event::EventKind::WatcherGotInvalidated, ""});
+ llvm_unreachable("Unknown FSEvent type.");
+ }
+
+ if (!Events.empty()) {
+ ctx->Receiver(Events, /*IsInitial=*/false);
+ }
+}
+
+FSEventStreamRef createFSEventStream(
+ StringRef Path,
+ std::function<void(llvm::ArrayRef<DirectoryWatcher::Event>, bool)> Receiver,
+ dispatch_queue_t Queue) {
+ if (Path.empty())
+ return nullptr;
+
+ CFMutableArrayRef PathsToWatch = [&]() {
+ CFMutableArrayRef PathsToWatch =
+ CFArrayCreateMutable(nullptr, 0, &kCFTypeArrayCallBacks);
+ CFStringRef CfPathStr =
+ CFStringCreateWithBytes(nullptr, (const UInt8 *)Path.data(),
+ Path.size(), kCFStringEncodingUTF8, false);
+ CFArrayAppendValue(PathsToWatch, CfPathStr);
+ CFRelease(CfPathStr);
+ return PathsToWatch;
+ }();
+
+ FSEventStreamContext Context = [&]() {
+ std::string RealPath;
+ {
+ SmallString<128> Storage;
+ StringRef P = llvm::Twine(Path).toNullTerminatedStringRef(Storage);
+ char Buffer[PATH_MAX];
+ if (::realpath(P.begin(), Buffer) != nullptr)
+ RealPath = Buffer;
+ else
+ RealPath = Path;
+ }
+
+ FSEventStreamContext Context;
+ Context.version = 0;
+ Context.info = new EventStreamContextData(std::move(RealPath), Receiver);
+ Context.retain = nullptr;
+ Context.release = EventStreamContextData::dispose;
+ Context.copyDescription = nullptr;
+ return Context;
+ }();
+
+ FSEventStreamRef Result = FSEventStreamCreate(
+ nullptr, eventStreamCallback, &Context, PathsToWatch,
+ kFSEventStreamEventIdSinceNow, /* latency in seconds */ 0.0,
+ kFSEventStreamCreateFlagFileEvents | kFSEventStreamCreateFlagNoDefer);
+ CFRelease(PathsToWatch);
+
+ return Result;
+}
+
+void stopFSEventStream(FSEventStreamRef EventStream) {
+ if (!EventStream)
+ return;
+ FSEventStreamStop(EventStream);
+ FSEventStreamInvalidate(EventStream);
+ FSEventStreamRelease(EventStream);
+}
+
+std::unique_ptr<DirectoryWatcher> clang::DirectoryWatcher::create(
+ StringRef Path,
+ std::function<void(llvm::ArrayRef<DirectoryWatcher::Event>, bool)> Receiver,
+ bool WaitForInitialSync) {
+ dispatch_queue_t Queue =
+ dispatch_queue_create("DirectoryWatcher", DISPATCH_QUEUE_SERIAL);
+
+ if (Path.empty())
+ return nullptr;
+
+ auto EventStream = createFSEventStream(Path, Receiver, Queue);
+ if (!EventStream) {
+ return nullptr;
+ }
+
+ std::unique_ptr<DirectoryWatcher> Result =
+ llvm::make_unique<DirectoryWatcherMac>(EventStream, Receiver, Path);
+
+ // We need to copy the data so the lifetime is ok after a const copy is made
+ // for the block.
+ const std::string CopiedPath = Path;
+
+ auto InitWork = ^{
+ // We need to start watching the directory before we start scanning in
+ // order not to miss any event. By dispatching this on the same serial Queue
+ // that the FSEvents are handled on, we start watching BEFORE the initial
+ // scan and handle events ONLY AFTER the scan finishes.
+ FSEventStreamSetDispatchQueue(EventStream, Queue);
+ FSEventStreamStart(EventStream);
+ // We need to decrement the ref count for Queue because initialize() will
+ // return and FSEvents has incremented it. Since we have to wait for FSEvents
+ // to take ownership, it's easiest to do it here rather than on the main
+ // thread.
+ dispatch_release(Queue);
+ Receiver(getAsFileEvents(scanDirectory(CopiedPath)), /*IsInitial=*/true);
+ };
+
+ if (WaitForInitialSync) {
+ dispatch_sync(Queue, InitWork);
+ } else {
+ dispatch_async(Queue, InitWork);
+ }
+
+ return Result;
+}
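A minimal sketch of the ordering guarantee InitWork relies on, assuming only libdispatch: blocks submitted to a serial queue run in FIFO order, so the initial-scan block enqueued above is guaranteed to finish before any event-handling block the stream later submits to the same queue. The queue name and block bodies are illustrative.

#include <dispatch/dispatch.h>

void serialOrderingSketch() {
  dispatch_queue_t Q =
      dispatch_queue_create("example.serial", DISPATCH_QUEUE_SERIAL);
  dispatch_async(Q, ^{ /* initial scan: runs first */ });
  dispatch_async(Q, ^{ /* event batch: runs only after the scan block */ });
  dispatch_release(Q); // drop our reference; already queued blocks still run
}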
diff --git a/lib/Driver/Action.cpp b/lib/Driver/Action.cpp
index d4c7040a233c..47b03f6643b8 100644
--- a/lib/Driver/Action.cpp
+++ b/lib/Driver/Action.cpp
@@ -1,9 +1,8 @@
//===- Action.cpp - Abstract compilation steps ----------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/Compilation.cpp b/lib/Driver/Compilation.cpp
index 982d7ecad962..5f3026e6ce50 100644
--- a/lib/Driver/Compilation.cpp
+++ b/lib/Driver/Compilation.cpp
@@ -1,9 +1,8 @@
//===- Compilation.cpp - Compilation Task Implementation ------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -154,30 +153,28 @@ int Compilation::ExecuteCommand(const Command &C,
if ((getDriver().CCPrintOptions ||
getArgs().hasArg(options::OPT_v)) && !getDriver().CCGenDiagnostics) {
raw_ostream *OS = &llvm::errs();
+ std::unique_ptr<llvm::raw_fd_ostream> OwnedStream;
// Follow gcc implementation of CC_PRINT_OPTIONS; we could also cache the
// output stream.
if (getDriver().CCPrintOptions && getDriver().CCPrintOptionsFilename) {
std::error_code EC;
- OS = new llvm::raw_fd_ostream(getDriver().CCPrintOptionsFilename, EC,
- llvm::sys::fs::F_Append |
- llvm::sys::fs::F_Text);
+ OwnedStream.reset(new llvm::raw_fd_ostream(
+ getDriver().CCPrintOptionsFilename, EC,
+ llvm::sys::fs::F_Append | llvm::sys::fs::F_Text));
if (EC) {
getDriver().Diag(diag::err_drv_cc_print_options_failure)
<< EC.message();
FailingCommand = &C;
- delete OS;
return 1;
}
+ OS = OwnedStream.get();
}
if (getDriver().CCPrintOptions)
*OS << "[Logging clang options]";
C.Print(*OS, "\n", /*Quote=*/getDriver().CCPrintOptions);
-
- if (OS != &llvm::errs())
- delete OS;
}
std::string Error;
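A condensed sketch of the ownership pattern the hunk above introduces: OS always points at a usable stream, OwnedStream owns the optional file stream, and the manual delete disappears. The log file name here is made up.

#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
#include <system_error>

void printOptionsSketch() {
  std::unique_ptr<llvm::raw_fd_ostream> OwnedStream;
  llvm::raw_ostream *OS = &llvm::errs();
  std::error_code EC;
  OwnedStream.reset(new llvm::raw_fd_ostream(
      "cc-print-options.log", EC,
      llvm::sys::fs::F_Append | llvm::sys::fs::F_Text));
  if (!EC)
    OS = OwnedStream.get(); // log to the file only if it opened successfully
  *OS << "[Logging clang options]\n";
  // OwnedStream is released automatically; no manual delete needed.
}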
diff --git a/lib/Driver/DarwinSDKInfo.cpp b/lib/Driver/DarwinSDKInfo.cpp
index 547978b2f973..761c6717266b 100644
--- a/lib/Driver/DarwinSDKInfo.cpp
+++ b/lib/Driver/DarwinSDKInfo.cpp
@@ -1,9 +1,8 @@
//===--- DarwinSDKInfo.cpp - SDK Information parser for darwin - ----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/Distro.cpp b/lib/Driver/Distro.cpp
index 396d0bee5603..f2a3074d1e70 100644
--- a/lib/Driver/Distro.cpp
+++ b/lib/Driver/Distro.cpp
@@ -1,9 +1,8 @@
//===--- Distro.cpp - Linux distribution detection support ------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -52,6 +51,7 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
.Case("bionic", Distro::UbuntuBionic)
.Case("cosmic", Distro::UbuntuCosmic)
.Case("disco", Distro::UbuntuDisco)
+ .Case("eoan", Distro::UbuntuEoan)
.Default(Distro::UnknownDistro);
if (Version != Distro::UnknownDistro)
return Version;
@@ -94,6 +94,8 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
return Distro::DebianStretch;
case 10:
return Distro::DebianBuster;
+ case 11:
+ return Distro::DebianBullseye;
default:
return Distro::UnknownDistro;
}
@@ -103,6 +105,8 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
.Case("wheezy/sid", Distro::DebianWheezy)
.Case("jessie/sid", Distro::DebianJessie)
.Case("stretch/sid", Distro::DebianStretch)
+ .Case("buster/sid", Distro::DebianBuster)
+ .Case("bullseye/sid", Distro::DebianBullseye)
.Default(Distro::UnknownDistro);
}
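For reference, a small standalone sketch of the llvm::StringSwitch idiom this detection code uses; the codenames mirror the hunk above, while the function and its return values are hypothetical.

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

// Maps a Debian codename (as read from /etc/debian_version) to a label.
static const char *classifyDebian(llvm::StringRef Codename) {
  return llvm::StringSwitch<const char *>(Codename)
      .Case("buster/sid", "DebianBuster")
      .Case("bullseye/sid", "DebianBullseye")
      .Default("UnknownDistro");
}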
diff --git a/lib/Driver/Driver.cpp b/lib/Driver/Driver.cpp
index a784e218f139..396ddf4dd816 100644
--- a/lib/Driver/Driver.cpp
+++ b/lib/Driver/Driver.cpp
@@ -1,9 +1,8 @@
//===--- Driver.cpp - Clang GCC Compatible Driver -------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -39,6 +38,7 @@
#include "ToolChains/NetBSD.h"
#include "ToolChains/OpenBSD.h"
#include "ToolChains/PS4CPU.h"
+#include "ToolChains/PPCLinux.h"
#include "ToolChains/RISCVToolchain.h"
#include "ToolChains/Solaris.h"
#include "ToolChains/TCE.h"
@@ -90,6 +90,33 @@ using namespace clang::driver;
using namespace clang;
using namespace llvm::opt;
+// static
+std::string Driver::GetResourcesPath(StringRef BinaryPath,
+ StringRef CustomResourceDir) {
+ // Since the resource directory is embedded in the module hash, it's important
+ // that all places that need it call this function, so that they get the
+ // exact same string ("a/../b/" and "b/" get different hashes, for example).
+
+ // Dir is bin/ or lib/, depending on where BinaryPath is.
+ std::string Dir = llvm::sys::path::parent_path(BinaryPath);
+
+ SmallString<128> P(Dir);
+ if (CustomResourceDir != "") {
+ llvm::sys::path::append(P, CustomResourceDir);
+ } else {
+ // On Windows, libclang.dll is in bin/.
+ // On non-Windows, libclang.so/.dylib is in lib/.
+ // With a static-library build of libclang, LibClangPath will contain the
+ // path of the embedding binary, which for LLVM binaries will be in bin/.
+ // ../lib gets us to lib/ in both cases.
+ P = llvm::sys::path::parent_path(Dir);
+ llvm::sys::path::append(P, Twine("lib") + CLANG_LIBDIR_SUFFIX, "clang",
+ CLANG_VERSION_STRING);
+ }
+
+ return P.str();
+}
+
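A worked illustration of the path arithmetic above, with all concrete values hypothetical: for BinaryPath /opt/llvm/bin/clang and an empty CustomResourceDir, Dir is /opt/llvm/bin and the result is /opt/llvm/lib/clang/<version>.

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Path.h"

void resourceDirSketch() {
  llvm::StringRef BinaryPath = "/opt/llvm/bin/clang";
  llvm::StringRef Dir = llvm::sys::path::parent_path(BinaryPath); // /opt/llvm/bin
  llvm::SmallString<128> P(llvm::sys::path::parent_path(Dir));    // /opt/llvm
  llvm::sys::path::append(P, "lib", "clang", "9.0.0"); // version is illustrative
  // P == "/opt/llvm/lib/clang/9.0.0", assuming an empty CLANG_LIBDIR_SUFFIX.
}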
Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
DiagnosticsEngine &Diags,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS)
@@ -106,7 +133,7 @@ Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
// Provide a sane fallback if no VFS is specified.
if (!this->VFS)
- this->VFS = llvm::vfs::getRealFileSystem();
+ this->VFS = llvm::vfs::createPhysicalFileSystem().release();
Name = llvm::sys::path::filename(ClangExecutable);
Dir = llvm::sys::path::parent_path(ClangExecutable);
@@ -120,17 +147,7 @@ Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
#endif
// Compute the path to the resource directory.
- StringRef ClangResourceDir(CLANG_RESOURCE_DIR);
- SmallString<128> P(Dir);
- if (ClangResourceDir != "") {
- llvm::sys::path::append(P, ClangResourceDir);
- } else {
- StringRef ClangLibdirSuffix(CLANG_LIBDIR_SUFFIX);
- P = llvm::sys::path::parent_path(Dir);
- llvm::sys::path::append(P, Twine("lib") + ClangLibdirSuffix, "clang",
- CLANG_VERSION_STRING);
- }
- ResourceDir = P.str();
+ ResourceDir = GetResourcesPath(ClangExecutable, CLANG_RESOURCE_DIR);
}
void Driver::ParseDriverMode(StringRef ProgramName,
@@ -230,8 +247,9 @@ InputArgList Driver::ParseArgStrings(ArrayRef<const char *> ArgStrings,
: diag::err_drv_unknown_argument;
Diags.Report(DiagID) << ArgString;
} else {
- DiagID = IsCLMode() ? diag::warn_drv_unknown_argument_clang_cl_with_suggestion
- : diag::err_drv_unknown_argument_with_suggestion;
+ DiagID = IsCLMode()
+ ? diag::warn_drv_unknown_argument_clang_cl_with_suggestion
+ : diag::err_drv_unknown_argument_with_suggestion;
Diags.Report(DiagID) << ArgString << Nearest;
}
ContainsError |= Diags.getDiagnosticLevel(DiagID, SourceLocation()) >
@@ -262,11 +280,13 @@ phases::ID Driver::getFinalPhase(const DerivedArgList &DAL,
// -{fsyntax-only,-analyze,emit-ast} only run up to the compiler.
} else if ((PhaseArg = DAL.getLastArg(options::OPT_fsyntax_only)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT_print_supported_cpus)) ||
(PhaseArg = DAL.getLastArg(options::OPT_module_file_info)) ||
(PhaseArg = DAL.getLastArg(options::OPT_verify_pch)) ||
(PhaseArg = DAL.getLastArg(options::OPT_rewrite_objc)) ||
(PhaseArg = DAL.getLastArg(options::OPT_rewrite_legacy_objc)) ||
(PhaseArg = DAL.getLastArg(options::OPT__migrate)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT_emit_iterface_stubs)) ||
(PhaseArg = DAL.getLastArg(options::OPT__analyze,
options::OPT__analyze_auto)) ||
(PhaseArg = DAL.getLastArg(options::OPT_emit_ast))) {
@@ -946,6 +966,9 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
InputArgList Args = std::move(HasConfigFile ? std::move(*CfgOptions)
: std::move(*CLOptions));
+ // The args for config files or /clang: flags belong to different InputArgList
+ // objects than Args. This copies an Arg from one of those other InputArgLists
+ // to the ownership of Args.
auto appendOneArg = [&Args](const Arg *Opt, const Arg *BaseArg) {
unsigned Index = Args.MakeIndex(Opt->getSpelling());
Arg *Copy = new llvm::opt::Arg(Opt->getOption(), Opt->getSpelling(),
@@ -987,6 +1010,11 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
}
}
+ // Check for working directory option before accessing any files
+ if (Arg *WD = Args.getLastArg(options::OPT_working_directory))
+ if (VFS->setCurrentWorkingDirectory(WD->getValue()))
+ Diag(diag::err_drv_unable_to_set_working_directory) << WD->getValue();
+
// FIXME: This stuff needs to go into the Compilation, not the driver.
bool CCCPrintPhases;
@@ -1401,11 +1429,13 @@ void Driver::generateCompilationDiagnostics(
}
void Driver::setUpResponseFiles(Compilation &C, Command &Cmd) {
- // Since commandLineFitsWithinSystemLimits() may underestimate system's capacity
- // if the tool does not support response files, there is a chance/ that things
- // will just work without a response file, so we silently just skip it.
+ // Since commandLineFitsWithinSystemLimits() may underestimate system's
+ // capacity if the tool does not support response files, there is a chance
+ // that things will just work without a response file, so we silently just
+ // skip it.
if (Cmd.getCreator().getResponseFilesSupport() == Tool::RF_None ||
- llvm::sys::commandLineFitsWithinSystemLimits(Cmd.getExecutable(), Cmd.getArguments()))
+ llvm::sys::commandLineFitsWithinSystemLimits(Cmd.getExecutable(),
+ Cmd.getArguments()))
return;
std::string TmpName = GetTemporaryPath("response", "txt");
@@ -1559,8 +1589,7 @@ void Driver::HandleAutocompletions(StringRef PassedFlags) const {
// We want to show cc1-only options only when clang is invoked with -cc1 or
// -Xclang.
- if (std::find(Flags.begin(), Flags.end(), "-Xclang") != Flags.end() ||
- std::find(Flags.begin(), Flags.end(), "-cc1") != Flags.end())
+ if (llvm::is_contained(Flags, "-Xclang") || llvm::is_contained(Flags, "-cc1"))
DisableFlags &= ~options::NoDriverOption;
StringRef Cur;
@@ -1625,11 +1654,7 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
if (C.getArgs().hasArg(options::OPT_dumpversion)) {
// Since -dumpversion is only implemented for pedantic GCC compatibility, we
// return an answer which matches our definition of __VERSION__.
- //
- // If we want to return a more correct answer some day, then we should
- // introduce a non-pedantically GCC compatible mode to Clang in which we
- // provide sensible definitions for -dumpversion, __VERSION__, etc.
- llvm::outs() << "4.2.1\n";
+ llvm::outs() << CLANG_VERSION_STRING << "\n";
return false;
}
@@ -1651,7 +1676,8 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
}
if (C.getArgs().hasArg(options::OPT_v) ||
- C.getArgs().hasArg(options::OPT__HASH_HASH_HASH)) {
+ C.getArgs().hasArg(options::OPT__HASH_HASH_HASH) ||
+ C.getArgs().hasArg(options::OPT_print_supported_cpus)) {
PrintVersion(C, llvm::errs());
SuppressMissingInputWarning = true;
}
@@ -1680,7 +1706,7 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
bool separator = false;
for (const std::string &Path : TC.getProgramPaths()) {
if (separator)
- llvm::outs() << ':';
+ llvm::outs() << llvm::sys::EnvPathSeparator;
llvm::outs() << Path;
separator = true;
}
@@ -1691,7 +1717,7 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
for (const std::string &Path : TC.getFilePaths()) {
// Always print a separator. ResourceDir was the first item shown.
- llvm::outs() << ':';
+ llvm::outs() << llvm::sys::EnvPathSeparator;
// Interpretation of leading '=' is needed only for NetBSD.
if (Path[0] == '=')
llvm::outs() << sysroot << Path.substr(1);
@@ -1960,31 +1986,20 @@ void Driver::BuildUniversalActions(Compilation &C, const ToolChain &TC,
}
}
-/// Check that the file referenced by Value exists. If it doesn't,
-/// issue a diagnostic and return false.
-static bool DiagnoseInputExistence(const Driver &D, const DerivedArgList &Args,
- StringRef Value, types::ID Ty) {
- if (!D.getCheckInputsExist())
+bool Driver::DiagnoseInputExistence(const DerivedArgList &Args, StringRef Value,
+ types::ID Ty, bool TypoCorrect) const {
+ if (!getCheckInputsExist())
return true;
// stdin always exists.
if (Value == "-")
return true;
- SmallString<64> Path(Value);
- if (Arg *WorkDir = Args.getLastArg(options::OPT_working_directory)) {
- if (!llvm::sys::path::is_absolute(Path)) {
- SmallString<64> Directory(WorkDir->getValue());
- llvm::sys::path::append(Directory, Value);
- Path.assign(Directory);
- }
- }
-
- if (D.getVFS().exists(Path))
+ if (getVFS().exists(Value))
return true;
- if (D.IsCLMode()) {
- if (!llvm::sys::path::is_absolute(Twine(Path)) &&
+ if (IsCLMode()) {
+ if (!llvm::sys::path::is_absolute(Twine(Value)) &&
llvm::sys::Process::FindInEnvPath("LIB", Value))
return true;
@@ -1996,7 +2011,26 @@ static bool DiagnoseInputExistence(const Driver &D, const DerivedArgList &Args,
}
}
- D.Diag(clang::diag::err_drv_no_such_file) << Path;
+ if (TypoCorrect) {
+ // Check if the filename is a typo for an option flag. OptTable thinks
+ // that all args that are not known options and that start with / are
+ // filenames, but e.g. `/diagnostic:caret` is more likely a typo for
+ // the option `/diagnostics:caret` than a reference to a file in the root
+ // directory.
+ unsigned IncludedFlagsBitmask;
+ unsigned ExcludedFlagsBitmask;
+ std::tie(IncludedFlagsBitmask, ExcludedFlagsBitmask) =
+ getIncludeExcludeOptionFlagMasks(IsCLMode());
+ std::string Nearest;
+ if (getOpts().findNearest(Value, Nearest, IncludedFlagsBitmask,
+ ExcludedFlagsBitmask) <= 1) {
+ Diag(clang::diag::err_drv_no_such_file_with_suggestion)
+ << Value << Nearest;
+ return false;
+ }
+ }
+
+ Diag(clang::diag::err_drv_no_such_file) << Value;
return false;
}
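A minimal sketch of the OptTable::findNearest check used for the suggestion above; the misspelled input is a placeholder and no include/exclude flag filtering is applied here, unlike the CL-mode masks in the hunk.

#include "clang/Driver/Options.h"
#include "llvm/Option/OptTable.h"
#include "llvm/Support/raw_ostream.h"
#include <string>

void suggestionSketch() {
  auto Opts = clang::driver::createDriverOptTable();
  std::string Nearest;
  // An edit distance of at most 1 is treated as "probably a typo", matching
  // the threshold in DiagnoseInputExistence above.
  if (Opts->findNearest("/diagnostic:caret", Nearest) <= 1)
    llvm::errs() << "did you mean '" << Nearest << "'?\n";
}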
@@ -2019,7 +2053,8 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
Arg *Previous = nullptr;
bool ShowNote = false;
- for (Arg *A : Args.filtered(options::OPT__SLASH_TC, options::OPT__SLASH_TP)) {
+ for (Arg *A :
+ Args.filtered(options::OPT__SLASH_TC, options::OPT__SLASH_TP)) {
if (Previous) {
Diag(clang::diag::warn_drv_overriding_flag_option)
<< Previous->getSpelling() << A->getSpelling();
@@ -2084,6 +2119,12 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
Diag(clang::diag::warn_drv_treating_input_as_cxx)
<< getTypeName(OldTy) << getTypeName(Ty);
}
+
+ // If running with -fthinlto-index=, extensions that normally identify
+ // native object files actually identify LLVM bitcode files.
+ if (Args.hasArgNoClaim(options::OPT_fthinlto_index_EQ) &&
+ Ty == types::TY_Object)
+ Ty = types::TY_LLVM_BC;
}
// -ObjC and -ObjC++ override the default language, but only for "source
@@ -2112,19 +2153,21 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
}
}
- if (DiagnoseInputExistence(*this, Args, Value, Ty))
+ if (DiagnoseInputExistence(Args, Value, Ty, /*TypoCorrect=*/true))
Inputs.push_back(std::make_pair(Ty, A));
} else if (A->getOption().matches(options::OPT__SLASH_Tc)) {
StringRef Value = A->getValue();
- if (DiagnoseInputExistence(*this, Args, Value, types::TY_C)) {
+ if (DiagnoseInputExistence(Args, Value, types::TY_C,
+ /*TypoCorrect=*/false)) {
Arg *InputArg = MakeInputArg(Args, *Opts, A->getValue());
Inputs.push_back(std::make_pair(types::TY_C, InputArg));
}
A->claim();
} else if (A->getOption().matches(options::OPT__SLASH_Tp)) {
StringRef Value = A->getValue();
- if (DiagnoseInputExistence(*this, Args, Value, types::TY_CXX)) {
+ if (DiagnoseInputExistence(Args, Value, types::TY_CXX,
+ /*TypoCorrect=*/false)) {
Arg *InputArg = MakeInputArg(Args, *Opts, A->getValue());
Inputs.push_back(std::make_pair(types::TY_CXX, InputArg));
}
@@ -2146,7 +2189,7 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
Diag(clang::diag::err_drv_unknown_language) << A->getValue();
InputType = types::TY_Object;
}
- } else if (A->getOption().getID() == options::OPT__SLASH_U) {
+ } else if (A->getOption().getID() == options::OPT_U) {
assert(A->getNumValues() == 1 && "The /U option has one value.");
StringRef Val = A->getValue(0);
if (Val.find_first_of("/\\") != StringRef::npos) {
@@ -2278,6 +2321,9 @@ class OffloadingActionBuilder final {
/// Flag that is set to true if this builder acted on the current input.
bool IsActive = false;
+
+ /// Flag for -fgpu-rdc.
+ bool Relocatable = false;
public:
CudaActionBuilderBase(Compilation &C, DerivedArgList &Args,
const Driver::InputList &Inputs,
@@ -2323,6 +2369,12 @@ class OffloadingActionBuilder final {
// If this is an unbundling action use it as is for each CUDA toolchain.
if (auto *UA = dyn_cast<OffloadUnbundlingJobAction>(HostAction)) {
+
+ // If -fgpu-rdc is disabled, should not unbundle since there is no
+ // device code to link.
+ if (!Relocatable)
+ return ABRT_Inactive;
+
CudaDeviceActions.clear();
auto *IA = cast<InputAction>(UA->getInputs().back());
std::string FileName = IA->getInputArg().getAsString(Args);
@@ -2394,6 +2446,9 @@ class OffloadingActionBuilder final {
!C.hasOffloadToolChain<Action::OFK_HIP>())
return false;
+ Relocatable = Args.hasFlag(options::OPT_fgpu_rdc,
+ options::OPT_fno_gpu_rdc, /*Default=*/false);
+
const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
assert(HostTC && "No toolchain for host compilation.");
if (HostTC->getTriple().isNVPTX() ||
@@ -2579,13 +2634,11 @@ class OffloadingActionBuilder final {
class HIPActionBuilder final : public CudaActionBuilderBase {
/// The linker inputs obtained for each device arch.
SmallVector<ActionList, 8> DeviceLinkerInputs;
- bool Relocatable;
public:
HIPActionBuilder(Compilation &C, DerivedArgList &Args,
const Driver::InputList &Inputs)
- : CudaActionBuilderBase(C, Args, Inputs, Action::OFK_HIP),
- Relocatable(false) {}
+ : CudaActionBuilderBase(C, Args, Inputs, Action::OFK_HIP) {}
bool canUseBundlerUnbundler() const override { return true; }
@@ -2690,13 +2743,6 @@ class OffloadingActionBuilder final {
++I;
}
}
-
- bool initialize() override {
- Relocatable = Args.hasFlag(options::OPT_fgpu_rdc,
- options::OPT_fno_gpu_rdc, /*Default=*/false);
-
- return CudaActionBuilderBase::initialize();
- }
};
/// OpenMP action builder. The host bitcode is passed to the device frontend
@@ -3341,6 +3387,18 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
Args.ClaimAllArgs(options::OPT_cl_compile_Group);
}
+ // If --print-supported-cpus, -mcpu=? or -mtune=? is specified, build a custom
+ // Compile phase that prints out supported cpu models and quits.
+ if (Arg *A = Args.getLastArg(options::OPT_print_supported_cpus)) {
+ // Use the -mcpu=? flag as the dummy input to cc1.
+ Actions.clear();
+ Action *InputAc = C.MakeAction<InputAction>(*A, types::TY_C);
+ Actions.push_back(
+ C.MakeAction<PrecompileJobAction>(InputAc, types::TY_Nothing));
+ for (auto &I : Inputs)
+ I.second->claim();
+ }
+
// Claim ignored clang-cl options.
Args.ClaimAllArgs(options::OPT_cl_ignored_Group);
@@ -3426,6 +3484,8 @@ Action *Driver::ConstructPhaseAction(
return C.MakeAction<CompileJobAction>(Input, types::TY_ModuleFile);
if (Args.hasArg(options::OPT_verify_pch))
return C.MakeAction<VerifyPCHJobAction>(Input, types::TY_Nothing);
+ if (Args.hasArg(options::OPT_emit_iterface_stubs))
+ return C.MakeAction<CompileJobAction>(Input, types::TY_IFS);
return C.MakeAction<CompileJobAction>(Input, types::TY_LLVM_BC);
}
case phases::Backend: {
@@ -3963,9 +4023,9 @@ InputInfo Driver::BuildJobsForActionNoCache(
Input.claim();
if (Input.getOption().matches(options::OPT_INPUT)) {
const char *Name = Input.getValue();
- return InputInfo(A, Name, /* BaseInput = */ Name);
+ return InputInfo(A, Name, /* _BaseInput = */ Name);
}
- return InputInfo(A, &Input, /* BaseInput = */ "");
+ return InputInfo(A, &Input, /* _BaseInput = */ "");
}
if (const BindArchAction *BAA = dyn_cast<BindArchAction>(A)) {
@@ -4244,10 +4304,12 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
Arg *A = C.getArgs().getLastArg(options::OPT_fcrash_diagnostics_dir);
if (CCGenDiagnostics && A) {
SmallString<128> CrashDirectory(A->getValue());
+ if (!getVFS().exists(CrashDirectory))
+ llvm::sys::fs::create_directories(CrashDirectory);
llvm::sys::path::append(CrashDirectory, Split.first);
const char *Middle = Suffix ? "-%%%%%%." : "-%%%%%%";
- std::error_code EC =
- llvm::sys::fs::createUniqueFile(CrashDirectory + Middle + Suffix, TmpName);
+ std::error_code EC = llvm::sys::fs::createUniqueFile(
+ CrashDirectory + Middle + Suffix, TmpName);
if (EC) {
Diag(clang::diag::err_unable_to_make_temp) << EC.message();
return "";
@@ -4401,6 +4463,11 @@ std::string Driver::GetFilePath(StringRef Name, const ToolChain &TC) const {
if (llvm::sys::fs::exists(Twine(P)))
return P.str();
+ SmallString<128> D(Dir);
+ llvm::sys::path::append(D, "..", Name);
+ if (llvm::sys::fs::exists(Twine(D)))
+ return D.str();
+
if (auto P = SearchPaths(TC.getLibraryPaths()))
return *P;
@@ -4558,6 +4625,11 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
!Target.hasEnvironment())
TC = llvm::make_unique<toolchains::MipsLLVMToolChain>(*this, Target,
Args);
+ else if (Target.getArch() == llvm::Triple::ppc ||
+ Target.getArch() == llvm::Triple::ppc64 ||
+ Target.getArch() == llvm::Triple::ppc64le)
+ TC = llvm::make_unique<toolchains::PPCLinuxToolChain>(*this, Target,
+ Args);
else
TC = llvm::make_unique<toolchains::Linux>(*this, Target, Args);
break;
@@ -4571,6 +4643,8 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
TC = llvm::make_unique<toolchains::Solaris>(*this, Target, Args);
break;
case llvm::Triple::AMDHSA:
+ case llvm::Triple::AMDPAL:
+ case llvm::Triple::Mesa3D:
TC = llvm::make_unique<toolchains::AMDGPUToolChain>(*this, Target, Args);
break;
case llvm::Triple::Win32:
@@ -4750,7 +4824,8 @@ bool Driver::GetReleaseVersion(StringRef Str,
return false;
}
-std::pair<unsigned, unsigned> Driver::getIncludeExcludeOptionFlagMasks(bool IsClCompatMode) const {
+std::pair<unsigned, unsigned>
+Driver::getIncludeExcludeOptionFlagMasks(bool IsClCompatMode) const {
unsigned IncludedFlagsBitmask = 0;
unsigned ExcludedFlagsBitmask = options::NoDriverOption;
diff --git a/lib/Driver/DriverOptions.cpp b/lib/Driver/DriverOptions.cpp
index 11e7e4c8fe2b..1ad188838eda 100644
--- a/lib/Driver/DriverOptions.cpp
+++ b/lib/Driver/DriverOptions.cpp
@@ -1,9 +1,8 @@
//===--- DriverOptions.cpp - Driver Options Table -------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/InputInfo.h b/lib/Driver/InputInfo.h
index 0c36e817c135..a6b6f7f344bc 100644
--- a/lib/Driver/InputInfo.h
+++ b/lib/Driver/InputInfo.h
@@ -1,9 +1,8 @@
//===--- InputInfo.h - Input Source & Type Information ----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/Job.cpp b/lib/Driver/Job.cpp
index 8d1dfbe12d73..0a95e49694f9 100644
--- a/lib/Driver/Job.cpp
+++ b/lib/Driver/Job.cpp
@@ -1,9 +1,8 @@
//===- Job.cpp - Command to Execute ---------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -100,7 +99,7 @@ static bool skipArgs(const char *Flag, bool HaveCrashVFS, int &SkipNum,
}
void Command::printArg(raw_ostream &OS, StringRef Arg, bool Quote) {
- const bool Escape = Arg.find_first_of("\"\\$") != StringRef::npos;
+ const bool Escape = Arg.find_first_of(" \"\\$") != StringRef::npos;
if (!Quote && !Escape) {
OS << Arg;
@@ -251,8 +250,8 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
}
}
- auto Found = std::find_if(InputFilenames.begin(), InputFilenames.end(),
- [&Arg](StringRef IF) { return IF == Arg; });
+ auto Found = llvm::find_if(InputFilenames,
+ [&Arg](StringRef IF) { return IF == Arg; });
if (Found != InputFilenames.end() &&
(i == 0 || StringRef(Args[i - 1]) != "-main-file-name")) {
// Replace the input file name with the crashinfo's file name.
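A tiny sketch of what the one-character change to printArg affects: the escape check now also fires for arguments containing spaces, so printed command lines (for example from -###) quote them. The sample strings are arbitrary.

#include "llvm/ADT/StringRef.h"

// Mirrors the updated check: quote on a space as well as on '"', '\' or '$'.
bool needsQuoting(llvm::StringRef Arg) {
  return Arg.find_first_of(" \"\\$") != llvm::StringRef::npos;
}
// needsQuoting("foo bar") == true, needsQuoting("foobar") == false.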
diff --git a/lib/Driver/Multilib.cpp b/lib/Driver/Multilib.cpp
index 178a60db60e5..303047e05f78 100644
--- a/lib/Driver/Multilib.cpp
+++ b/lib/Driver/Multilib.cpp
@@ -1,9 +1,8 @@
//===- Multilib.cpp - Multilib Implementation -----------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -52,8 +51,9 @@ static void normalizePathSegment(std::string &Segment) {
}
Multilib::Multilib(StringRef GCCSuffix, StringRef OSSuffix,
- StringRef IncludeSuffix)
- : GCCSuffix(GCCSuffix), OSSuffix(OSSuffix), IncludeSuffix(IncludeSuffix) {
+ StringRef IncludeSuffix, int Priority)
+ : GCCSuffix(GCCSuffix), OSSuffix(OSSuffix), IncludeSuffix(IncludeSuffix),
+ Priority(Priority) {
normalizePathSegment(this->GCCSuffix);
normalizePathSegment(this->OSSuffix);
normalizePathSegment(this->IncludeSuffix);
@@ -266,8 +266,19 @@ bool MultilibSet::select(const Multilib::flags_list &Flags, Multilib &M) const {
return true;
}
- // TODO: pick the "best" multlib when more than one is suitable
- assert(false);
+ // Sort multilibs by priority and select the one with the highest priority.
+ llvm::sort(Filtered.begin(), Filtered.end(),
+ [](const Multilib &a, const Multilib &b) -> bool {
+ return a.priority() > b.priority();
+ });
+
+ if (Filtered[0].priority() > Filtered[1].priority()) {
+ M = Filtered[0];
+ return true;
+ }
+
+ // TODO: We should consider returning llvm::Error rather than aborting.
+ assert(false && "More than one multilib with the same priority");
return false;
}
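A condensed sketch of the priority-based selection added above, using hypothetical multilibs; a tie at the top priority would still reach the assertion in the real code.

#include "clang/Driver/Multilib.h"
#include "llvm/ADT/STLExtras.h"
#include <vector>

clang::driver::Multilib pickHighestPriority() {
  std::vector<clang::driver::Multilib> Filtered = {
      clang::driver::Multilib("/soft-float", "", "", /*Priority=*/1),
      clang::driver::Multilib("/hard-float", "", "", /*Priority=*/5)};
  llvm::sort(Filtered.begin(), Filtered.end(),
             [](const clang::driver::Multilib &a,
                const clang::driver::Multilib &b) {
               return a.priority() > b.priority();
             });
  return Filtered[0]; // the /hard-float multilib, priority 5
}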
diff --git a/lib/Driver/Phases.cpp b/lib/Driver/Phases.cpp
index 7ae270857f4d..5b776c63f713 100644
--- a/lib/Driver/Phases.cpp
+++ b/lib/Driver/Phases.cpp
@@ -1,9 +1,8 @@
//===--- Phases.cpp - Transformations on Driver Types ---------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/SanitizerArgs.cpp b/lib/Driver/SanitizerArgs.cpp
index 1a46073aaa37..6b6a9feec42c 100644
--- a/lib/Driver/SanitizerArgs.cpp
+++ b/lib/Driver/SanitizerArgs.cpp
@@ -1,9 +1,8 @@
//===--- SanitizerArgs.cpp - Arguments for sanitizer tools ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "clang/Driver/SanitizerArgs.h"
@@ -22,33 +21,55 @@
#include <memory>
using namespace clang;
-using namespace clang::SanitizerKind;
using namespace clang::driver;
using namespace llvm::opt;
-enum : SanitizerMask {
- NeedsUbsanRt = Undefined | Integer | ImplicitConversion | Nullability | CFI,
- NeedsUbsanCxxRt = Vptr | CFI,
- NotAllowedWithTrap = Vptr,
- NotAllowedWithMinimalRuntime = Vptr,
- RequiresPIE = DataFlow | HWAddress | Scudo,
- NeedsUnwindTables = Address | HWAddress | Thread | Memory | DataFlow,
- SupportsCoverage = Address | HWAddress | KernelAddress | KernelHWAddress |
- Memory | KernelMemory | Leak | Undefined | Integer |
- ImplicitConversion | Nullability | DataFlow | Fuzzer |
- FuzzerNoLink,
- RecoverableByDefault = Undefined | Integer | ImplicitConversion | Nullability,
- Unrecoverable = Unreachable | Return,
- AlwaysRecoverable = KernelAddress | KernelHWAddress,
- LegacyFsanitizeRecoverMask = Undefined | Integer,
- NeedsLTO = CFI,
- TrappingSupported = (Undefined & ~Vptr) | UnsignedIntegerOverflow |
- ImplicitConversion | Nullability | LocalBounds | CFI,
- TrappingDefault = CFI,
- CFIClasses =
- CFIVCall | CFINVCall | CFIMFCall | CFIDerivedCast | CFIUnrelatedCast,
- CompatibleWithMinimalRuntime = TrappingSupported | Scudo | ShadowCallStack,
-};
+static const SanitizerMask NeedsUbsanRt =
+ SanitizerKind::Undefined | SanitizerKind::Integer |
+ SanitizerKind::ImplicitConversion | SanitizerKind::Nullability |
+ SanitizerKind::CFI | SanitizerKind::FloatDivideByZero;
+static const SanitizerMask NeedsUbsanCxxRt =
+ SanitizerKind::Vptr | SanitizerKind::CFI;
+static const SanitizerMask NotAllowedWithTrap = SanitizerKind::Vptr;
+static const SanitizerMask NotAllowedWithMinimalRuntime =
+ SanitizerKind::Function | SanitizerKind::Vptr;
+static const SanitizerMask RequiresPIE =
+ SanitizerKind::DataFlow | SanitizerKind::HWAddress | SanitizerKind::Scudo;
+static const SanitizerMask NeedsUnwindTables =
+ SanitizerKind::Address | SanitizerKind::HWAddress | SanitizerKind::Thread |
+ SanitizerKind::Memory | SanitizerKind::DataFlow;
+static const SanitizerMask SupportsCoverage =
+ SanitizerKind::Address | SanitizerKind::HWAddress |
+ SanitizerKind::KernelAddress | SanitizerKind::KernelHWAddress |
+ SanitizerKind::MemTag | SanitizerKind::Memory |
+ SanitizerKind::KernelMemory | SanitizerKind::Leak |
+ SanitizerKind::Undefined | SanitizerKind::Integer |
+ SanitizerKind::ImplicitConversion | SanitizerKind::Nullability |
+ SanitizerKind::DataFlow | SanitizerKind::Fuzzer |
+ SanitizerKind::FuzzerNoLink | SanitizerKind::FloatDivideByZero;
+static const SanitizerMask RecoverableByDefault =
+ SanitizerKind::Undefined | SanitizerKind::Integer |
+ SanitizerKind::ImplicitConversion | SanitizerKind::Nullability |
+ SanitizerKind::FloatDivideByZero;
+static const SanitizerMask Unrecoverable =
+ SanitizerKind::Unreachable | SanitizerKind::Return;
+static const SanitizerMask AlwaysRecoverable =
+ SanitizerKind::KernelAddress | SanitizerKind::KernelHWAddress;
+static const SanitizerMask LegacyFsanitizeRecoverMask =
+ SanitizerKind::Undefined | SanitizerKind::Integer;
+static const SanitizerMask NeedsLTO = SanitizerKind::CFI;
+static const SanitizerMask TrappingSupported =
+ (SanitizerKind::Undefined & ~SanitizerKind::Vptr) |
+ SanitizerKind::UnsignedIntegerOverflow | SanitizerKind::ImplicitConversion |
+ SanitizerKind::Nullability | SanitizerKind::LocalBounds |
+ SanitizerKind::CFI | SanitizerKind::FloatDivideByZero;
+static const SanitizerMask TrappingDefault = SanitizerKind::CFI;
+static const SanitizerMask CFIClasses =
+ SanitizerKind::CFIVCall | SanitizerKind::CFINVCall |
+ SanitizerKind::CFIMFCall | SanitizerKind::CFIDerivedCast |
+ SanitizerKind::CFIUnrelatedCast;
+static const SanitizerMask CompatibleWithMinimalRuntime =
+ TrappingSupported | SanitizerKind::Scudo | SanitizerKind::ShadowCallStack;
enum CoverageFeature {
CoverageFunc = 1 << 0,
@@ -101,13 +122,17 @@ static void addDefaultBlacklists(const Driver &D, SanitizerMask Kinds,
struct Blacklist {
const char *File;
SanitizerMask Mask;
- } Blacklists[] = {{"asan_blacklist.txt", Address},
- {"hwasan_blacklist.txt", HWAddress},
- {"msan_blacklist.txt", Memory},
- {"tsan_blacklist.txt", Thread},
- {"dfsan_abilist.txt", DataFlow},
- {"cfi_blacklist.txt", CFI},
- {"ubsan_blacklist.txt", Undefined | Integer | Nullability}};
+ } Blacklists[] = {{"asan_blacklist.txt", SanitizerKind::Address},
+ {"hwasan_blacklist.txt", SanitizerKind::HWAddress},
+ {"memtag_blacklist.txt", SanitizerKind::MemTag},
+ {"msan_blacklist.txt", SanitizerKind::Memory},
+ {"tsan_blacklist.txt", SanitizerKind::Thread},
+ {"dfsan_abilist.txt", SanitizerKind::DataFlow},
+ {"cfi_blacklist.txt", SanitizerKind::CFI},
+ {"ubsan_blacklist.txt",
+ SanitizerKind::Undefined | SanitizerKind::Integer |
+ SanitizerKind::Nullability |
+ SanitizerKind::FloatDivideByZero}};
for (auto BL : Blacklists) {
if (!(Kinds & BL.Mask))
@@ -117,7 +142,7 @@ static void addDefaultBlacklists(const Driver &D, SanitizerMask Kinds,
llvm::sys::path::append(Path, "share", BL.File);
if (llvm::sys::fs::exists(Path))
BlacklistFiles.push_back(Path.str());
- else if (BL.Mask == CFI)
+ else if (BL.Mask == SanitizerKind::CFI)
// If cfi_blacklist.txt cannot be found in the resource dir, driver
// should fail.
D.Diag(clang::diag::err_drv_no_such_file) << Path;
@@ -137,10 +162,10 @@ static SanitizerMask setGroupBits(SanitizerMask Kinds) {
static SanitizerMask parseSanitizeTrapArgs(const Driver &D,
const llvm::opt::ArgList &Args) {
- SanitizerMask TrapRemove = 0; // During the loop below, the accumulated set of
+ SanitizerMask TrapRemove; // During the loop below, the accumulated set of
// sanitizers disabled by the current sanitizer
// argument or any argument after it.
- SanitizerMask TrappingKinds = 0;
+ SanitizerMask TrappingKinds;
SanitizerMask TrappingSupportedWithGroups = setGroupBits(TrappingSupported);
for (ArgList::const_reverse_iterator I = Args.rbegin(), E = Args.rend();
@@ -164,11 +189,12 @@ static SanitizerMask parseSanitizeTrapArgs(const Driver &D,
options::OPT_fsanitize_undefined_trap_on_error)) {
Arg->claim();
TrappingKinds |=
- expandSanitizerGroups(UndefinedGroup & ~TrapRemove) & ~TrapRemove;
+ expandSanitizerGroups(SanitizerKind::UndefinedGroup & ~TrapRemove) &
+ ~TrapRemove;
} else if (Arg->getOption().matches(
options::OPT_fno_sanitize_undefined_trap_on_error)) {
Arg->claim();
- TrapRemove |= expandSanitizerGroups(UndefinedGroup);
+ TrapRemove |= expandSanitizerGroups(SanitizerKind::UndefinedGroup);
}
}
@@ -190,13 +216,13 @@ bool SanitizerArgs::needsUbsanRt() const {
}
bool SanitizerArgs::needsCfiRt() const {
- return !(Sanitizers.Mask & CFI & ~TrapSanitizers.Mask) && CfiCrossDso &&
- !ImplicitCfiRuntime;
+ return !(Sanitizers.Mask & SanitizerKind::CFI & ~TrapSanitizers.Mask) &&
+ CfiCrossDso && !ImplicitCfiRuntime;
}
bool SanitizerArgs::needsCfiDiagRt() const {
- return (Sanitizers.Mask & CFI & ~TrapSanitizers.Mask) && CfiCrossDso &&
- !ImplicitCfiRuntime;
+ return (Sanitizers.Mask & SanitizerKind::CFI & ~TrapSanitizers.Mask) &&
+ CfiCrossDso && !ImplicitCfiRuntime;
}
bool SanitizerArgs::requiresPIE() const {
@@ -204,24 +230,26 @@ bool SanitizerArgs::requiresPIE() const {
}
bool SanitizerArgs::needsUnwindTables() const {
- return Sanitizers.Mask & NeedsUnwindTables;
+ return static_cast<bool>(Sanitizers.Mask & NeedsUnwindTables);
}
-bool SanitizerArgs::needsLTO() const { return Sanitizers.Mask & NeedsLTO; }
+bool SanitizerArgs::needsLTO() const {
+ return static_cast<bool>(Sanitizers.Mask & NeedsLTO);
+}
SanitizerArgs::SanitizerArgs(const ToolChain &TC,
const llvm::opt::ArgList &Args) {
- SanitizerMask AllRemove = 0; // During the loop below, the accumulated set of
+ SanitizerMask AllRemove; // During the loop below, the accumulated set of
// sanitizers disabled by the current sanitizer
// argument or any argument after it.
- SanitizerMask AllAddedKinds = 0; // Mask of all sanitizers ever enabled by
+ SanitizerMask AllAddedKinds; // Mask of all sanitizers ever enabled by
// -fsanitize= flags (directly or via group
// expansion), some of which may be disabled
// later. Used to carefully prune
// unused-argument diagnostics.
- SanitizerMask DiagnosedKinds = 0; // All Kinds we have diagnosed up to now.
+ SanitizerMask DiagnosedKinds; // All Kinds we have diagnosed up to now.
// Used to deduplicate diagnostics.
- SanitizerMask Kinds = 0;
+ SanitizerMask Kinds;
const SanitizerMask Supported = setGroupBits(TC.getSupportedSanitizers());
CfiCrossDso = Args.hasFlag(options::OPT_fsanitize_cfi_cross_dso,
@@ -252,7 +280,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
if (RemoveObjectSizeAtO0) {
AllRemove |= SanitizerKind::ObjectSize;
- // The user explicitly enabled the object size sanitizer. Warn that
+ // The user explicitly enabled the object size sanitizer. Warn
// that this does nothing at -O0.
if (Add & SanitizerKind::ObjectSize)
D.Diag(diag::warn_drv_object_size_disabled_O0)
@@ -296,12 +324,12 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
// identifiers.
// Fixing both of those may require changes to the cross-DSO CFI
// interface.
- if (CfiCrossDso && (Add & CFIMFCall & ~DiagnosedKinds)) {
+ if (CfiCrossDso && (Add & SanitizerKind::CFIMFCall & ~DiagnosedKinds)) {
D.Diag(diag::err_drv_argument_not_allowed_with)
<< "-fsanitize=cfi-mfcall"
<< "-fsanitize-cfi-cross-dso";
- Add &= ~CFIMFCall;
- DiagnosedKinds |= CFIMFCall;
+ Add &= ~SanitizerKind::CFIMFCall;
+ DiagnosedKinds |= SanitizerKind::CFIMFCall;
}
if (SanitizerMask KindsToDiagnose = Add & ~Supported & ~DiagnosedKinds) {
@@ -315,7 +343,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
// Test for -fno-rtti + explicit -fsanitizer=vptr before expanding groups
// so we don't error out if -fno-rtti and -fsanitize=undefined were
// passed.
- if ((Add & Vptr) && (RTTIMode == ToolChain::RM_Disabled)) {
+ if ((Add & SanitizerKind::Vptr) && (RTTIMode == ToolChain::RM_Disabled)) {
if (const llvm::opt::Arg *NoRTTIArg = TC.getRTTIArg()) {
assert(NoRTTIArg->getOption().matches(options::OPT_fno_rtti) &&
"RTTI disabled without -fno-rtti option?");
@@ -330,7 +358,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
}
// Take out the Vptr sanitizer from the enabled sanitizers
- AllRemove |= Vptr;
+ AllRemove |= SanitizerKind::Vptr;
}
Add = expandSanitizerGroups(Add);
@@ -343,14 +371,14 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
Add &= ~NotAllowedWithMinimalRuntime;
}
if (CfiCrossDso)
- Add &= ~CFIMFCall;
+ Add &= ~SanitizerKind::CFIMFCall;
Add &= Supported;
- if (Add & Fuzzer)
- Add |= FuzzerNoLink;
+ if (Add & SanitizerKind::Fuzzer)
+ Add |= SanitizerKind::FuzzerNoLink;
// Enable coverage if the fuzzing flag is set.
- if (Add & FuzzerNoLink) {
+ if (Add & SanitizerKind::FuzzerNoLink) {
CoverageFeatures |= CoverageInline8bitCounters | CoverageIndirCall |
CoverageTraceCmp | CoveragePCTable;
// Due to TLS differences, stack depth tracking is only enabled on Linux
@@ -367,23 +395,39 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
}
std::pair<SanitizerMask, SanitizerMask> IncompatibleGroups[] = {
- std::make_pair(Address, Thread | Memory),
- std::make_pair(Thread, Memory),
- std::make_pair(Leak, Thread | Memory),
- std::make_pair(KernelAddress, Address | Leak | Thread | Memory),
- std::make_pair(HWAddress, Address | Thread | Memory | KernelAddress),
- std::make_pair(Efficiency, Address | HWAddress | Leak | Thread | Memory |
- KernelAddress),
- std::make_pair(Scudo, Address | HWAddress | Leak | Thread | Memory |
- KernelAddress | Efficiency),
- std::make_pair(SafeStack, Address | HWAddress | Leak | Thread | Memory |
- KernelAddress | Efficiency),
- std::make_pair(KernelHWAddress, Address | HWAddress | Leak | Thread |
- Memory | KernelAddress | Efficiency |
- SafeStack),
- std::make_pair(KernelMemory, Address | HWAddress | Leak | Thread |
- Memory | KernelAddress | Efficiency |
- Scudo | SafeStack)};
+ std::make_pair(SanitizerKind::Address,
+ SanitizerKind::Thread | SanitizerKind::Memory),
+ std::make_pair(SanitizerKind::Thread, SanitizerKind::Memory),
+ std::make_pair(SanitizerKind::Leak,
+ SanitizerKind::Thread | SanitizerKind::Memory),
+ std::make_pair(SanitizerKind::KernelAddress,
+ SanitizerKind::Address | SanitizerKind::Leak |
+ SanitizerKind::Thread | SanitizerKind::Memory),
+ std::make_pair(SanitizerKind::HWAddress,
+ SanitizerKind::Address | SanitizerKind::Thread |
+ SanitizerKind::Memory | SanitizerKind::KernelAddress),
+ std::make_pair(SanitizerKind::Scudo,
+ SanitizerKind::Address | SanitizerKind::HWAddress |
+ SanitizerKind::Leak | SanitizerKind::Thread |
+ SanitizerKind::Memory | SanitizerKind::KernelAddress),
+ std::make_pair(SanitizerKind::SafeStack,
+ SanitizerKind::Address | SanitizerKind::HWAddress |
+ SanitizerKind::Leak | SanitizerKind::Thread |
+ SanitizerKind::Memory | SanitizerKind::KernelAddress),
+ std::make_pair(SanitizerKind::KernelHWAddress,
+ SanitizerKind::Address | SanitizerKind::HWAddress |
+ SanitizerKind::Leak | SanitizerKind::Thread |
+ SanitizerKind::Memory | SanitizerKind::KernelAddress |
+ SanitizerKind::SafeStack),
+ std::make_pair(SanitizerKind::KernelMemory,
+ SanitizerKind::Address | SanitizerKind::HWAddress |
+ SanitizerKind::Leak | SanitizerKind::Thread |
+ SanitizerKind::Memory | SanitizerKind::KernelAddress |
+ SanitizerKind::Scudo | SanitizerKind::SafeStack),
+ std::make_pair(SanitizerKind::MemTag,
+ SanitizerKind::Address | SanitizerKind::KernelAddress |
+ SanitizerKind::HWAddress |
+ SanitizerKind::KernelHWAddress)};
// Enable toolchain specific default sanitizers if not explicitly disabled.
SanitizerMask Default = TC.getDefaultSanitizers() & ~AllRemove;
@@ -399,8 +443,8 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
// We disable the vptr sanitizer if it was enabled by group expansion but RTTI
// is disabled.
- if ((Kinds & Vptr) && (RTTIMode == ToolChain::RM_Disabled)) {
- Kinds &= ~Vptr;
+ if ((Kinds & SanitizerKind::Vptr) && (RTTIMode == ToolChain::RM_Disabled)) {
+ Kinds &= ~SanitizerKind::Vptr;
}
// Check that LTO is enabled if we need it.
@@ -409,12 +453,12 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
<< lastArgumentForMask(D, Args, Kinds & NeedsLTO) << "-flto";
}
- if ((Kinds & ShadowCallStack) &&
+ if ((Kinds & SanitizerKind::ShadowCallStack) &&
TC.getTriple().getArch() == llvm::Triple::aarch64 &&
!llvm::AArch64::isX18ReservedByDefault(TC.getTriple()) &&
!Args.hasArg(options::OPT_ffixed_x18)) {
D.Diag(diag::err_drv_argument_only_allowed_with)
- << lastArgumentForMask(D, Args, Kinds & ShadowCallStack)
+ << lastArgumentForMask(D, Args, Kinds & SanitizerKind::ShadowCallStack)
<< "-ffixed-x18";
}
@@ -422,12 +466,12 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
// c++abi-specific parts of UBSan runtime, and they are not provided by the
// toolchain. We don't have a good way to check the latter, so we just
// check if the toolchain supports vptr.
- if (~Supported & Vptr) {
+ if (~Supported & SanitizerKind::Vptr) {
SanitizerMask KindsToDiagnose = Kinds & ~TrappingKinds & NeedsUbsanCxxRt;
// The runtime library supports the Microsoft C++ ABI, but only well enough
// for CFI. FIXME: Remove this once we support vptr on Windows.
if (TC.getTriple().isOSWindows())
- KindsToDiagnose &= ~CFI;
+ KindsToDiagnose &= ~SanitizerKind::CFI;
if (KindsToDiagnose) {
SanitizerSet S;
S.Mask = KindsToDiagnose;
@@ -456,8 +500,8 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
// Parse -f(no-)?sanitize-recover flags.
SanitizerMask RecoverableKinds = RecoverableByDefault | AlwaysRecoverable;
- SanitizerMask DiagnosedUnrecoverableKinds = 0;
- SanitizerMask DiagnosedAlwaysRecoverableKinds = 0;
+ SanitizerMask DiagnosedUnrecoverableKinds;
+ SanitizerMask DiagnosedAlwaysRecoverableKinds;
for (const auto *Arg : Args) {
const char *DeprecatedReplacement = nullptr;
if (Arg->getOption().matches(options::OPT_fsanitize_recover)) {
@@ -540,7 +584,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
}
// Parse -f[no-]sanitize-memory-track-origins[=level] options.
- if (AllAddedKinds & Memory) {
+ if (AllAddedKinds & SanitizerKind::Memory) {
if (Arg *A =
Args.getLastArg(options::OPT_fsanitize_memory_track_origins_EQ,
options::OPT_fsanitize_memory_track_origins,
@@ -568,19 +612,19 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
MsanUseAfterDtor = false;
}
- if (AllAddedKinds & Thread) {
- TsanMemoryAccess = Args.hasFlag(options::OPT_fsanitize_thread_memory_access,
- options::OPT_fno_sanitize_thread_memory_access,
- TsanMemoryAccess);
- TsanFuncEntryExit = Args.hasFlag(options::OPT_fsanitize_thread_func_entry_exit,
- options::OPT_fno_sanitize_thread_func_entry_exit,
- TsanFuncEntryExit);
- TsanAtomics = Args.hasFlag(options::OPT_fsanitize_thread_atomics,
- options::OPT_fno_sanitize_thread_atomics,
- TsanAtomics);
+ if (AllAddedKinds & SanitizerKind::Thread) {
+ TsanMemoryAccess = Args.hasFlag(
+ options::OPT_fsanitize_thread_memory_access,
+ options::OPT_fno_sanitize_thread_memory_access, TsanMemoryAccess);
+ TsanFuncEntryExit = Args.hasFlag(
+ options::OPT_fsanitize_thread_func_entry_exit,
+ options::OPT_fno_sanitize_thread_func_entry_exit, TsanFuncEntryExit);
+ TsanAtomics =
+ Args.hasFlag(options::OPT_fsanitize_thread_atomics,
+ options::OPT_fno_sanitize_thread_atomics, TsanAtomics);
}
- if (AllAddedKinds & CFI) {
+ if (AllAddedKinds & SanitizerKind::CFI) {
// Without PIE, external function address may resolve to a PLT record, which
// can not be verified by the target module.
NeedPIE |= CfiCrossDso;
@@ -604,7 +648,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
<< "-fsanitize-minimal-runtime"
<< lastArgumentForMask(D, Args, IncompatibleMask);
- SanitizerMask NonTrappingCfi = Kinds & CFI & ~TrappingKinds;
+ SanitizerMask NonTrappingCfi = Kinds & SanitizerKind::CFI & ~TrappingKinds;
if (NonTrappingCfi)
D.Diag(clang::diag::err_drv_argument_only_allowed_with)
<< "fsanitize-minimal-runtime"
@@ -692,7 +736,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
ImplicitCfiRuntime = TC.getTriple().isAndroid();
- if (AllAddedKinds & Address) {
+ if (AllAddedKinds & SanitizerKind::Address) {
NeedPIE |= TC.getTriple().isOSFuchsia();
if (Arg *A =
Args.getLastArg(options::OPT_fsanitize_address_field_padding)) {
@@ -714,7 +758,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
case options::OPT__SLASH_LDd:
D.Diag(clang::diag::err_drv_argument_not_allowed_with)
<< WindowsDebugRTArg->getAsString(Args)
- << lastArgumentForMask(D, Args, Address);
+ << lastArgumentForMask(D, Args, SanitizerKind::Address);
D.Diag(clang::diag::note_drv_address_sanitizer_debug_runtime);
}
}
@@ -733,17 +777,37 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
// See https://sourceware.org/bugzilla/show_bug.cgi?id=19002
AsanGlobalsDeadStripping =
!TC.getTriple().isOSBinFormatELF() || TC.getTriple().isOSFuchsia() ||
+ TC.getTriple().isPS4() ||
Args.hasArg(options::OPT_fsanitize_address_globals_dead_stripping);
AsanUseOdrIndicator =
Args.hasFlag(options::OPT_fsanitize_address_use_odr_indicator,
options::OPT_fno_sanitize_address_use_odr_indicator,
AsanUseOdrIndicator);
+
+ if (AllAddedKinds & SanitizerKind::PointerCompare & ~AllRemove) {
+ AsanInvalidPointerCmp = true;
+ }
+
+ if (AllAddedKinds & SanitizerKind::PointerSubtract & ~AllRemove) {
+ AsanInvalidPointerSub = true;
+ }
+
} else {
AsanUseAfterScope = false;
+ // -fsanitize=pointer-compare/pointer-subtract requires -fsanitize=address.
+ SanitizerMask DetectInvalidPointerPairs =
+ SanitizerKind::PointerCompare | SanitizerKind::PointerSubtract;
+ if (AllAddedKinds & DetectInvalidPointerPairs & ~AllRemove) {
+ TC.getDriver().Diag(clang::diag::err_drv_argument_only_allowed_with)
+ << lastArgumentForMask(D, Args,
+ SanitizerKind::PointerCompare |
+ SanitizerKind::PointerSubtract)
+ << "-fsanitize=address";
+ }
}
- if (AllAddedKinds & HWAddress) {
+ if (AllAddedKinds & SanitizerKind::HWAddress) {
if (Arg *HwasanAbiArg =
Args.getLastArg(options::OPT_fsanitize_hwaddress_abi_EQ)) {
HwasanAbi = HwasanAbiArg->getValue();
@@ -755,7 +819,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
}
}
- if (AllAddedKinds & SafeStack) {
+ if (AllAddedKinds & SanitizerKind::SafeStack) {
// SafeStack runtime is built into the system on Fuchsia.
SafeStackRuntime = !TC.getTriple().isOSFuchsia();
}
@@ -775,7 +839,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
static std::string toString(const clang::SanitizerSet &Sanitizers) {
std::string Res;
#define SANITIZER(NAME, ID) \
- if (Sanitizers.has(ID)) { \
+ if (Sanitizers.has(SanitizerKind::ID)) { \
if (!Res.empty()) \
Res += ","; \
Res += NAME; \
@@ -927,6 +991,16 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
if (AsanUseOdrIndicator)
CmdArgs.push_back("-fsanitize-address-use-odr-indicator");
+ if (AsanInvalidPointerCmp) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-asan-detect-invalid-pointer-cmp");
+ }
+
+ if (AsanInvalidPointerSub) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-asan-detect-invalid-pointer-sub");
+ }
+
if (!HwasanAbi.empty()) {
CmdArgs.push_back("-default-function-attr");
CmdArgs.push_back(Args.MakeArgString("hwasan-abi=" + HwasanAbi));
@@ -937,7 +1011,8 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
// https://github.com/google/sanitizers/issues/373
// We can't make this conditional on -fsanitize=leak, as that flag shouldn't
// affect compilation.
- if (Sanitizers.has(Memory) || Sanitizers.has(Address))
+ if (Sanitizers.has(SanitizerKind::Memory) ||
+ Sanitizers.has(SanitizerKind::Address))
CmdArgs.push_back("-fno-assume-sane-operator-new");
// Require -fvisibility= flag on non-Windows when compiling if vptr CFI is
@@ -960,18 +1035,14 @@ SanitizerMask parseArgValues(const Driver &D, const llvm::opt::Arg *A,
A->getOption().matches(options::OPT_fsanitize_trap_EQ) ||
A->getOption().matches(options::OPT_fno_sanitize_trap_EQ)) &&
"Invalid argument in parseArgValues!");
- SanitizerMask Kinds = 0;
+ SanitizerMask Kinds;
for (int i = 0, n = A->getNumValues(); i != n; ++i) {
const char *Value = A->getValue(i);
SanitizerMask Kind;
// Special case: don't accept -fsanitize=all.
if (A->getOption().matches(options::OPT_fsanitize_EQ) &&
0 == strcmp("all", Value))
- Kind = 0;
- // Similarly, don't accept -fsanitize=efficiency-all.
- else if (A->getOption().matches(options::OPT_fsanitize_EQ) &&
- 0 == strcmp("efficiency-all", Value))
- Kind = 0;
+ Kind = SanitizerMask();
else
Kind = parseSanitizerValue(Value, /*AllowGroups=*/true);
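
The SanitizerArgs hunks above replace the raw-integer mask (SanitizerMask Kinds = 0) with a default-constructed SanitizerMask and the bare enumerators (Address, HWAddress, SafeStack) with scoped SanitizerKind:: constants, and they add the rule that pointer-compare/pointer-subtract are only usable together with the address sanitizer. The following is a minimal standalone sketch of that typed-mask-plus-dependency-check pattern; the names Kind, Mask, and needsAddress are illustrative only and are not the clang driver API.

#include <cstdint>
#include <iostream>

enum class Kind : uint64_t {
  Address         = 1ull << 0,
  PointerCompare  = 1ull << 1,
  PointerSubtract = 1ull << 2,
};

struct Mask {
  uint64_t Bits = 0;                        // default-constructed == empty, like SanitizerMask()
  Mask &operator|=(Kind K) { Bits |= static_cast<uint64_t>(K); return *this; }
  bool has(Kind K) const { return Bits & static_cast<uint64_t>(K); }
};

// Mirrors the driver rule added above: the invalid-pointer-pair checks
// require the address sanitizer to be enabled as well.
bool needsAddress(const Mask &M) {
  return (M.has(Kind::PointerCompare) || M.has(Kind::PointerSubtract)) &&
         !M.has(Kind::Address);
}

int main() {
  Mask M;
  M |= Kind::PointerCompare;                // like passing -fsanitize=pointer-compare alone
  if (needsAddress(M))
    std::cout << "error: requires -fsanitize=address\n";
}
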
diff --git a/lib/Driver/Tool.cpp b/lib/Driver/Tool.cpp
index 818494662179..9ff6e863a124 100644
--- a/lib/Driver/Tool.cpp
+++ b/lib/Driver/Tool.cpp
@@ -1,9 +1,8 @@
//===--- Tool.cpp - Compilation Tools -------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChain.cpp b/lib/Driver/ToolChain.cpp
index 88a627eab6de..b1fddb0af55d 100644
--- a/lib/Driver/ToolChain.cpp
+++ b/lib/Driver/ToolChain.cpp
@@ -1,9 +1,8 @@
//===- ToolChain.cpp - Collections of tools for one platform --------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -74,17 +73,13 @@ ToolChain::ToolChain(const Driver &D, const llvm::Triple &T,
const ArgList &Args)
: D(D), Triple(T), Args(Args), CachedRTTIArg(GetRTTIArgument(Args)),
CachedRTTIMode(CalculateRTTIMode(Args, Triple, CachedRTTIArg)) {
- SmallString<128> P;
-
- P.assign(D.ResourceDir);
- llvm::sys::path::append(P, D.getTargetTriple(), "lib");
- if (getVFS().exists(P))
- getLibraryPaths().push_back(P.str());
+ if (D.CCCIsCXX()) {
+ if (auto CXXStdlibPath = getCXXStdlibPath())
+ getFilePaths().push_back(*CXXStdlibPath);
+ }
- P.assign(D.ResourceDir);
- llvm::sys::path::append(P, Triple.str(), "lib");
- if (getVFS().exists(P))
- getLibraryPaths().push_back(P.str());
+ if (auto RuntimePath = getRuntimePath())
+ getLibraryPaths().push_back(*RuntimePath);
std::string CandidateLibPath = getArchSpecificLibPath();
if (getVFS().exists(CandidateLibPath))
@@ -113,6 +108,10 @@ bool ToolChain::useRelaxRelocations() const {
return ENABLE_X86_RELAX_RELOCATIONS;
}
+bool ToolChain::isNoExecStackDefault() const {
+ return false;
+}
+
const SanitizerArgs& ToolChain::getSanitizerArgs() const {
if (!SanitizerArguments.get())
SanitizerArguments.reset(new SanitizerArgs(*this, Args));
@@ -363,16 +362,27 @@ std::string ToolChain::getCompilerRTPath() const {
}
std::string ToolChain::getCompilerRT(const ArgList &Args, StringRef Component,
- bool Shared) const {
+ FileType Type) const {
const llvm::Triple &TT = getTriple();
bool IsITANMSVCWindows =
TT.isWindowsMSVCEnvironment() || TT.isWindowsItaniumEnvironment();
- const char *Prefix = IsITANMSVCWindows ? "" : "lib";
- const char *Suffix = Shared ? (Triple.isOSWindows() ? ".lib" : ".so")
- : (IsITANMSVCWindows ? ".lib" : ".a");
- if (Shared && Triple.isWindowsGNUEnvironment())
- Suffix = ".dll.a";
+ const char *Prefix =
+ IsITANMSVCWindows || Type == ToolChain::FT_Object ? "" : "lib";
+ const char *Suffix;
+ switch (Type) {
+ case ToolChain::FT_Object:
+ Suffix = IsITANMSVCWindows ? ".obj" : ".o";
+ break;
+ case ToolChain::FT_Static:
+ Suffix = IsITANMSVCWindows ? ".lib" : ".a";
+ break;
+ case ToolChain::FT_Shared:
+ Suffix = Triple.isOSWindows()
+ ? (Triple.isWindowsGNUEnvironment() ? ".dll.a" : ".lib")
+ : ".so";
+ break;
+ }
for (const auto &LibPath : getLibraryPaths()) {
SmallString<128> P(LibPath);
@@ -391,8 +401,45 @@ std::string ToolChain::getCompilerRT(const ArgList &Args, StringRef Component,
const char *ToolChain::getCompilerRTArgString(const llvm::opt::ArgList &Args,
StringRef Component,
- bool Shared) const {
- return Args.MakeArgString(getCompilerRT(Args, Component, Shared));
+ FileType Type) const {
+ return Args.MakeArgString(getCompilerRT(Args, Component, Type));
+}
+
+
+Optional<std::string> ToolChain::getRuntimePath() const {
+ SmallString<128> P;
+
+ // First try the triple passed to driver as --target=<triple>.
+ P.assign(D.ResourceDir);
+ llvm::sys::path::append(P, "lib", D.getTargetTriple());
+ if (getVFS().exists(P))
+ return llvm::Optional<std::string>(P.str());
+
+ // Second try the normalized triple.
+ P.assign(D.ResourceDir);
+ llvm::sys::path::append(P, "lib", Triple.str());
+ if (getVFS().exists(P))
+ return llvm::Optional<std::string>(P.str());
+
+ return None;
+}
+
+Optional<std::string> ToolChain::getCXXStdlibPath() const {
+ SmallString<128> P;
+
+ // First try the triple passed to driver as --target=<triple>.
+ P.assign(D.Dir);
+ llvm::sys::path::append(P, "..", "lib", D.getTargetTriple(), "c++");
+ if (getVFS().exists(P))
+ return llvm::Optional<std::string>(P.str());
+
+ // Second try the normalized triple.
+ P.assign(D.Dir);
+ llvm::sys::path::append(P, "..", "lib", Triple.str(), "c++");
+ if (getVFS().exists(P))
+ return llvm::Optional<std::string>(P.str());
+
+ return None;
}
std::string ToolChain::getArchSpecificLibPath() const {
@@ -403,12 +450,18 @@ std::string ToolChain::getArchSpecificLibPath() const {
}
bool ToolChain::needsProfileRT(const ArgList &Args) {
+ if (Args.hasArg(options::OPT_noprofilelib))
+ return false;
+
if (needsGCovInstrumentation(Args) ||
Args.hasArg(options::OPT_fprofile_generate) ||
Args.hasArg(options::OPT_fprofile_generate_EQ) ||
+ Args.hasArg(options::OPT_fcs_profile_generate) ||
+ Args.hasArg(options::OPT_fcs_profile_generate_EQ) ||
Args.hasArg(options::OPT_fprofile_instr_generate) ||
Args.hasArg(options::OPT_fprofile_instr_generate_EQ) ||
- Args.hasArg(options::OPT_fcreate_profile))
+ Args.hasArg(options::OPT_fcreate_profile) ||
+ Args.hasArg(options::OPT_forder_file_instrumentation))
return true;
return false;
@@ -680,6 +733,33 @@ ToolChain::RuntimeLibType ToolChain::GetRuntimeLibType(
return GetDefaultRuntimeLibType();
}
+ToolChain::UnwindLibType ToolChain::GetUnwindLibType(
+ const ArgList &Args) const {
+ const Arg *A = Args.getLastArg(options::OPT_unwindlib_EQ);
+ StringRef LibName = A ? A->getValue() : CLANG_DEFAULT_UNWINDLIB;
+
+ if (LibName == "none")
+ return ToolChain::UNW_None;
+ else if (LibName == "platform" || LibName == "") {
+ ToolChain::RuntimeLibType RtLibType = GetRuntimeLibType(Args);
+ if (RtLibType == ToolChain::RLT_CompilerRT)
+ return ToolChain::UNW_None;
+ else if (RtLibType == ToolChain::RLT_Libgcc)
+ return ToolChain::UNW_Libgcc;
+ } else if (LibName == "libunwind") {
+ if (GetRuntimeLibType(Args) == RLT_Libgcc)
+ getDriver().Diag(diag::err_drv_incompatible_unwindlib);
+ return ToolChain::UNW_CompilerRT;
+ } else if (LibName == "libgcc")
+ return ToolChain::UNW_Libgcc;
+
+ if (A)
+ getDriver().Diag(diag::err_drv_invalid_unwindlib_name)
+ << A->getAsString(Args);
+
+ return GetDefaultUnwindLibType();
+}
+
ToolChain::CXXStdlibType ToolChain::GetCXXStdlibType(const ArgList &Args) const{
const Arg *A = Args.getLastArg(options::OPT_stdlib_EQ);
StringRef LibName = A ? A->getValue() : CLANG_DEFAULT_CXX_STDLIB;
@@ -777,10 +857,6 @@ void ToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
void ToolChain::AddFilePathLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
- for (const auto &LibPath : getLibraryPaths())
- if(LibPath.length() > 0)
- CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + LibPath));
-
for (const auto &LibPath : getFilePaths())
if(LibPath.length() > 0)
CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + LibPath));
@@ -819,21 +895,24 @@ SanitizerMask ToolChain::getSupportedSanitizers() const {
// Return sanitizers which don't require runtime support and are not
// platform dependent.
- using namespace SanitizerKind;
-
- SanitizerMask Res = (Undefined & ~Vptr & ~Function) | (CFI & ~CFIICall) |
- CFICastStrict | UnsignedIntegerOverflow |
- ImplicitConversion | Nullability | LocalBounds;
+ SanitizerMask Res = (SanitizerKind::Undefined & ~SanitizerKind::Vptr &
+ ~SanitizerKind::Function) |
+ (SanitizerKind::CFI & ~SanitizerKind::CFIICall) |
+ SanitizerKind::CFICastStrict |
+ SanitizerKind::FloatDivideByZero |
+ SanitizerKind::UnsignedIntegerOverflow |
+ SanitizerKind::ImplicitConversion |
+ SanitizerKind::Nullability | SanitizerKind::LocalBounds;
if (getTriple().getArch() == llvm::Triple::x86 ||
getTriple().getArch() == llvm::Triple::x86_64 ||
getTriple().getArch() == llvm::Triple::arm ||
getTriple().getArch() == llvm::Triple::aarch64 ||
getTriple().getArch() == llvm::Triple::wasm32 ||
getTriple().getArch() == llvm::Triple::wasm64)
- Res |= CFIICall;
+ Res |= SanitizerKind::CFIICall;
if (getTriple().getArch() == llvm::Triple::x86_64 ||
getTriple().getArch() == llvm::Triple::aarch64)
- Res |= ShadowCallStack;
+ Res |= SanitizerKind::ShadowCallStack;
return Res;
}
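
The ToolChain.cpp changes above generalize getCompilerRT from a bool Shared flag to a FileType (object, static, shared) and pick the lib prefix and .o/.obj/.a/.lib/.so suffix from that type plus whether the target is MSVC-like. A simplified, self-contained sketch of that selection is below; runtimeName is a hypothetical helper, and the MinGW ".dll.a" special case from the real code is omitted for brevity.

#include <iostream>
#include <string>

enum class FileType { Object, Static, Shared };

// MSVC-style targets use no "lib" prefix and .obj/.lib; ELF-style targets
// use the lib prefix with .o/.a/.so, matching the switch added above.
std::string runtimeName(const std::string &Component, FileType Type, bool MSVCLike) {
  std::string Prefix = (MSVCLike || Type == FileType::Object) ? "" : "lib";
  std::string Suffix;
  switch (Type) {
  case FileType::Object: Suffix = MSVCLike ? ".obj" : ".o"; break;
  case FileType::Static: Suffix = MSVCLike ? ".lib" : ".a"; break;
  case FileType::Shared: Suffix = MSVCLike ? ".lib" : ".so"; break;
  }
  return Prefix + "clang_rt." + Component + Suffix;
}

int main() {
  std::cout << runtimeName("asan", FileType::Static, false) << "\n"; // libclang_rt.asan.a
  std::cout << runtimeName("asan", FileType::Shared, true) << "\n";  // clang_rt.asan.lib
}
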
diff --git a/lib/Driver/ToolChains/AMDGPU.cpp b/lib/Driver/ToolChains/AMDGPU.cpp
index a421a09891cd..df4e7ee202bf 100644
--- a/lib/Driver/ToolChains/AMDGPU.cpp
+++ b/lib/Driver/ToolChains/AMDGPU.cpp
@@ -1,9 +1,8 @@
//===--- AMDGPU.cpp - AMDGPU ToolChain Implementations ----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -39,14 +38,18 @@ void amdgpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
void amdgpu::getAMDGPUTargetFeatures(const Driver &D,
const llvm::opt::ArgList &Args,
std::vector<StringRef> &Features) {
- if (const Arg *dAbi = Args.getLastArg(options::OPT_mamdgpu_debugger_abi)) {
- StringRef value = dAbi->getValue();
- if (value == "1.0") {
- Features.push_back("+amdgpu-debugger-insert-nops");
- Features.push_back("+amdgpu-debugger-emit-prologue");
- } else {
- D.Diag(diag::err_drv_clang_unsupported) << dAbi->getAsString(Args);
- }
+ if (const Arg *dAbi = Args.getLastArg(options::OPT_mamdgpu_debugger_abi))
+ D.Diag(diag::err_drv_clang_unsupported) << dAbi->getAsString(Args);
+
+ if (Args.getLastArg(options::OPT_mwavefrontsize64)) {
+ Features.push_back("-wavefrontsize16");
+ Features.push_back("-wavefrontsize32");
+ Features.push_back("+wavefrontsize64");
+ }
+ if (Args.getLastArg(options::OPT_mno_wavefrontsize64)) {
+ Features.push_back("-wavefrontsize16");
+ Features.push_back("+wavefrontsize32");
+ Features.push_back("-wavefrontsize64");
}
handleTargetFeaturesGroup(
@@ -109,5 +112,6 @@ void AMDGPUToolChain::addClangTargetOptions(
options::OPT_fvisibility_ms_compat)) {
CC1Args.push_back("-fvisibility");
CC1Args.push_back("hidden");
+ CC1Args.push_back("-fapply-global-visibility-to-externs");
}
}
diff --git a/lib/Driver/ToolChains/AMDGPU.h b/lib/Driver/ToolChains/AMDGPU.h
index 9d38eeedf59d..5585cc534861 100644
--- a/lib/Driver/ToolChains/AMDGPU.h
+++ b/lib/Driver/ToolChains/AMDGPU.h
@@ -1,9 +1,8 @@
//===--- AMDGPU.h - AMDGPU ToolChain Implementations ----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -56,8 +55,10 @@ protected:
public:
AMDGPUToolChain(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
- unsigned GetDefaultDwarfVersion() const override { return 2; }
+ unsigned GetDefaultDwarfVersion() const override { return 5; }
bool IsIntegratedAssemblerDefault() const override { return true; }
+ bool IsMathErrnoDefault() const override { return false; }
+
llvm::opt::DerivedArgList *
TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
Action::OffloadKind DeviceOffloadKind) const override;
diff --git a/lib/Driver/ToolChains/AVR.cpp b/lib/Driver/ToolChains/AVR.cpp
index 877009af8a30..4a60d9ec589b 100644
--- a/lib/Driver/ToolChains/AVR.cpp
+++ b/lib/Driver/ToolChains/AVR.cpp
@@ -1,9 +1,8 @@
//===--- AVR.cpp - AVR ToolChain Implementations ----------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -11,7 +10,14 @@
#include "CommonArgs.h"
#include "InputInfo.h"
#include "clang/Driver/Compilation.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Options.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Support/FileSystem.h"
using namespace clang::driver;
using namespace clang::driver::toolchains;
@@ -19,12 +25,76 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
+namespace {
+
+// TODO: Consider merging this into the AVR device table
+// array in Targets/AVR.cpp.
+llvm::Optional<StringRef> GetMcuFamilyName(StringRef MCU) {
+ return llvm::StringSwitch<llvm::Optional<StringRef>>(MCU)
+ .Case("atmega328", Optional<StringRef>("avr5"))
+ .Case("atmega328p", Optional<StringRef>("avr5"))
+ .Default(Optional<StringRef>());
+}
+
+const StringRef PossibleAVRLibcLocations[] = {
+ "/usr/avr",
+ "/usr/lib/avr",
+};
+
+} // end anonymous namespace
+
/// AVR Toolchain
AVRToolChain::AVRToolChain(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
- : Generic_ELF(D, Triple, Args) { }
+ : Generic_ELF(D, Triple, Args), LinkStdlib(false) {
+ GCCInstallation.init(Triple, Args);
+
+ // Only add default libraries if the user hasn't explicitly opted out.
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs) &&
+ !Args.hasArg(options::OPT_c /* does not apply when not linking */)) {
+ std::string CPU = getCPUName(Args, Triple);
+
+ if (CPU.empty()) {
+ // We cannot link any standard libraries without an MCU specified.
+ D.Diag(diag::warn_drv_avr_mcu_not_specified);
+ } else {
+ Optional<StringRef> FamilyName = GetMcuFamilyName(CPU);
+ Optional<std::string> AVRLibcRoot = findAVRLibcInstallation();
+
+ if (!FamilyName.hasValue()) {
+ // We do not have an entry for this CPU in the family
+ // mapping table yet.
+ D.Diag(diag::warn_drv_avr_family_linking_stdlibs_not_implemented)
+ << CPU;
+ } else if (!GCCInstallation.isValid()) {
+ // No avr-gcc found and so no runtime linked.
+ D.Diag(diag::warn_drv_avr_gcc_not_found);
+ } else if (!AVRLibcRoot.hasValue()) {
+ // No avr-libc found and so no runtime linked.
+ D.Diag(diag::warn_drv_avr_libc_not_found);
+ } else { // We have enough information to link stdlibs
+ std::string GCCRoot = GCCInstallation.getInstallPath();
+ std::string LibcRoot = AVRLibcRoot.getValue();
+
+ getFilePaths().push_back(LibcRoot + std::string("/lib/") +
+ std::string(*FamilyName));
+ getFilePaths().push_back(LibcRoot + std::string("/lib/") +
+ std::string(*FamilyName));
+ getFilePaths().push_back(GCCRoot + std::string("/") +
+ std::string(*FamilyName));
+
+ LinkStdlib = true;
+ }
+ }
+
+ if (!LinkStdlib)
+ D.Diag(diag::warn_drv_avr_stdlib_not_linked);
+ }
+}
+
Tool *AVRToolChain::buildLinker() const {
- return new tools::AVR::Linker(*this);
+ return new tools::AVR::Linker(getTriple(), *this, LinkStdlib);
}
void AVR::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -32,13 +102,58 @@ void AVR::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
+ // Compute information about the target AVR.
+ std::string CPU = getCPUName(Args, getToolChain().getTriple());
+ llvm::Optional<StringRef> FamilyName = GetMcuFamilyName(CPU);
std::string Linker = getToolChain().GetProgramPath(getShortName());
ArgStringList CmdArgs;
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
+
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
+
+ // Enable garbage collection of unused sections.
+ CmdArgs.push_back("--gc-sections");
+
+ // Add library search paths before we specify libraries.
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ getToolChain().AddFilePathLibArgs(Args, CmdArgs);
+
+ // If the family name is known, we can link with the device-specific libgcc.
+ // Without it, libgcc will simply not be linked. This matches avr-gcc
+ // behavior.
+ if (LinkStdlib) {
+ assert(!CPU.empty() && "CPU name must be known in order to link stdlibs");
+
+ // Add the object file for the CRT.
+ std::string CrtFileName = std::string("-l:crt") + CPU + std::string(".o");
+ CmdArgs.push_back(Args.MakeArgString(CrtFileName));
+
+ CmdArgs.push_back("-lgcc");
+ CmdArgs.push_back("-lm");
+ CmdArgs.push_back("-lc");
+
+ // Add the link library specific to the MCU.
+ CmdArgs.push_back(Args.MakeArgString(std::string("-l") + CPU));
+
+ // Specify the family name as the emulation mode to use.
+ // This is almost always required because otherwise avr-ld
+ // will assume 'avr2' and warn about the program being larger
+ // than the bare minimum supports.
+ CmdArgs.push_back(Args.MakeArgString(std::string("-m") + *FamilyName));
+ }
+
C.addCommand(llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Linker),
CmdArgs, Inputs));
}
-// AVR tools end.
+
+llvm::Optional<std::string> AVRToolChain::findAVRLibcInstallation() const {
+ for (StringRef PossiblePath : PossibleAVRLibcLocations) {
+ // Return the first avr-libc installation that exists.
+ if (llvm::sys::fs::is_directory(PossiblePath))
+ return Optional<std::string>(std::string(PossiblePath));
+ }
+
+ return llvm::None;
+}
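
The AVR toolchain changes above only link the standard libraries when an MCU is given, its device family is known, and both avr-gcc and avr-libc installations are found; the family name then selects the library subdirectories and the -m<family> emulation passed to avr-ld. A toy version of the MCU-to-family lookup, in the spirit of GetMcuFamilyName but using only the standard library (mcuFamily and its table are illustrative), is sketched below.

#include <iostream>
#include <map>
#include <optional>
#include <string>

// Map a concrete MCU name to the avr-ld emulation/family used for the
// -m<family> flag and the per-family library search paths.
std::optional<std::string> mcuFamily(const std::string &MCU) {
  static const std::map<std::string, std::string> Table = {
      {"atmega328", "avr5"}, {"atmega328p", "avr5"}};
  auto It = Table.find(MCU);
  if (It == Table.end())
    return std::nullopt;
  return It->second;
}

int main() {
  if (auto Family = mcuFamily("atmega328p"))
    std::cout << "link with -m" << *Family << " and -l:crtatmega328p.o\n";
  else
    std::cout << "no stdlib linking: family unknown\n";
}
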
diff --git a/lib/Driver/ToolChains/AVR.h b/lib/Driver/ToolChains/AVR.h
index a7479a7f5652..d244fc4f90e9 100644
--- a/lib/Driver/ToolChains/AVR.h
+++ b/lib/Driver/ToolChains/AVR.h
@@ -1,9 +1,8 @@
//===--- AVR.h - AVR Tool and ToolChain Implementations ---------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -20,11 +19,21 @@ namespace driver {
namespace toolchains {
class LLVM_LIBRARY_VISIBILITY AVRToolChain : public Generic_ELF {
-protected:
- Tool *buildLinker() const override;
public:
AVRToolChain(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
+
+protected:
+ Tool *buildLinker() const override;
+
+private:
+  /// Whether libgcc, libc, and friends should be linked.
+ ///
+ /// This is not done if the user does not specify a
+ /// microcontroller on the command line.
+ bool LinkStdlib;
+
+ llvm::Optional<std::string> findAVRLibcInstallation() const;
};
} // end namespace toolchains
@@ -33,13 +42,20 @@ namespace tools {
namespace AVR {
class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
public:
- Linker(const ToolChain &TC) : GnuTool("AVR::Linker", "avr-ld", TC) {}
+ Linker(const llvm::Triple &Triple, const ToolChain &TC, bool LinkStdlib)
+ : GnuTool("AVR::Linker", "avr-ld", TC), Triple(Triple),
+ LinkStdlib(LinkStdlib) {}
+
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output, const InputInfoList &Inputs,
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
+
+protected:
+ const llvm::Triple &Triple;
+ bool LinkStdlib;
};
} // end namespace AVR
} // end namespace tools
diff --git a/lib/Driver/ToolChains/Ananas.cpp b/lib/Driver/ToolChains/Ananas.cpp
index 006fdc029ef8..e3511198cb2d 100644
--- a/lib/Driver/ToolChains/Ananas.cpp
+++ b/lib/Driver/ToolChains/Ananas.cpp
@@ -1,9 +1,8 @@
//===--- Ananas.cpp - Ananas ToolChain Implementations ------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Ananas.h b/lib/Driver/ToolChains/Ananas.h
index 2563dd2d49a9..5e45b47fc108 100644
--- a/lib/Driver/ToolChains/Ananas.h
+++ b/lib/Driver/ToolChains/Ananas.h
@@ -1,9 +1,8 @@
//===--- Ananas.h - Ananas ToolChain Implementations --------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Arch/AArch64.cpp b/lib/Driver/ToolChains/Arch/AArch64.cpp
index 71e55fe79e27..35d11f4e2d3b 100644
--- a/lib/Driver/ToolChains/Arch/AArch64.cpp
+++ b/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -1,9 +1,8 @@
//===--- AArch64.cpp - AArch64 (not ARM) Helpers for Tools ------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -195,6 +194,18 @@ void aarch64::getAArch64TargetFeatures(const Driver &D,
Features.push_back("-neon");
}
+ if (Arg *A = Args.getLastArg(options::OPT_mtp_mode_EQ)) {
+ StringRef Mtp = A->getValue();
+ if (Mtp == "el3")
+ Features.push_back("+tpidr-el3");
+ else if (Mtp == "el2")
+ Features.push_back("+tpidr-el2");
+ else if (Mtp == "el1")
+ Features.push_back("+tpidr-el1");
+ else if (Mtp != "el0")
+ D.Diag(diag::err_drv_invalid_mtp) << A->getAsString(Args);
+ }
+
// En/disable crc
if (Arg *A = Args.getLastArg(options::OPT_mcrc, options::OPT_mnocrc)) {
if (A->getOption().matches(options::OPT_mcrc))
@@ -208,7 +219,7 @@ void aarch64::getAArch64TargetFeatures(const Driver &D,
// TargetParser rewrite.
const auto ItRNoFullFP16 = std::find(Features.rbegin(), Features.rend(), "-fullfp16");
const auto ItRFP16FML = std::find(Features.rbegin(), Features.rend(), "+fp16fml");
- if (std::find(Features.begin(), Features.end(), "+v8.4a") != Features.end()) {
+ if (llvm::is_contained(Features, "+v8.4a")) {
const auto ItRFullFP16 = std::find(Features.rbegin(), Features.rend(), "+fullfp16");
if (ItRFullFP16 < ItRNoFullFP16 && ItRFullFP16 < ItRFP16FML) {
// Only entangled feature that can be to the right of this +fullfp16 is -fp16fml.
@@ -218,8 +229,7 @@ void aarch64::getAArch64TargetFeatures(const Driver &D,
}
else
goto fp16_fml_fallthrough;
- }
- else {
+ } else {
fp16_fml_fallthrough:
// In both of these cases, putting the 'other' feature on the end of the vector will
// result in the same effect as placing it immediately after the current feature.
@@ -336,12 +346,57 @@ fp16_fml_fallthrough:
if (Args.hasArg(options::OPT_ffixed_x7))
Features.push_back("+reserve-x7");
+ if (Args.hasArg(options::OPT_ffixed_x9))
+ Features.push_back("+reserve-x9");
+
+ if (Args.hasArg(options::OPT_ffixed_x10))
+ Features.push_back("+reserve-x10");
+
+ if (Args.hasArg(options::OPT_ffixed_x11))
+ Features.push_back("+reserve-x11");
+
+ if (Args.hasArg(options::OPT_ffixed_x12))
+ Features.push_back("+reserve-x12");
+
+ if (Args.hasArg(options::OPT_ffixed_x13))
+ Features.push_back("+reserve-x13");
+
+ if (Args.hasArg(options::OPT_ffixed_x14))
+ Features.push_back("+reserve-x14");
+
+ if (Args.hasArg(options::OPT_ffixed_x15))
+ Features.push_back("+reserve-x15");
+
if (Args.hasArg(options::OPT_ffixed_x18))
Features.push_back("+reserve-x18");
if (Args.hasArg(options::OPT_ffixed_x20))
Features.push_back("+reserve-x20");
+ if (Args.hasArg(options::OPT_ffixed_x21))
+ Features.push_back("+reserve-x21");
+
+ if (Args.hasArg(options::OPT_ffixed_x22))
+ Features.push_back("+reserve-x22");
+
+ if (Args.hasArg(options::OPT_ffixed_x23))
+ Features.push_back("+reserve-x23");
+
+ if (Args.hasArg(options::OPT_ffixed_x24))
+ Features.push_back("+reserve-x24");
+
+ if (Args.hasArg(options::OPT_ffixed_x25))
+ Features.push_back("+reserve-x25");
+
+ if (Args.hasArg(options::OPT_ffixed_x26))
+ Features.push_back("+reserve-x26");
+
+ if (Args.hasArg(options::OPT_ffixed_x27))
+ Features.push_back("+reserve-x27");
+
+ if (Args.hasArg(options::OPT_ffixed_x28))
+ Features.push_back("+reserve-x28");
+
if (Args.hasArg(options::OPT_fcall_saved_x8))
Features.push_back("+call-saved-x8");
diff --git a/lib/Driver/ToolChains/Arch/AArch64.h b/lib/Driver/ToolChains/Arch/AArch64.h
index 5f6148ebd6c4..713af870d69f 100644
--- a/lib/Driver/ToolChains/Arch/AArch64.h
+++ b/lib/Driver/ToolChains/Arch/AArch64.h
@@ -1,9 +1,8 @@
//===--- AArch64.h - AArch64-specific (not ARM) Tool Helpers ----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Arch/ARM.cpp b/lib/Driver/ToolChains/Arch/ARM.cpp
index f55efc1a22e3..d1db583e5280 100644
--- a/lib/Driver/ToolChains/Arch/ARM.cpp
+++ b/lib/Driver/ToolChains/Arch/ARM.cpp
@@ -1,9 +1,8 @@
//===--- ARM.cpp - ARM (not AArch64) Helpers for Tools ----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -73,15 +72,13 @@ static void getARMFPUFeatures(const Driver &D, const Arg *A,
// Decode ARM features from string like +[no]featureA+[no]featureB+...
static bool DecodeARMFeatures(const Driver &D, StringRef text,
+ StringRef CPU, llvm::ARM::ArchKind ArchKind,
std::vector<StringRef> &Features) {
SmallVector<StringRef, 8> Split;
text.split(Split, StringRef("+"), -1, false);
for (StringRef Feature : Split) {
- StringRef FeatureName = llvm::ARM::getArchExtFeature(Feature);
- if (!FeatureName.empty())
- Features.push_back(FeatureName);
- else
+ if (!appendArchExtFeatures(CPU, ArchKind, Feature, Features))
return false;
}
return true;
@@ -89,6 +86,7 @@ static bool DecodeARMFeatures(const Driver &D, StringRef text,
static void DecodeARMFeaturesFromCPU(const Driver &D, StringRef CPU,
std::vector<StringRef> &Features) {
+ CPU = CPU.split("+").first;
if (CPU != "generic") {
llvm::ARM::ArchKind ArchKind = llvm::ARM::parseCPUArch(CPU);
unsigned Extension = llvm::ARM::getDefaultExtensions(CPU, ArchKind);
@@ -100,14 +98,16 @@ static void DecodeARMFeaturesFromCPU(const Driver &D, StringRef CPU,
// getARMArch is used here instead of just checking the -march value in order
// to handle -march=native correctly.
static void checkARMArchName(const Driver &D, const Arg *A, const ArgList &Args,
- llvm::StringRef ArchName,
+ llvm::StringRef ArchName, llvm::StringRef CPUName,
std::vector<StringRef> &Features,
const llvm::Triple &Triple) {
std::pair<StringRef, StringRef> Split = ArchName.split("+");
std::string MArch = arm::getARMArch(ArchName, Triple);
- if (llvm::ARM::parseArch(MArch) == llvm::ARM::ArchKind::INVALID ||
- (Split.second.size() && !DecodeARMFeatures(D, Split.second, Features)))
+ llvm::ARM::ArchKind ArchKind = llvm::ARM::parseArch(MArch);
+ if (ArchKind == llvm::ARM::ArchKind::INVALID ||
+ (Split.second.size() && !DecodeARMFeatures(
+ D, Split.second, CPUName, ArchKind, Features)))
D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
}
@@ -119,8 +119,11 @@ static void checkARMCPUName(const Driver &D, const Arg *A, const ArgList &Args,
std::pair<StringRef, StringRef> Split = CPUName.split("+");
std::string CPU = arm::getARMTargetCPU(CPUName, ArchName, Triple);
- if (arm::getLLVMArchSuffixForARM(CPU, ArchName, Triple).empty() ||
- (Split.second.size() && !DecodeARMFeatures(D, Split.second, Features)))
+ llvm::ARM::ArchKind ArchKind =
+ arm::getLLVMArchKindForARM(CPU, ArchName, Triple);
+ if (ArchKind == llvm::ARM::ArchKind::INVALID ||
+ (Split.second.size() && !DecodeARMFeatures(
+ D, Split.second, CPU, ArchKind, Features)))
D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
}
@@ -249,7 +252,7 @@ arm::FloatABI arm::getARMFloatABI(const ToolChain &TC, const ArgList &Args) {
ABI = FloatABI::SoftFP;
break;
case llvm::Triple::Android:
- ABI = (SubArch == 7) ? FloatABI::SoftFP : FloatABI::Soft;
+ ABI = (SubArch >= 7) ? FloatABI::SoftFP : FloatABI::Soft;
break;
default:
// Assume "soft", but warn the user we are guessing.
@@ -286,6 +289,13 @@ void arm::getARMTargetFeatures(const ToolChain &TC,
const Arg *WaCPU = nullptr, *WaFPU = nullptr;
const Arg *WaHDiv = nullptr, *WaArch = nullptr;
+ // This vector will accumulate features from the architecture
+ // extension suffixes on -mcpu and -march (e.g. the 'bar' in
+ // -mcpu=foo+bar). We want to apply those after the features derived
+ // from the FPU, in case -mfpu generates a negative feature which
+ // the +bar is supposed to override.
+ std::vector<StringRef> ExtensionFeatures;
+
if (!ForAS) {
// FIXME: Note, this is a hack, the LLVM backend doesn't actually use these
// yet (it uses the -mfloat-abi and -msoft-float options), and it is
@@ -327,34 +337,35 @@ void arm::getARMTargetFeatures(const ToolChain &TC,
if (ThreadPointer == arm::ReadTPMode::Cp15)
Features.push_back("+read-tp-hard");
- // Check -march. ClangAs gives preference to -Wa,-march=.
const Arg *ArchArg = Args.getLastArg(options::OPT_march_EQ);
+ const Arg *CPUArg = Args.getLastArg(options::OPT_mcpu_EQ);
StringRef ArchName;
+ StringRef CPUName;
+
+ // Check -mcpu. ClangAs gives preference to -Wa,-mcpu=.
+ if (WaCPU) {
+ if (CPUArg)
+ D.Diag(clang::diag::warn_drv_unused_argument)
+ << CPUArg->getAsString(Args);
+ CPUName = StringRef(WaCPU->getValue()).substr(6);
+ CPUArg = WaCPU;
+ } else if (CPUArg)
+ CPUName = CPUArg->getValue();
+
+ // Check -march. ClangAs gives preference to -Wa,-march=.
if (WaArch) {
if (ArchArg)
D.Diag(clang::diag::warn_drv_unused_argument)
<< ArchArg->getAsString(Args);
ArchName = StringRef(WaArch->getValue()).substr(7);
- checkARMArchName(D, WaArch, Args, ArchName, Features, Triple);
+ checkARMArchName(D, WaArch, Args, ArchName, CPUName,
+ ExtensionFeatures, Triple);
// FIXME: Set Arch.
D.Diag(clang::diag::warn_drv_unused_argument) << WaArch->getAsString(Args);
} else if (ArchArg) {
ArchName = ArchArg->getValue();
- checkARMArchName(D, ArchArg, Args, ArchName, Features, Triple);
- }
-
- // Check -mcpu. ClangAs gives preference to -Wa,-mcpu=.
- const Arg *CPUArg = Args.getLastArg(options::OPT_mcpu_EQ);
- StringRef CPUName;
- if (WaCPU) {
- if (CPUArg)
- D.Diag(clang::diag::warn_drv_unused_argument)
- << CPUArg->getAsString(Args);
- CPUName = StringRef(WaCPU->getValue()).substr(6);
- checkARMCPUName(D, WaCPU, Args, CPUName, ArchName, Features, Triple);
- } else if (CPUArg) {
- CPUName = CPUArg->getValue();
- checkARMCPUName(D, CPUArg, Args, CPUName, ArchName, Features, Triple);
+ checkARMArchName(D, ArchArg, Args, ArchName, CPUName,
+ ExtensionFeatures, Triple);
}
// Add CPU features for generic CPUs
@@ -365,9 +376,16 @@ void arm::getARMTargetFeatures(const ToolChain &TC,
Features.push_back(
Args.MakeArgString((F.second ? "+" : "-") + F.first()));
} else if (!CPUName.empty()) {
+ // This sets the default features for the specified CPU. We certainly don't
+ // want to override the features that have been explicitly specified on the
+ // command line. Therefore, process them directly instead of appending them
+ // at the end later.
DecodeARMFeaturesFromCPU(D, CPUName, Features);
}
+ if (CPUArg)
+ checkARMCPUName(D, CPUArg, Args, CPUName, ArchName,
+ ExtensionFeatures, Triple);
// Honor -mfpu=. ClangAs gives preference to -Wa,-mfpu=.
const Arg *FPUArg = Args.getLastArg(options::OPT_mfpu_EQ);
if (WaFPU) {
@@ -379,14 +397,18 @@ void arm::getARMTargetFeatures(const ToolChain &TC,
} else if (FPUArg) {
getARMFPUFeatures(D, FPUArg, Args, FPUArg->getValue(), Features);
} else if (Triple.isAndroid() && getARMSubArchVersionNumber(Triple) >= 7) {
- // Android mandates minimum FPU requirements based on OS version.
- const char *AndroidFPU =
- Triple.isAndroidVersionLT(23) ? "vfpv3-d16" : "neon";
+ const char *AndroidFPU = "neon";
if (!llvm::ARM::getFPUFeatures(llvm::ARM::parseFPU(AndroidFPU), Features))
D.Diag(clang::diag::err_drv_clang_unsupported)
<< std::string("-mfpu=") + AndroidFPU;
}
+ // Now we've finished accumulating features from arch, cpu and fpu,
+ // we can append the ones for architecture extensions that we
+ // collected separately.
+ Features.insert(std::end(Features),
+ std::begin(ExtensionFeatures), std::end(ExtensionFeatures));
+
// Honor -mhwdiv=. ClangAs gives preference to -Wa,-mhwdiv=.
const Arg *HDivArg = Args.getLastArg(options::OPT_mhwdiv_EQ);
if (WaHDiv) {
@@ -431,16 +453,20 @@ fp16_fml_fallthrough:
if (ABI == arm::FloatABI::Soft) {
llvm::ARM::getFPUFeatures(llvm::ARM::FK_NONE, Features);
- // Disable hardware FP features which have been enabled.
- // FIXME: Disabling vfp2 and neon should be enough as all the other
- // features are dependent on these 2 features in LLVM. However
+ // Disable all features relating to hardware FP.
+ // FIXME: Disabling fpregs should be enough all by itself, since all
+ // the other FP features are dependent on it. However
// there is currently no easy way to test this in clang, so for
// now just be explicit and disable all known dependent features
// as well.
- for (std::string Feature : {"vfp2", "vfp3", "vfp4", "fp-armv8", "fullfp16",
- "neon", "crypto", "dotprod", "fp16fml"})
- if (std::find(std::begin(Features), std::end(Features), "+" + Feature) != std::end(Features))
- Features.push_back(Args.MakeArgString("-" + Feature));
+ for (std::string Feature : {
+ "vfp2", "vfp2sp", "vfp2d16", "vfp2d16sp",
+ "vfp3", "vfp3sp", "vfp3d16", "vfp3d16sp",
+ "vfp4", "vfp4sp", "vfp4d16", "vfp4d16sp",
+ "fp-armv8", "fp-armv8sp", "fp-armv8d16", "fp-armv8d16sp",
+ "fullfp16", "neon", "crypto", "dotprod", "fp16fml",
+ "fp64", "d32", "fpregs"})
+ Features.push_back(Args.MakeArgString("-" + Feature));
}
// En/disable crc code generation.
@@ -471,6 +497,10 @@ fp16_fml_fallthrough:
}
}
+ // CMSE: Check for target 8M (for -mcmse to be applicable) is performed later.
+ if (Args.getLastArg(options::OPT_mcmse))
+ Features.push_back("+8msecext");
+
// Look for the last occurrence of -mlong-calls or -mno-long-calls. If
// neither options are specified, see if we are compiling for kernel/kext and
// decide whether to pass "+long-calls" based on the OS and its version.
@@ -618,11 +648,12 @@ std::string arm::getARMTargetCPU(StringRef CPU, StringRef Arch,
return getARMCPUForMArch(Arch, Triple);
}
-/// getLLVMArchSuffixForARM - Get the LLVM arch name to use for a particular
-/// CPU (or Arch, if CPU is generic).
-// FIXME: This is redundant with -mcpu, why does LLVM use this.
-StringRef arm::getLLVMArchSuffixForARM(StringRef CPU, StringRef Arch,
- const llvm::Triple &Triple) {
+/// getLLVMArchSuffixForARM - Get the LLVM ArchKind value to use for a
+/// particular CPU (or Arch, if CPU is generic). This is needed to
+/// pass to functions like llvm::ARM::getDefaultFPU which need an
+/// ArchKind as well as a CPU name.
+llvm::ARM::ArchKind arm::getLLVMArchKindForARM(StringRef CPU, StringRef Arch,
+ const llvm::Triple &Triple) {
llvm::ARM::ArchKind ArchKind;
if (CPU == "generic") {
std::string ARMArch = tools::arm::getARMArch(Arch, Triple);
@@ -638,6 +669,15 @@ StringRef arm::getLLVMArchSuffixForARM(StringRef CPU, StringRef Arch,
? llvm::ARM::ArchKind::ARMV7K
: llvm::ARM::parseCPUArch(CPU);
}
+ return ArchKind;
+}
+
+/// getLLVMArchSuffixForARM - Get the LLVM arch name to use for a particular
+/// CPU (or Arch, if CPU is generic).
+// FIXME: This is redundant with -mcpu, why does LLVM use this.
+StringRef arm::getLLVMArchSuffixForARM(StringRef CPU, StringRef Arch,
+ const llvm::Triple &Triple) {
+ llvm::ARM::ArchKind ArchKind = getLLVMArchKindForARM(CPU, Arch, Triple);
if (ArchKind == llvm::ARM::ArchKind::INVALID)
return "";
return llvm::ARM::getSubArch(ArchKind);
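
The ARM changes above collect the +ext/-noext suffixes from -march/-mcpu into a separate ExtensionFeatures vector and append it only after the CPU and -mfpu features, so that an explicit extension can override a negative feature produced by the FPU selection (last occurrence in the feature list wins). The standalone sketch below demonstrates that ordering effect with plain strings; it is not the driver code, just the "later entry overrides earlier entry" idea.

#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> Features   = {"-fullfp16"};   // e.g. implied by -mfpu=...
  std::vector<std::string> Extensions = {"+fullfp16"};   // e.g. from -march=...+fp16
  // Extensions go in last, as in the hunks above.
  Features.insert(Features.end(), Extensions.begin(), Extensions.end());

  bool Enabled = false;
  for (const auto &F : Features)            // later entries override earlier ones
    if (F.substr(1) == "fullfp16")
      Enabled = (F[0] == '+');
  std::cout << "fullfp16 " << (Enabled ? "enabled" : "disabled") << "\n";
}
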
diff --git a/lib/Driver/ToolChains/Arch/ARM.h b/lib/Driver/ToolChains/Arch/ARM.h
index 9f0dc4ea2e25..5640f8371262 100644
--- a/lib/Driver/ToolChains/Arch/ARM.h
+++ b/lib/Driver/ToolChains/Arch/ARM.h
@@ -1,9 +1,8 @@
//===--- ARM.h - ARM-specific (not AArch64) Tool Helpers --------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -14,6 +13,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Option/Option.h"
+#include "llvm/Support/TargetParser.h"
#include <string>
#include <vector>
@@ -26,6 +26,8 @@ std::string getARMTargetCPU(StringRef CPU, llvm::StringRef Arch,
const llvm::Triple &Triple);
const std::string getARMArch(llvm::StringRef Arch, const llvm::Triple &Triple);
StringRef getARMCPUForMArch(llvm::StringRef Arch, const llvm::Triple &Triple);
+llvm::ARM::ArchKind getLLVMArchKindForARM(StringRef CPU, StringRef Arch,
+ const llvm::Triple &Triple);
StringRef getLLVMArchSuffixForARM(llvm::StringRef CPU, llvm::StringRef Arch,
const llvm::Triple &Triple);
diff --git a/lib/Driver/ToolChains/Arch/Mips.cpp b/lib/Driver/ToolChains/Arch/Mips.cpp
index e10a5e1c773f..b512ff64b0c6 100644
--- a/lib/Driver/ToolChains/Arch/Mips.cpp
+++ b/lib/Driver/ToolChains/Arch/Mips.cpp
@@ -1,9 +1,8 @@
//===--- Mips.cpp - Tools Implementations -----------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -247,7 +246,6 @@ void mips::getMIPSTargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (IsN64 && NonPIC && (!ABICallsArg || UseAbiCalls)) {
D.Diag(diag::warn_drv_unsupported_pic_with_mabicalls)
<< LastPICArg->getAsString(Args) << (!ABICallsArg ? 0 : 1);
- NonPIC = false;
}
if (ABICallsArg && !UseAbiCalls && IsPIC) {
diff --git a/lib/Driver/ToolChains/Arch/Mips.h b/lib/Driver/ToolChains/Arch/Mips.h
index a232ddbc8f3d..23e0cf79e154 100644
--- a/lib/Driver/ToolChains/Arch/Mips.h
+++ b/lib/Driver/ToolChains/Arch/Mips.h
@@ -1,9 +1,8 @@
//===--- Mips.h - Mips-specific Tool Helpers ----------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Arch/PPC.cpp b/lib/Driver/ToolChains/Arch/PPC.cpp
index 791f1206cf25..30f1a0d9022c 100644
--- a/lib/Driver/ToolChains/Arch/PPC.cpp
+++ b/lib/Driver/ToolChains/Arch/PPC.cpp
@@ -1,9 +1,8 @@
//===--- PPC.cpp - PPC Helpers for Tools ------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -116,7 +115,7 @@ ppc::ReadGOTPtrMode ppc::getPPCReadGOTPtrMode(const Driver &D, const llvm::Tripl
const ArgList &Args) {
if (Args.getLastArg(options::OPT_msecure_plt))
return ppc::ReadGOTPtrMode::SecurePlt;
- if (Triple.isOSOpenBSD())
+ if (Triple.isOSNetBSD() || Triple.isOSOpenBSD() || Triple.isMusl())
return ppc::ReadGOTPtrMode::SecurePlt;
else
return ppc::ReadGOTPtrMode::Bss;
diff --git a/lib/Driver/ToolChains/Arch/PPC.h b/lib/Driver/ToolChains/Arch/PPC.h
index 4f3cd688ca39..e1c943955e81 100644
--- a/lib/Driver/ToolChains/Arch/PPC.h
+++ b/lib/Driver/ToolChains/Arch/PPC.h
@@ -1,9 +1,8 @@
//===--- PPC.h - PPC-specific Tool Helpers ----------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Arch/RISCV.cpp b/lib/Driver/ToolChains/Arch/RISCV.cpp
index 1321fedcec51..b6768de4d299 100644
--- a/lib/Driver/ToolChains/Arch/RISCV.cpp
+++ b/lib/Driver/ToolChains/Arch/RISCV.cpp
@@ -1,9 +1,8 @@
//===--- RISCV.cpp - RISCV Helpers for Tools --------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -55,20 +54,14 @@ static bool isSupportedExtension(StringRef Ext) {
static bool getExtensionVersion(const Driver &D, StringRef MArch,
StringRef Ext, StringRef In,
std::string &Major, std::string &Minor) {
- auto I = In.begin();
- auto E = In.end();
-
- while (I != E && isDigit(*I))
- Major.append(1, *I++);
-
+ Major = In.take_while(isDigit);
+ In = In.substr(Major.size());
if (Major.empty())
return true;
- if (I != E && *I == 'p') {
- ++I;
-
- while (I != E && isDigit(*I))
- Minor.append(1, *I++);
+ if (In.consume_front("p")) {
+ Minor = In.take_while(isDigit);
+ In = In.substr(Major.size());
// Expected 'p' to be followed by minor version number.
if (Minor.empty()) {
@@ -111,17 +104,13 @@ static void getExtensionFeatures(const Driver &D,
SmallVector<StringRef, 8> Split;
Exts.split(Split, StringRef("_"));
- SmallVector<StringRef, 3> Prefix;
- Prefix.push_back("x");
- Prefix.push_back("s");
- Prefix.push_back("sx");
+ SmallVector<StringRef, 3> Prefix{"x", "s", "sx"};
auto I = Prefix.begin();
auto E = Prefix.end();
SmallVector<StringRef, 8> AllExts;
for (StringRef Ext : Split) {
-
if (Ext.empty()) {
D.Diag(diag::err_drv_invalid_riscv_arch_name) << MArch
<< "extension name missing after separator '_'";
@@ -171,7 +160,7 @@ static void getExtensionFeatures(const Driver &D,
}
// Check if duplicated extension.
- if (std::find(AllExts.begin(), AllExts.end(), Ext) != AllExts.end()) {
+ if (llvm::is_contained(AllExts, Ext)) {
std::string Error = "duplicated ";
Error += Desc;
D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
@@ -206,11 +195,9 @@ void riscv::getRISCVTargetFeatures(const Driver &D, const ArgList &Args,
StringRef MArch = A->getValue();
// RISC-V ISA strings must be lowercase.
- if (std::any_of(std::begin(MArch), std::end(MArch),
- [](char c) { return isupper(c); })) {
-
- D.Diag(diag::err_drv_invalid_riscv_arch_name) << MArch
- << "string must be lowercase";
+ if (llvm::any_of(MArch, [](char c) { return isupper(c); })) {
+ D.Diag(diag::err_drv_invalid_riscv_arch_name)
+ << MArch << "string must be lowercase";
return;
}
@@ -222,7 +209,7 @@ void riscv::getRISCVTargetFeatures(const Driver &D, const ArgList &Args,
return;
}
- bool HasRV64 = MArch.startswith("rv64") ? true : false;
+ bool HasRV64 = MArch.startswith("rv64");
// The canonical order specified in ISA manual.
// Ref: Table 22.1 in RISC-V User-Level ISA V2.2
@@ -365,6 +352,20 @@ void riscv::getRISCVTargetFeatures(const Driver &D, const ArgList &Args,
getExtensionFeatures(D, Args, Features, MArch, OtherExts);
}
+ // -mrelax is default, unless -mno-relax is specified.
+ if (Args.hasFlag(options::OPT_mrelax, options::OPT_mno_relax, true))
+ Features.push_back("+relax");
+ else
+ Features.push_back("-relax");
+
+ // GCC Compatibility: -mno-save-restore is default, unless -msave-restore is
+ // specified...
+ if (Args.hasFlag(options::OPT_msave_restore, options::OPT_mno_save_restore, false)) {
+ // ... but we don't support -msave-restore, so issue a warning.
+ D.Diag(diag::warn_drv_clang_unsupported)
+ << Args.getLastArg(options::OPT_msave_restore)->getAsString(Args);
+ }
+
// Now add any that the user explicitly requested on the command line,
// which may override the defaults.
handleTargetFeaturesGroup(Args, Features, options::OPT_m_riscv_Features_Group);
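
The RISC-V hunks above rewrite the extension-version parsing around StringRef::take_while/consume_front: a version suffix is a run of digits for the major version, optionally followed by 'p' and a run of digits for the minor version (e.g. "2p0" in rv32i2p0), and a 'p' without digits is an error. Below is a minimal sketch of that grammar using only the standard library; parseVersion is an illustrative stand-in, not clang's getExtensionVersion.

#include <cctype>
#include <iostream>
#include <string>

bool parseVersion(std::string In, std::string &Major, std::string &Minor) {
  auto takeDigits = [&In]() {
    std::string Out;
    while (!In.empty() && std::isdigit(static_cast<unsigned char>(In.front()))) {
      Out += In.front();
      In.erase(In.begin());
    }
    return Out;
  };
  Major = takeDigits();
  if (Major.empty())
    return false;                           // a version must start with digits
  if (!In.empty() && In.front() == 'p') {
    In.erase(In.begin());
    Minor = takeDigits();
    if (Minor.empty())
      return false;                         // 'p' must be followed by a minor version
  }
  return true;
}

int main() {
  std::string Major, Minor;
  if (parseVersion("2p0", Major, Minor))
    std::cout << "major=" << Major << " minor=" << Minor << "\n";
}
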
diff --git a/lib/Driver/ToolChains/Arch/RISCV.h b/lib/Driver/ToolChains/Arch/RISCV.h
index beda14979fab..443526900aae 100644
--- a/lib/Driver/ToolChains/Arch/RISCV.h
+++ b/lib/Driver/ToolChains/Arch/RISCV.h
@@ -1,9 +1,8 @@
//===--- RISCV.h - RISCV-specific Tool Helpers ------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Arch/Sparc.cpp b/lib/Driver/ToolChains/Arch/Sparc.cpp
index c177031b9f75..043b7f257c01 100644
--- a/lib/Driver/ToolChains/Arch/Sparc.cpp
+++ b/lib/Driver/ToolChains/Arch/Sparc.cpp
@@ -1,9 +1,8 @@
//===--- Sparc.cpp - Tools Implementations ----------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Arch/Sparc.h b/lib/Driver/ToolChains/Arch/Sparc.h
index 082b2808a946..d12a9a70e264 100644
--- a/lib/Driver/ToolChains/Arch/Sparc.h
+++ b/lib/Driver/ToolChains/Arch/Sparc.h
@@ -1,9 +1,8 @@
//===--- Sparc.h - Sparc-specific Tool Helpers ----------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Arch/SystemZ.cpp b/lib/Driver/ToolChains/Arch/SystemZ.cpp
index 6ee724d00802..ca60b85cf8a0 100644
--- a/lib/Driver/ToolChains/Arch/SystemZ.cpp
+++ b/lib/Driver/ToolChains/Arch/SystemZ.cpp
@@ -1,9 +1,8 @@
//===--- SystemZ.cpp - SystemZ Helpers for Tools ----------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Arch/SystemZ.h b/lib/Driver/ToolChains/Arch/SystemZ.h
index 521f8c2aad02..11d77fa01cc8 100644
--- a/lib/Driver/ToolChains/Arch/SystemZ.h
+++ b/lib/Driver/ToolChains/Arch/SystemZ.h
@@ -1,9 +1,8 @@
//===--- SystemZ.h - SystemZ-specific Tool Helpers --------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Arch/X86.cpp b/lib/Driver/ToolChains/Arch/X86.cpp
index 45648945d5ef..34be226b69e9 100644
--- a/lib/Driver/ToolChains/Arch/X86.cpp
+++ b/lib/Driver/ToolChains/Arch/X86.cpp
@@ -1,9 +1,8 @@
//===--- X86.cpp - X86 Helpers for Tools ------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -136,6 +135,7 @@ void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (ArchType == llvm::Triple::x86_64) {
Features.push_back("+sse4.2");
Features.push_back("+popcnt");
+ Features.push_back("+cx16");
} else
Features.push_back("+ssse3");
}
diff --git a/lib/Driver/ToolChains/Arch/X86.h b/lib/Driver/ToolChains/Arch/X86.h
index 20bf27a2b6bc..9f9c2b8c4b49 100644
--- a/lib/Driver/ToolChains/Arch/X86.h
+++ b/lib/Driver/ToolChains/Arch/X86.h
@@ -1,9 +1,8 @@
//===--- X86.h - X86-specific Tool Helpers ----------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/BareMetal.cpp b/lib/Driver/ToolChains/BareMetal.cpp
index 31d16922cc43..1544727050f4 100644
--- a/lib/Driver/ToolChains/BareMetal.cpp
+++ b/lib/Driver/ToolChains/BareMetal.cpp
@@ -1,9 +1,8 @@
-//===--- BaremMetal.cpp - Bare Metal ToolChain ------------------*- C++ -*-===//
+//===-- BareMetal.cpp - Bare Metal ToolChain --------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/BareMetal.h b/lib/Driver/ToolChains/BareMetal.h
index 43a6a8b4bec3..4c0c739307b1 100644
--- a/lib/Driver/ToolChains/BareMetal.h
+++ b/lib/Driver/ToolChains/BareMetal.h
@@ -1,9 +1,8 @@
//===--- BareMetal.h - Bare Metal Tool and ToolChain -------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Clang.cpp b/lib/Driver/ToolChains/Clang.cpp
index 75f16898dfaf..cb861f27aeda 100644
--- a/lib/Driver/ToolChains/Clang.cpp
+++ b/lib/Driver/ToolChains/Clang.cpp
@@ -1,9 +1,8 @@
-//===--- LLVM.cpp - Clang+LLVM ToolChain Implementations --------*- C++ -*-===//
+//===-- Clang.cpp - Clang+LLVM ToolChain Implementations --------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -502,6 +501,8 @@ static codegenoptions::DebugInfoKind DebugLevelToInfoKind(const Arg &A) {
return codegenoptions::LimitedDebugInfo;
}
+enum class FramePointerKind { None, NonLeaf, All };
+
static bool mustUseNonLeafFramePointerForTarget(const llvm::Triple &Triple) {
switch (Triple.getArch()){
default:
@@ -516,13 +517,20 @@ static bool mustUseNonLeafFramePointerForTarget(const llvm::Triple &Triple) {
static bool useFramePointerForTargetByDefault(const ArgList &Args,
const llvm::Triple &Triple) {
+ if (Args.hasArg(options::OPT_pg))
+ return true;
+
switch (Triple.getArch()) {
case llvm::Triple::xcore:
case llvm::Triple::wasm32:
case llvm::Triple::wasm64:
+ case llvm::Triple::msp430:
// XCore never wants frame pointers, regardless of OS.
// WebAssembly never wants frame pointers.
return false;
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
return !areOptimizationsEnabled(Args);
@@ -542,9 +550,6 @@ static bool useFramePointerForTargetByDefault(const ArgList &Args,
case llvm::Triple::mips64el:
case llvm::Triple::mips:
case llvm::Triple::mipsel:
- case llvm::Triple::ppc:
- case llvm::Triple::ppc64:
- case llvm::Triple::ppc64le:
case llvm::Triple::systemz:
case llvm::Triple::x86:
case llvm::Triple::x86_64:
@@ -574,40 +579,34 @@ static bool useFramePointerForTargetByDefault(const ArgList &Args,
return true;
}
-static bool shouldUseFramePointer(const ArgList &Args,
- const llvm::Triple &Triple) {
- if (Arg *A = Args.getLastArg(options::OPT_fno_omit_frame_pointer,
- options::OPT_fomit_frame_pointer))
- return A->getOption().matches(options::OPT_fno_omit_frame_pointer) ||
- mustUseNonLeafFramePointerForTarget(Triple);
-
- if (Args.hasArg(options::OPT_pg))
- return true;
-
- return useFramePointerForTargetByDefault(Args, Triple);
-}
-
-static bool shouldUseLeafFramePointer(const ArgList &Args,
- const llvm::Triple &Triple) {
- if (Arg *A = Args.getLastArg(options::OPT_mno_omit_leaf_frame_pointer,
- options::OPT_momit_leaf_frame_pointer))
- return A->getOption().matches(options::OPT_mno_omit_leaf_frame_pointer);
-
- if (Args.hasArg(options::OPT_pg))
- return true;
-
- if (Triple.isPS4CPU())
- return false;
-
- return useFramePointerForTargetByDefault(Args, Triple);
+static FramePointerKind getFramePointerKind(const ArgList &Args,
+ const llvm::Triple &Triple) {
+ Arg *A = Args.getLastArg(options::OPT_fomit_frame_pointer,
+ options::OPT_fno_omit_frame_pointer);
+ bool OmitFP = A && A->getOption().matches(options::OPT_fomit_frame_pointer);
+ bool NoOmitFP =
+ A && A->getOption().matches(options::OPT_fno_omit_frame_pointer);
+ if (NoOmitFP || mustUseNonLeafFramePointerForTarget(Triple) ||
+ (!OmitFP && useFramePointerForTargetByDefault(Args, Triple))) {
+ if (Args.hasFlag(options::OPT_momit_leaf_frame_pointer,
+ options::OPT_mno_omit_leaf_frame_pointer,
+ Triple.isPS4CPU()))
+ return FramePointerKind::NonLeaf;
+ return FramePointerKind::All;
+ }
+ return FramePointerKind::None;
}
/// Add a CC1 option to specify the debug compilation directory.
-static void addDebugCompDirArg(const ArgList &Args, ArgStringList &CmdArgs) {
- SmallString<128> cwd;
- if (!llvm::sys::fs::current_path(cwd)) {
+static void addDebugCompDirArg(const ArgList &Args, ArgStringList &CmdArgs,
+ const llvm::vfs::FileSystem &VFS) {
+ if (Arg *A = Args.getLastArg(options::OPT_fdebug_compilation_dir)) {
CmdArgs.push_back("-fdebug-compilation-dir");
- CmdArgs.push_back(Args.MakeArgString(cwd));
+ CmdArgs.push_back(A->getValue());
+ } else if (llvm::ErrorOr<std::string> CWD =
+ VFS.getCurrentWorkingDirectory()) {
+ CmdArgs.push_back("-fdebug-compilation-dir");
+ CmdArgs.push_back(Args.MakeArgString(*CWD));
}
}
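
For readers tracing the refactor above, a minimal standalone sketch of the tri-state frame-pointer decision that replaces the old shouldUseFramePointer/shouldUseLeafFramePointer pair. The helper name and the plain-bool inputs are hypothetical simplifications of the driver's ArgList/Triple handling, not its actual API.

#include <cstdio>

// Stand-in for the driver's FramePointerKind enum above.
enum class FPKind { None, NonLeaf, All };

// Hypothetical condensation of getFramePointerKind: the real code derives
// these booleans from -f(no-)omit-frame-pointer, -m(no-)omit-leaf-frame-pointer,
// the target triple, and the target's default behavior.
FPKind decideFramePointer(bool NoOmitFP, bool OmitFP, bool MustKeepNonLeaf,
                          bool KeepByDefault, bool OmitLeafFP) {
  if (NoOmitFP || MustKeepNonLeaf || (!OmitFP && KeepByDefault))
    return OmitLeafFP ? FPKind::NonLeaf : FPKind::All;
  return FPKind::None;
}

int main() {
  // -fno-omit-frame-pointer -momit-leaf-frame-pointer keeps only non-leaf FPs.
  FPKind K = decideFramePointer(true, false, false, false, true);
  std::printf("%d\n", static_cast<int>(K)); // 1 == NonLeaf
  return 0;
}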
@@ -624,7 +623,8 @@ static void addDebugPrefixMapArg(const Driver &D, const ArgList &Args, ArgString
}
/// Vectorize at all optimization levels greater than 1 except for -Oz.
-/// For -Oz the loop vectorizer is disable, while the slp vectorizer is enabled.
+/// For -Oz the loop vectorizer is disabled, while the slp vectorizer is
+/// enabled.
static bool shouldEnableVectorizerAtOLevel(const ArgList &Args, bool isSlpVec) {
if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
if (A->getOption().matches(options::OPT_O4) ||
@@ -718,8 +718,9 @@ static void appendUserToPath(SmallVectorImpl<char> &Result) {
Result.append(UID.begin(), UID.end());
}
-static void addPGOAndCoverageFlags(Compilation &C, const Driver &D,
- const InputInfo &Output, const ArgList &Args,
+static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
+ const Driver &D, const InputInfo &Output,
+ const ArgList &Args,
ArgStringList &CmdArgs) {
auto *PGOGenerateArg = Args.getLastArg(options::OPT_fprofile_generate,
@@ -729,6 +730,13 @@ static void addPGOAndCoverageFlags(Compilation &C, const Driver &D,
PGOGenerateArg->getOption().matches(options::OPT_fno_profile_generate))
PGOGenerateArg = nullptr;
+ auto *CSPGOGenerateArg = Args.getLastArg(options::OPT_fcs_profile_generate,
+ options::OPT_fcs_profile_generate_EQ,
+ options::OPT_fno_profile_generate);
+ if (CSPGOGenerateArg &&
+ CSPGOGenerateArg->getOption().matches(options::OPT_fno_profile_generate))
+ CSPGOGenerateArg = nullptr;
+
auto *ProfileGenerateArg = Args.getLastArg(
options::OPT_fprofile_instr_generate,
options::OPT_fprofile_instr_generate_EQ,
@@ -752,6 +760,10 @@ static void addPGOAndCoverageFlags(Compilation &C, const Driver &D,
D.Diag(diag::err_drv_argument_not_allowed_with)
<< ProfileGenerateArg->getSpelling() << ProfileUseArg->getSpelling();
+ if (CSPGOGenerateArg && PGOGenerateArg)
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << CSPGOGenerateArg->getSpelling() << PGOGenerateArg->getSpelling();
+
if (ProfileGenerateArg) {
if (ProfileGenerateArg->getOption().matches(
options::OPT_fprofile_instr_generate_EQ))
@@ -759,13 +771,33 @@ static void addPGOAndCoverageFlags(Compilation &C, const Driver &D,
ProfileGenerateArg->getValue()));
// The default is to use Clang Instrumentation.
CmdArgs.push_back("-fprofile-instrument=clang");
+ if (TC.getTriple().isWindowsMSVCEnvironment()) {
+ // Add dependent lib for clang_rt.profile
+ CmdArgs.push_back(Args.MakeArgString("--dependent-lib=" +
+ TC.getCompilerRT(Args, "profile")));
+ }
}
+ Arg *PGOGenArg = nullptr;
if (PGOGenerateArg) {
+ assert(!CSPGOGenerateArg);
+ PGOGenArg = PGOGenerateArg;
CmdArgs.push_back("-fprofile-instrument=llvm");
- if (PGOGenerateArg->getOption().matches(
- options::OPT_fprofile_generate_EQ)) {
- SmallString<128> Path(PGOGenerateArg->getValue());
+ }
+ if (CSPGOGenerateArg) {
+ assert(!PGOGenerateArg);
+ PGOGenArg = CSPGOGenerateArg;
+ CmdArgs.push_back("-fprofile-instrument=csllvm");
+ }
+ if (PGOGenArg) {
+ if (TC.getTriple().isWindowsMSVCEnvironment()) {
+ CmdArgs.push_back(Args.MakeArgString("--dependent-lib=" +
+ TC.getCompilerRT(Args, "profile")));
+ }
+ if (PGOGenArg->getOption().matches(
+ PGOGenerateArg ? options::OPT_fprofile_generate_EQ
+ : options::OPT_fcs_profile_generate_EQ)) {
+ SmallString<128> Path(PGOGenArg->getValue());
llvm::sys::path::append(Path, "default_%m.profraw");
CmdArgs.push_back(
Args.MakeArgString(Twine("-fprofile-instrument-path=") + Path));
@@ -840,13 +872,8 @@ static void addPGOAndCoverageFlags(Compilation &C, const Driver &D,
else
OutputFilename = llvm::sys::path::filename(Output.getBaseInput());
SmallString<128> CoverageFilename = OutputFilename;
- if (llvm::sys::path::is_relative(CoverageFilename)) {
- SmallString<128> Pwd;
- if (!llvm::sys::fs::current_path(Pwd)) {
- llvm::sys::path::append(Pwd, CoverageFilename);
- CoverageFilename.swap(Pwd);
- }
- }
+ if (llvm::sys::path::is_relative(CoverageFilename))
+ (void)D.getVFS().makeAbsolute(CoverageFilename);
llvm::sys::path::replace_extension(CoverageFilename, "gcno");
CmdArgs.push_back(Args.MakeArgString(CoverageFilename));
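
As a rough illustration of the flag translation in addPGOAndCoverageFlags above, this standalone sketch maps the driver-level profile options onto the cc1 -fprofile-instrument= value. The function name and boolean inputs are hypothetical simplifications of the ArgList handling, and the MSVC --dependent-lib and profraw-path handling is omitted.

#include <string>

// Hypothetical simplification; the real driver also diagnoses conflicting
// combinations such as -fprofile-generate with -fcs-profile-generate.
std::string profileInstrumentFlag(bool ClangInstr, bool IRInstr, bool CSIRInstr) {
  if (ClangInstr)   // -fprofile-instr-generate
    return "-fprofile-instrument=clang";
  if (IRInstr)      // -fprofile-generate
    return "-fprofile-instrument=llvm";
  if (CSIRInstr)    // -fcs-profile-generate
    return "-fprofile-instrument=csllvm";
  return "";        // no instrumentation requested
}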
@@ -972,7 +999,7 @@ static void RenderDebugInfoCompressionArgs(const ArgList &Args,
if (checkDebugInfoOption(A, Args, D, TC)) {
if (A->getOption().getID() == options::OPT_gz) {
if (llvm::zlib::isAvailable())
- CmdArgs.push_back("-compress-debug-sections");
+ CmdArgs.push_back("--compress-debug-sections");
else
D.Diag(diag::warn_debug_compression_unavailable);
return;
@@ -980,11 +1007,11 @@ static void RenderDebugInfoCompressionArgs(const ArgList &Args,
StringRef Value = A->getValue();
if (Value == "none") {
- CmdArgs.push_back("-compress-debug-sections=none");
+ CmdArgs.push_back("--compress-debug-sections=none");
} else if (Value == "zlib" || Value == "zlib-gnu") {
if (llvm::zlib::isAvailable()) {
CmdArgs.push_back(
- Args.MakeArgString("-compress-debug-sections=" + Twine(Value)));
+ Args.MakeArgString("--compress-debug-sections=" + Twine(Value)));
} else {
D.Diag(diag::warn_debug_compression_unavailable);
}
@@ -1116,6 +1143,24 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
if (JA.isOffloading(Action::OFK_Cuda))
getToolChain().AddCudaIncludeArgs(Args, CmdArgs);
+ // If we are offloading to a target via OpenMP we need to include the
+ // openmp_wrappers folder which contains alternative system headers.
+ if (JA.isDeviceOffloading(Action::OFK_OpenMP) &&
+ getToolChain().getTriple().isNVPTX()){
+ if (!Args.hasArg(options::OPT_nobuiltininc)) {
+ // Add openmp_wrappers/* to our system include path. This lets us wrap
+ // standard library headers.
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "include");
+ llvm::sys::path::append(P, "openmp_wrappers");
+ CmdArgs.push_back("-internal-isystem");
+ CmdArgs.push_back(Args.MakeArgString(P));
+ }
+
+ CmdArgs.push_back("-include");
+ CmdArgs.push_back("__clang_openmp_math_declares.h");
+ }
+
// Add -i* options, and automatically translate to
// -include-pch/-include-pth for transparent PCH support. It's
// wonky, but we include looking for .gch so we can support seamless
@@ -1364,6 +1409,9 @@ void Clang::AddARMTargetArgs(const llvm::Triple &Triple, const ArgList &Args,
if (!Args.hasFlag(options::OPT_mimplicit_float,
options::OPT_mno_implicit_float, true))
CmdArgs.push_back("-no-implicit-float");
+
+ if (Args.getLastArg(options::OPT_mcmse))
+ CmdArgs.push_back("-mcmse");
}
void Clang::RenderTargetOptions(const llvm::Triple &EffectiveTriple,
@@ -1716,6 +1764,14 @@ void Clang::AddMIPSTargetArgs(const ArgList &Args,
} else
D.Diag(diag::warn_target_unsupported_compact_branches) << CPUName;
}
+
+ if (Arg *A = Args.getLastArg(options::OPT_mrelax_pic_calls,
+ options::OPT_mno_relax_pic_calls)) {
+ if (A->getOption().matches(options::OPT_mno_relax_pic_calls)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-mips-jalr-reloc=0");
+ }
+ }
}
void Clang::AddPPCTargetArgs(const ArgList &Args,
@@ -1747,12 +1803,21 @@ void Clang::AddPPCTargetArgs(const ArgList &Args,
break;
}
- if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ))
- // The ppc64 linux abis are all "altivec" abis by default. Accept and ignore
- // the option if given as we don't have backend support for any targets
- // that don't use the altivec abi.
- if (StringRef(A->getValue()) != "altivec")
+ bool IEEELongDouble = false;
+ for (const Arg *A : Args.filtered(options::OPT_mabi_EQ)) {
+ StringRef V = A->getValue();
+ if (V == "ieeelongdouble")
+ IEEELongDouble = true;
+ else if (V == "ibmlongdouble")
+ IEEELongDouble = false;
+ else if (V != "altivec")
+ // The ppc64 linux abis are all "altivec" abis by default. Accept and ignore
+ // the option if given as we don't have backend support for any targets
+ // that don't use the altivec abi.
ABIName = A->getValue();
+ }
+ if (IEEELongDouble)
+ CmdArgs.push_back("-mabi=ieeelongdouble");
ppc::FloatABI FloatABI =
ppc::getPPCFloatABI(getToolChain().getDriver(), Args);
@@ -2023,6 +2088,7 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
bool TakeNextArg = false;
bool UseRelaxRelocations = C.getDefaultToolChain().useRelaxRelocations();
+ bool UseNoExecStack = C.getDefaultToolChain().isNoExecStackDefault();
const char *MipsTargetFeature = nullptr;
for (const Arg *A :
Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) {
@@ -2104,7 +2170,7 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
} else if (Value == "--fatal-warnings") {
CmdArgs.push_back("-massembler-fatal-warnings");
} else if (Value == "--noexecstack") {
- CmdArgs.push_back("-mnoexecstack");
+ UseNoExecStack = true;
} else if (Value.startswith("-compress-debug-sections") ||
Value.startswith("--compress-debug-sections") ||
Value == "-nocompress-debug-sections" ||
@@ -2167,6 +2233,8 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
}
if (UseRelaxRelocations)
CmdArgs.push_back("--mrelax-relocations");
+ if (UseNoExecStack)
+ CmdArgs.push_back("-mnoexecstack");
if (MipsTargetFeature != nullptr) {
CmdArgs.push_back("-target-feature");
CmdArgs.push_back(MipsTargetFeature);
@@ -2676,7 +2744,7 @@ static void RenderModulesOptions(Compilation &C, const Driver &D,
}
}
- HaveModules = HaveClangModules;
+ HaveModules |= HaveClangModules;
if (Args.hasArg(options::OPT_fmodules_ts)) {
CmdArgs.push_back("-fmodules-ts");
HaveModules = true;
@@ -2837,9 +2905,7 @@ static void RenderCharacterOptions(const ArgList &Args, const llvm::Triple &T,
}
// The default depends on the language standard.
- if (const Arg *A =
- Args.getLastArg(options::OPT_fchar8__t, options::OPT_fno_char8__t))
- A->render(Args, CmdArgs);
+ Args.AddLastArg(CmdArgs, options::OPT_fchar8__t, options::OPT_fno_char8__t);
if (const Arg *A = Args.getLastArg(options::OPT_fshort_wchar,
options::OPT_fno_short_wchar)) {
@@ -2905,7 +2971,7 @@ static void RenderObjCOptions(const ToolChain &TC, const Driver &D,
// We default off for Objective-C, on for Objective-C++.
if (Args.hasFlag(options::OPT_fobjc_arc_exceptions,
options::OPT_fno_objc_arc_exceptions,
- /*default=*/types::isCXX(Input.getType())))
+ /*Default=*/types::isCXX(Input.getType())))
CmdArgs.push_back("-fobjc-arc-exceptions");
}
@@ -3125,35 +3191,24 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
SplitDWARFInlining = false;
}
- if (const Arg *A = Args.getLastArg(options::OPT_g_Group)) {
- if (checkDebugInfoOption(A, Args, D, TC)) {
- // If the last option explicitly specified a debug-info level, use it.
- if (A->getOption().matches(options::OPT_gN_Group)) {
- DebugInfoKind = DebugLevelToInfoKind(*A);
- // If you say "-gsplit-dwarf -gline-tables-only", -gsplit-dwarf loses.
- // But -gsplit-dwarf is not a g_group option, hence we have to check the
- // order explicitly. If -gsplit-dwarf wins, we fix DebugInfoKind later.
- // This gets a bit more complicated if you've disabled inline info in
- // the skeleton CUs (SplitDWARFInlining) - then there's value in
- // composing split-dwarf and line-tables-only, so let those compose
- // naturally in that case. And if you just turned off debug info,
- // (-gsplit-dwarf -g0) - do that.
- if (DwarfFission != DwarfFissionKind::None) {
- if (A->getIndex() > SplitDWARFArg->getIndex()) {
- if (DebugInfoKind == codegenoptions::NoDebugInfo ||
- DebugInfoKind == codegenoptions::DebugDirectivesOnly ||
- (DebugInfoKind == codegenoptions::DebugLineTablesOnly &&
- SplitDWARFInlining))
- DwarfFission = DwarfFissionKind::None;
- } else if (SplitDWARFInlining)
- DebugInfoKind = codegenoptions::NoDebugInfo;
- }
- } else {
- // For any other 'g' option, use Limited.
- DebugInfoKind = codegenoptions::LimitedDebugInfo;
- }
- } else {
- DebugInfoKind = codegenoptions::LimitedDebugInfo;
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_g_Group, options::OPT_gsplit_dwarf,
+ options::OPT_gsplit_dwarf_EQ)) {
+ DebugInfoKind = codegenoptions::LimitedDebugInfo;
+
+ // If the last option explicitly specified a debug-info level, use it.
+ if (checkDebugInfoOption(A, Args, D, TC) &&
+ A->getOption().matches(options::OPT_gN_Group)) {
+ DebugInfoKind = DebugLevelToInfoKind(*A);
+ // For -g0 or -gline-tables-only, drop -gsplit-dwarf. This gets a bit more
+ // complicated if you've disabled inline info in the skeleton CUs
+ // (SplitDWARFInlining) - then there's value in composing split-dwarf and
+ // line-tables-only, so let those compose naturally in that case.
+ if (DebugInfoKind == codegenoptions::NoDebugInfo ||
+ DebugInfoKind == codegenoptions::DebugDirectivesOnly ||
+ (DebugInfoKind == codegenoptions::DebugLineTablesOnly &&
+ SplitDWARFInlining))
+ DwarfFission = DwarfFissionKind::None;
}
}
@@ -3228,32 +3283,18 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
}
}
- // -gsplit-dwarf should turn on -g and enable the backend dwarf
- // splitting and extraction.
- // FIXME: Currently only works on Linux and Fuchsia.
- if (T.isOSLinux() || T.isOSFuchsia()) {
- if (!SplitDWARFInlining)
- CmdArgs.push_back("-fno-split-dwarf-inlining");
-
- if (DwarfFission != DwarfFissionKind::None) {
- if (DebugInfoKind == codegenoptions::NoDebugInfo)
- DebugInfoKind = codegenoptions::LimitedDebugInfo;
-
- if (DwarfFission == DwarfFissionKind::Single)
- CmdArgs.push_back("-enable-split-dwarf=single");
- else
- CmdArgs.push_back("-enable-split-dwarf");
- }
- }
+ if (T.isOSBinFormatELF() && !SplitDWARFInlining)
+ CmdArgs.push_back("-fno-split-dwarf-inlining");
// After we've dealt with all combinations of things that could
// make DebugInfoKind be other than None or DebugLineTablesOnly,
// figure out if we need to "upgrade" it to standalone debug info.
// We parse these two '-f' options whether or not they will be used,
// to claim them even if you wrote "-fstandalone-debug -gline-tables-only"
- bool NeedFullDebug = Args.hasFlag(options::OPT_fstandalone_debug,
- options::OPT_fno_standalone_debug,
- TC.GetDefaultStandaloneDebug());
+ bool NeedFullDebug = Args.hasFlag(
+ options::OPT_fstandalone_debug, options::OPT_fno_standalone_debug,
+ DebuggerTuning == llvm::DebuggerKind::LLDB ||
+ TC.GetDefaultStandaloneDebug());
if (const Arg *A = Args.getLastArg(options::OPT_fstandalone_debug))
(void)checkDebugInfoOption(A, Args, D, TC);
if (DebugInfoKind == codegenoptions::LimitedDebugInfo && NeedFullDebug)
@@ -3416,19 +3457,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
const llvm::Triple *AuxTriple = IsCuda ? TC.getAuxTriple() : nullptr;
- bool IsWindowsGNU = RawTriple.isWindowsGNUEnvironment();
- bool IsWindowsCygnus = RawTriple.isWindowsCygwinEnvironment();
bool IsWindowsMSVC = RawTriple.isWindowsMSVCEnvironment();
bool IsIAMCU = RawTriple.isOSIAMCU();
// Adjust IsWindowsXYZ for CUDA/HIP compilations. Even when compiling in
// device mode (i.e., getToolchain().getTriple() is NVPTX/AMDGCN, not
// Windows), we need to pass Windows-specific flags to cc1.
- if (IsCuda || IsHIP) {
+ if (IsCuda || IsHIP)
IsWindowsMSVC |= AuxTriple && AuxTriple->isWindowsMSVCEnvironment();
- IsWindowsGNU |= AuxTriple && AuxTriple->isWindowsGNUEnvironment();
- IsWindowsCygnus |= AuxTriple && AuxTriple->isWindowsCygwinEnvironment();
- }
// C++ is not supported for IAMCU.
if (IsIAMCU && types::isCXX(Input.getType()))
@@ -3457,13 +3493,25 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
NormalizedTriple = C.getSingleOffloadToolChain<Action::OFK_Host>()
->getTriple()
.normalize();
- else
+ else {
+ // Host-side compilation.
NormalizedTriple =
(IsCuda ? C.getSingleOffloadToolChain<Action::OFK_Cuda>()
: C.getSingleOffloadToolChain<Action::OFK_HIP>())
->getTriple()
.normalize();
-
+ if (IsCuda) {
+ // We need to figure out which CUDA version we're compiling for, as that
+ // determines how we load and launch GPU kernels.
+ auto *CTC = static_cast<const toolchains::CudaToolChain *>(
+ C.getSingleOffloadToolChain<Action::OFK_Cuda>());
+ assert(CTC && "Expected valid CUDA Toolchain.");
+ if (CTC && CTC->CudaInstallation.version() != CudaVersion::UNKNOWN)
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("-target-sdk-version=") +
+ CudaVersionToString(CTC->CudaInstallation.version())));
+ }
+ }
CmdArgs.push_back("-aux-triple");
CmdArgs.push_back(Args.MakeArgString(NormalizedTriple));
}
@@ -3539,6 +3587,25 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
} else if (JA.getType() == types::TY_LLVM_BC ||
JA.getType() == types::TY_LTO_BC) {
CmdArgs.push_back("-emit-llvm-bc");
+ } else if (JA.getType() == types::TY_IFS) {
+ StringRef StubFormat =
+ llvm::StringSwitch<StringRef>(
+ Args.hasArg(options::OPT_iterface_stub_version_EQ)
+ ? Args.getLastArgValue(options::OPT_iterface_stub_version_EQ)
+ : "")
+ .Case("experimental-yaml-elf-v1", "experimental-yaml-elf-v1")
+ .Case("experimental-tapi-elf-v1", "experimental-tapi-elf-v1")
+ .Default("");
+
+ if (StubFormat.empty())
+ D.Diag(diag::err_drv_invalid_value)
+ << "Must specify a valid interface stub format type using "
+ << "-interface-stub-version=<experimental-tapi-elf-v1 | "
+ "experimental-yaml-elf-v1>";
+
+ CmdArgs.push_back("-emit-interface-stubs");
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-interface-stub-version=") + StubFormat));
} else if (JA.getType() == types::TY_PP_Asm) {
CmdArgs.push_back("-S");
} else if (JA.getType() == types::TY_AST) {
@@ -3580,8 +3647,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (const Arg *A = Args.getLastArg(options::OPT_fthinlto_index_EQ)) {
if (!types::isLLVMIR(Input.getType()))
- D.Diag(diag::err_drv_argument_only_allowed_with) << A->getAsString(Args)
- << "-x ir";
+ D.Diag(diag::err_drv_arg_requires_bitcode_input) << A->getAsString(Args);
Args.AddLastArg(CmdArgs, options::OPT_fthinlto_index_EQ);
}
@@ -3597,6 +3663,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Disable all llvm IR level optimizations.
CmdArgs.push_back("-disable-llvm-passes");
+ // Render target options such as -fuse-init-array on modern ELF platforms.
+ TC.addClangTargetOptions(Args, CmdArgs, JA.getOffloadingDeviceKind());
+
// reject options that shouldn't be supported in bitcode
// also reject kernel/kext
static const constexpr unsigned kBitcodeOptionBlacklist[] = {
@@ -3638,9 +3707,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_mllvm,
};
for (const auto &A : Args)
- if (std::find(std::begin(kBitcodeOptionBlacklist),
- std::end(kBitcodeOptionBlacklist),
- A->getOption().getID()) !=
+ if (llvm::find(kBitcodeOptionBlacklist, A->getOption().getID()) !=
std::end(kBitcodeOptionBlacklist))
D.Diag(diag::err_drv_unsupported_embed_bitcode) << A->getSpelling();
@@ -3790,6 +3857,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-pic-is-pie");
}
+ if (RelocationModel == llvm::Reloc::ROPI ||
+ RelocationModel == llvm::Reloc::ROPI_RWPI)
+ CmdArgs.push_back("-fropi");
+ if (RelocationModel == llvm::Reloc::RWPI ||
+ RelocationModel == llvm::Reloc::ROPI_RWPI)
+ CmdArgs.push_back("-frwpi");
+
if (Arg *A = Args.getLastArg(options::OPT_meabi)) {
CmdArgs.push_back("-meabi");
CmdArgs.push_back(A->getValue());
@@ -3872,8 +3946,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasFlag(options::OPT_mrtd, options::OPT_mno_rtd, false))
CmdArgs.push_back("-fdefault-calling-conv=stdcall");
- if (shouldUseFramePointer(Args, RawTriple))
+ FramePointerKind FPKeepKind = getFramePointerKind(Args, RawTriple);
+ if (FPKeepKind != FramePointerKind::None) {
CmdArgs.push_back("-mdisable-fp-elim");
+ if (FPKeepKind == FramePointerKind::NonLeaf)
+ CmdArgs.push_back("-momit-leaf-frame-pointer");
+ }
if (!Args.hasFlag(options::OPT_fzero_initialized_in_bss,
options::OPT_fno_zero_initialized_in_bss))
CmdArgs.push_back("-mno-zero-initialized-in-bss");
@@ -3925,6 +4003,17 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
RenderFloatingPointOptions(TC, D, OFastEnabled, Args, CmdArgs);
+ if (Arg *A = Args.getLastArg(options::OPT_mlong_double_64,
+ options::OPT_mlong_double_128)) {
+ if (TC.getArch() == llvm::Triple::x86 ||
+ TC.getArch() == llvm::Triple::x86_64 ||
+ TC.getArch() == llvm::Triple::ppc || TC.getTriple().isPPC64())
+ A->render(Args, CmdArgs);
+ else
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << TripleStr;
+ }
+
// Decide whether to use verbose asm. Verbose assembly is the default on
// toolchains which have the integrated assembler on by default.
bool IsIntegratedAssemblerDefault = TC.IsIntegratedAssemblerDefault();
@@ -4028,14 +4117,17 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Add the split debug info name to the command lines here so we
// can propagate it to the backend.
bool SplitDWARF = (DwarfFission != DwarfFissionKind::None) &&
- (RawTriple.isOSLinux() || RawTriple.isOSFuchsia()) &&
+ TC.getTriple().isOSBinFormatELF() &&
(isa<AssembleJobAction>(JA) || isa<CompileJobAction>(JA) ||
isa<BackendJobAction>(JA));
- const char *SplitDWARFOut;
if (SplitDWARF) {
+ const char *SplitDWARFOut = SplitDebugName(Args, Input, Output);
CmdArgs.push_back("-split-dwarf-file");
- SplitDWARFOut = SplitDebugName(Args, Output);
CmdArgs.push_back(SplitDWARFOut);
+ if (DwarfFission == DwarfFissionKind::Split) {
+ CmdArgs.push_back("-split-dwarf-output");
+ CmdArgs.push_back(SplitDWARFOut);
+ }
}
// Pass the linker version in use.
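
A short sketch of the cc1 arguments the split-DWARF logic above ends up emitting; the enum mirrors DwarfFissionKind and the .dwo name is a placeholder supplied by the caller.

#include <string>
#include <vector>

enum class Fission { None, Split, Single };

// Hypothetical condensation of the -split-dwarf-file / -split-dwarf-output
// rendering above (assumes an ELF target and a compile/assemble/backend job).
std::vector<std::string> splitDwarfArgs(Fission F, const std::string &DwoName) {
  std::vector<std::string> Out;
  if (F == Fission::None)
    return Out;
  Out.push_back("-split-dwarf-file");
  Out.push_back(DwoName);
  if (F == Fission::Split) {        // plain -gsplit-dwarf (split mode)
    Out.push_back("-split-dwarf-output");
    Out.push_back(DwoName);
  }
  return Out;                       // -gsplit-dwarf=single stops after -split-dwarf-file
}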
@@ -4044,9 +4136,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(A->getValue());
}
- if (!shouldUseLeafFramePointer(Args, RawTriple))
- CmdArgs.push_back("-momit-leaf-frame-pointer");
-
// Explicitly error on some things we know we don't support and can't just
// ignore.
if (!Args.hasArg(options::OPT_fallow_unsupported)) {
@@ -4100,20 +4189,17 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_unique_section_names, true))
CmdArgs.push_back("-fno-unique-section-names");
- if (auto *A = Args.getLastArg(
- options::OPT_finstrument_functions,
- options::OPT_finstrument_functions_after_inlining,
- options::OPT_finstrument_function_entry_bare))
- A->render(Args, CmdArgs);
+ Args.AddLastArg(CmdArgs, options::OPT_finstrument_functions,
+ options::OPT_finstrument_functions_after_inlining,
+ options::OPT_finstrument_function_entry_bare);
// NVPTX doesn't support PGO or coverage. There's no runtime support for
// sampling, overhead of call arc collection is way too high and there's no
// way to collect the output.
if (!Triple.isNVPTX())
- addPGOAndCoverageFlags(C, D, Output, Args, CmdArgs);
+ addPGOAndCoverageFlags(TC, C, D, Output, Args, CmdArgs);
- if (auto *ABICompatArg = Args.getLastArg(options::OPT_fclang_abi_compat_EQ))
- ABICompatArg->render(Args, CmdArgs);
+ Args.AddLastArg(CmdArgs, options::OPT_fclang_abi_compat_EQ);
// Add runtime flag for PS4 when PGO, coverage, or sanitizers are enabled.
if (RawTriple.isPS4CPU() &&
@@ -4198,7 +4284,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// If a std is supplied, only add -trigraphs if it follows the
// option.
bool ImplyVCPPCXXVer = false;
- if (Arg *Std = Args.getLastArg(options::OPT_std_EQ, options::OPT_ansi)) {
+ const Arg *Std = Args.getLastArg(options::OPT_std_EQ, options::OPT_ansi);
+ if (Std) {
if (Std->getOption().matches(options::OPT_ansi))
if (types::isCXX(InputType))
CmdArgs.push_back("-std=c++98");
@@ -4274,7 +4361,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fno-autolink");
// Add in -fdebug-compilation-dir if necessary.
- addDebugCompDirArg(Args, CmdArgs);
+ addDebugCompDirArg(Args, CmdArgs, D.getVFS());
addDebugPrefixMapArg(D, Args, CmdArgs);
@@ -4437,6 +4524,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_version_EQ);
Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_cuda_number_of_sm_EQ);
Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_cuda_blocks_per_sm_EQ);
+ Args.AddAllArgs(CmdArgs,
+ options::OPT_fopenmp_cuda_teams_reduction_recs_num_EQ);
if (Args.hasFlag(options::OPT_fopenmp_optimistic_collapse,
options::OPT_fno_openmp_optimistic_collapse,
/*Default=*/false))
@@ -4495,7 +4584,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_print_source_range_info);
Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_parseable_fixits);
Args.AddLastArg(CmdArgs, options::OPT_ftime_report);
+ Args.AddLastArg(CmdArgs, options::OPT_ftime_trace);
Args.AddLastArg(CmdArgs, options::OPT_ftrapv);
+ Args.AddLastArg(CmdArgs, options::OPT_malign_double);
if (Arg *A = Args.getLastArg(options::OPT_ftrapv_handler_EQ)) {
CmdArgs.push_back("-ftrapv-handler");
@@ -4584,6 +4675,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Forward -f options with positive and negative forms; we translate
// these by hand.
if (Arg *A = getLastProfileSampleUseArg(Args)) {
+ auto *PGOArg = Args.getLastArg(
+ options::OPT_fprofile_generate, options::OPT_fprofile_generate_EQ,
+ options::OPT_fcs_profile_generate, options::OPT_fcs_profile_generate_EQ,
+ options::OPT_fprofile_use, options::OPT_fprofile_use_EQ);
+ if (PGOArg)
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << "SampleUse with PGO options";
+
StringRef fname = A->getValue();
if (!llvm::sys::fs::exists(fname))
D.Diag(diag::err_drv_no_such_file) << fname;
@@ -4623,9 +4722,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fdouble_square_bracket_attributes,
options::OPT_fno_double_square_bracket_attributes);
- bool HaveModules = false;
- RenderModulesOptions(C, D, Args, Input, Output, CmdArgs, HaveModules);
-
// -faccess-control is default.
if (Args.hasFlag(options::OPT_fno_access_control,
options::OPT_faccess_control, false))
@@ -4653,7 +4749,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasFlag(
options::OPT_fuse_cxa_atexit, options::OPT_fno_use_cxa_atexit,
!RawTriple.isOSWindows() &&
- RawTriple.getOS() != llvm::Triple::Solaris &&
TC.getArch() != llvm::Triple::xcore &&
((RawTriple.getVendor() != llvm::Triple::MipsTechnologies) ||
RawTriple.hasEnvironment())) ||
@@ -4692,6 +4787,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (ImplyVCPPCXXVer) {
StringRef LanguageStandard;
if (const Arg *StdArg = Args.getLastArg(options::OPT__SLASH_std)) {
+ Std = StdArg;
LanguageStandard = llvm::StringSwitch<StringRef>(StdArg->getValue())
.Case("c++14", "-std=c++14")
.Case("c++17", "-std=c++17")
@@ -4741,9 +4837,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -fgnu-keywords default varies depending on language; only pass if
// specified.
- if (Arg *A = Args.getLastArg(options::OPT_fgnu_keywords,
- options::OPT_fno_gnu_keywords))
- A->render(Args, CmdArgs);
+ Args.AddLastArg(CmdArgs, options::OPT_fgnu_keywords,
+ options::OPT_fno_gnu_keywords);
if (Args.hasFlag(options::OPT_fgnu89_inline, options::OPT_fno_gnu89_inline,
false))
@@ -4752,10 +4847,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_fno_inline))
CmdArgs.push_back("-fno-inline");
- if (Arg* InlineArg = Args.getLastArg(options::OPT_finline_functions,
- options::OPT_finline_hint_functions,
- options::OPT_fno_inline_functions))
- InlineArg->render(Args, CmdArgs);
+ Args.AddLastArg(CmdArgs, options::OPT_finline_functions,
+ options::OPT_finline_hint_functions,
+ options::OPT_fno_inline_functions);
+
+ // FIXME: Find a better way to determine whether the language has modules
+ // support by default, or just assume that all languages do.
+ bool HaveModules =
+ Std && (Std->containsValue("c++2a") || Std->containsValue("c++latest"));
+ RenderModulesOptions(C, D, Args, Input, Output, CmdArgs, HaveModules);
Args.AddLastArg(CmdArgs, options::OPT_fexperimental_new_pass_manager,
options::OPT_fno_experimental_new_pass_manager);
@@ -4944,12 +5044,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
ParseMPreferVectorWidth(D, Args, CmdArgs);
- if (Arg *A = Args.getLastArg(options::OPT_fshow_overloads_EQ))
- A->render(Args, CmdArgs);
-
- if (Arg *A = Args.getLastArg(
- options::OPT_fsanitize_undefined_strip_path_components_EQ))
- A->render(Args, CmdArgs);
+ Args.AddLastArg(CmdArgs, options::OPT_fshow_overloads_EQ);
+ Args.AddLastArg(CmdArgs,
+ options::OPT_fsanitize_undefined_strip_path_components_EQ);
// -fdollars-in-identifiers default varies depending on platform and
// language; only pass if specified.
@@ -4973,8 +5070,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_apple_pragma_pack, false))
CmdArgs.push_back("-fapple-pragma-pack");
+ // Remarks can be enabled with any of the `-f.*optimization-record.*` flags.
if (Args.hasFlag(options::OPT_fsave_optimization_record,
options::OPT_foptimization_record_file_EQ,
+ options::OPT_fno_save_optimization_record, false) ||
+ Args.hasFlag(options::OPT_fsave_optimization_record_EQ,
+ options::OPT_fno_save_optimization_record, false) ||
+ Args.hasFlag(options::OPT_foptimization_record_passes_EQ,
options::OPT_fno_save_optimization_record, false)) {
CmdArgs.push_back("-opt-record-file");
@@ -5006,9 +5108,28 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- llvm::sys::path::replace_extension(F, "opt.yaml");
+ std::string Extension = "opt.";
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fsave_optimization_record_EQ))
+ Extension += A->getValue();
+ else
+ Extension += "yaml";
+
+ llvm::sys::path::replace_extension(F, Extension);
CmdArgs.push_back(Args.MakeArgString(F));
}
+
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_foptimization_record_passes_EQ)) {
+ CmdArgs.push_back("-opt-record-passes");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fsave_optimization_record_EQ)) {
+ CmdArgs.push_back("-opt-record-format");
+ CmdArgs.push_back(A->getValue());
+ }
}
bool RewriteImports = Args.hasFlag(options::OPT_frewrite_imports,
@@ -5058,6 +5179,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
A->claim();
}
+ // Forward -fpass-plugin=name.so to -cc1.
+ for (const Arg *A : Args.filtered(options::OPT_fpass_plugin_EQ)) {
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-fpass-plugin=") + A->getValue()));
+ A->claim();
+ }
+
// Setup statistics file output.
SmallString<128> StatsFile = getStatsFileName(Args, Output, Input, D);
if (!StatsFile.empty())
@@ -5104,30 +5232,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
isa<CompileJobAction>(JA))
CmdArgs.push_back("-disable-llvm-passes");
- if (Output.getType() == types::TY_Dependencies) {
- // Handled with other dependency code.
- } else if (Output.isFilename()) {
- CmdArgs.push_back("-o");
- CmdArgs.push_back(Output.getFilename());
- } else {
- assert(Output.isNothing() && "Invalid output.");
- }
-
- addDashXForInput(Args, Input, CmdArgs);
-
- ArrayRef<InputInfo> FrontendInputs = Input;
- if (IsHeaderModulePrecompile)
- FrontendInputs = ModuleHeaderInputs;
- else if (Input.isNothing())
- FrontendInputs = {};
-
- for (const InputInfo &Input : FrontendInputs) {
- if (Input.isFilename())
- CmdArgs.push_back(Input.getFilename());
- else
- Input.getInputArg().renderAsInput(Args, CmdArgs);
- }
-
Args.AddAllArgs(CmdArgs, options::OPT_undef);
const char *Exec = D.getClangProgramPath();
@@ -5271,6 +5375,17 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ if (Args.hasArg(options::OPT_forder_file_instrumentation)) {
+ CmdArgs.push_back("-forder-file-instrumentation");
+ // Enable order file instrumentation when ThinLTO is not on. When ThinLTO is
+ // on, we need to pass these flags as linker flags and that will be handled
+ // outside of the compiler.
+ if (!D.isUsingLTO()) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-enable-order-file-instrumentation");
+ }
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_fforce_enable_int128,
options::OPT_fno_force_enable_int128)) {
if (A->getOption().matches(options::OPT_fforce_enable_int128))
@@ -5314,6 +5429,40 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
TC.useIntegratedAs()))
CmdArgs.push_back("-faddrsig");
+ if (Arg *A = Args.getLastArg(options::OPT_fsymbol_partition_EQ)) {
+ std::string Str = A->getAsString(Args);
+ if (!TC.getTriple().isOSBinFormatELF())
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << Str << TC.getTripleString();
+ CmdArgs.push_back(Args.MakeArgString(Str));
+ }
+
+ // Add the "-o out -x type src.c" flags last. This is done primarily to make
+ // the -cc1 command easier to edit when reproducing compiler crashes.
+ if (Output.getType() == types::TY_Dependencies) {
+ // Handled with other dependency code.
+ } else if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ addDashXForInput(Args, Input, CmdArgs);
+
+ ArrayRef<InputInfo> FrontendInputs = Input;
+ if (IsHeaderModulePrecompile)
+ FrontendInputs = ModuleHeaderInputs;
+ else if (Input.isNothing())
+ FrontendInputs = {};
+
+ for (const InputInfo &Input : FrontendInputs) {
+ if (Input.isFilename())
+ CmdArgs.push_back(Input.getFilename());
+ else
+ Input.getInputArg().renderAsInput(Args, CmdArgs);
+ }
+
// Finally add the compile command to the compilation.
if (Args.hasArg(options::OPT__SLASH_fallback) &&
Output.getType() == types::TY_Object &&
@@ -5340,7 +5489,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
if (Arg *A = Args.getLastArg(options::OPT_pg))
- if (!shouldUseFramePointer(Args, Triple))
+ if (FPKeepKind == FramePointerKind::None)
D.Diag(diag::err_drv_argument_not_allowed_with) << "-fomit-frame-pointer"
<< A->getAsString(Args);
@@ -5552,7 +5701,7 @@ static EHFlags parseClangCLEHFlags(const Driver &D, const ArgList &Args) {
// The default is that /GX is not specified.
if (EHArgs.empty() &&
Args.hasFlag(options::OPT__SLASH_GX, options::OPT__SLASH_GX_,
- /*default=*/false)) {
+ /*Default=*/false)) {
EH.Synch = true;
EH.NoUnwindC = true;
}
@@ -5617,18 +5766,17 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
CmdArgs.push_back("--dependent-lib=oldnames");
}
- if (Arg *A = Args.getLastArg(options::OPT_show_includes))
- A->render(Args, CmdArgs);
+ Args.AddLastArg(CmdArgs, options::OPT_show_includes);
// This controls whether or not we emit RTTI data for polymorphic types.
if (Args.hasFlag(options::OPT__SLASH_GR_, options::OPT__SLASH_GR,
- /*default=*/false))
+ /*Default=*/false))
CmdArgs.push_back("-fno-rtti-data");
// This controls whether or not we emit stack-protector instrumentation.
// In MSVC, Buffer Security Check (/GS) is on by default.
if (Args.hasFlag(options::OPT__SLASH_GS, options::OPT__SLASH_GS_,
- /*default=*/true)) {
+ /*Default=*/true)) {
CmdArgs.push_back("-stack-protector");
CmdArgs.push_back(Args.MakeArgString(Twine(LangOptions::SSPStrong)));
}
@@ -5747,8 +5895,7 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
CmdArgs.push_back(DCCFlag);
}
- if (Arg *A = Args.getLastArg(options::OPT_vtordisp_mode_EQ))
- A->render(Args, CmdArgs);
+ Args.AddLastArg(CmdArgs, options::OPT_vtordisp_mode_EQ);
if (!Args.hasArg(options::OPT_fdiagnostics_format_EQ)) {
CmdArgs.push_back("-fdiagnostics-format");
@@ -5845,6 +5992,15 @@ void ClangAs::AddX86TargetArgs(const ArgList &Args,
}
}
+void ClangAs::AddRISCVTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ const llvm::Triple &Triple = getToolChain().getTriple();
+ StringRef ABIName = riscv::getRISCVABI(Args, Triple);
+
+ CmdArgs.push_back("-target-abi");
+ CmdArgs.push_back(ABIName.data());
+}
+
void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output, const InputInfoList &Inputs,
const ArgList &Args,
@@ -5932,7 +6088,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
DebugInfoKind = (WantDebug ? codegenoptions::LimitedDebugInfo
: codegenoptions::NoDebugInfo);
// Add the -fdebug-compilation-dir flag if needed.
- addDebugCompDirArg(Args, CmdArgs);
+ addDebugCompDirArg(Args, CmdArgs, C.getDriver().getVFS());
addDebugPrefixMapArg(getToolChain().getDriver(), Args, CmdArgs);
@@ -6014,6 +6170,11 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-arm-add-build-attributes");
}
break;
+
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
+ AddRISCVTargetArgs(Args, CmdArgs);
+ break;
}
// Consume all the warning flags. Usually this would be handled more
@@ -6034,10 +6195,10 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
const llvm::Triple &T = getToolChain().getTriple();
Arg *A;
- if ((getDebugFissionKind(D, Args, A) == DwarfFissionKind::Split) &&
- (T.isOSLinux() || T.isOSFuchsia())) {
- CmdArgs.push_back("-split-dwarf-file");
- CmdArgs.push_back(SplitDebugName(Args, Output));
+ if (getDebugFissionKind(D, Args, A) == DwarfFissionKind::Split &&
+ T.isOSBinFormatELF()) {
+ CmdArgs.push_back("-split-dwarf-output");
+ CmdArgs.push_back(SplitDebugName(Args, Input, Output));
}
assert(Input.isFilename() && "Invalid input.");
diff --git a/lib/Driver/ToolChains/Clang.h b/lib/Driver/ToolChains/Clang.h
index df67fb2cb331..fc4f5ecdd28a 100644
--- a/lib/Driver/ToolChains/Clang.h
+++ b/lib/Driver/ToolChains/Clang.h
@@ -1,9 +1,8 @@
//===--- Clang.h - Clang Tool and ToolChain Implementations ====-*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -120,6 +119,8 @@ public:
llvm::opt::ArgStringList &CmdArgs) const;
void AddX86TargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
+ void AddRISCVTargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
bool hasGoodDiagnostics() const override { return true; }
bool hasIntegratedAssembler() const override { return false; }
bool hasIntegratedCPP() const override { return false; }
diff --git a/lib/Driver/ToolChains/CloudABI.cpp b/lib/Driver/ToolChains/CloudABI.cpp
index 80f9fc493fd8..cc1ac3ab7e79 100644
--- a/lib/Driver/ToolChains/CloudABI.cpp
+++ b/lib/Driver/ToolChains/CloudABI.cpp
@@ -1,9 +1,8 @@
//===--- CloudABI.cpp - CloudABI ToolChain Implementations ------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/CloudABI.h b/lib/Driver/ToolChains/CloudABI.h
index 7464c5954555..cc381c2b1e1f 100644
--- a/lib/Driver/ToolChains/CloudABI.h
+++ b/lib/Driver/ToolChains/CloudABI.h
@@ -1,9 +1,8 @@
//===--- CloudABI.h - CloudABI ToolChain Implementations --------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/CommonArgs.cpp b/lib/Driver/ToolChains/CommonArgs.cpp
index d7e316befa61..99691cb43dc4 100644
--- a/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/lib/Driver/ToolChains/CommonArgs.cpp
@@ -1,9 +1,8 @@
//===--- CommonArgs.cpp - Args handling for multiple toolchains -*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -444,6 +443,35 @@ void tools::AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
Args.MakeArgString(Twine("-plugin-opt=sample-profile=") + FName));
}
+ auto *CSPGOGenerateArg = Args.getLastArg(options::OPT_fcs_profile_generate,
+ options::OPT_fcs_profile_generate_EQ,
+ options::OPT_fno_profile_generate);
+ if (CSPGOGenerateArg &&
+ CSPGOGenerateArg->getOption().matches(options::OPT_fno_profile_generate))
+ CSPGOGenerateArg = nullptr;
+
+ auto *ProfileUseArg = getLastProfileUseArg(Args);
+
+ if (CSPGOGenerateArg) {
+ CmdArgs.push_back(Args.MakeArgString("-plugin-opt=cs-profile-generate"));
+ if (CSPGOGenerateArg->getOption().matches(
+ options::OPT_fcs_profile_generate_EQ)) {
+ SmallString<128> Path(CSPGOGenerateArg->getValue());
+ llvm::sys::path::append(Path, "default_%m.profraw");
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-plugin-opt=cs-profile-path=") + Path));
+ } else
+ CmdArgs.push_back(
+ Args.MakeArgString("-plugin-opt=cs-profile-path=default_%m.profraw"));
+ } else if (ProfileUseArg) {
+ SmallString<128> Path(
+ ProfileUseArg->getNumValues() == 0 ? "" : ProfileUseArg->getValue());
+ if (Path.empty() || llvm::sys::fs::is_directory(Path))
+ llvm::sys::path::append(Path, "default.profdata");
+ CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=cs-profile-path=") +
+ Path));
+ }
+
// Need this flag to turn on new pass manager via Gold plugin.
if (Args.hasFlag(options::OPT_fexperimental_new_pass_manager,
options::OPT_fno_experimental_new_pass_manager,
@@ -511,7 +539,8 @@ static void addSanitizerRuntime(const ToolChain &TC, const ArgList &Args,
// Wrap any static runtimes that must be forced into executable in
// whole-archive.
if (IsWhole) CmdArgs.push_back("--whole-archive");
- CmdArgs.push_back(TC.getCompilerRTArgString(Args, Sanitizer, IsShared));
+ CmdArgs.push_back(TC.getCompilerRTArgString(
+ Args, Sanitizer, IsShared ? ToolChain::FT_Shared : ToolChain::FT_Static));
if (IsWhole) CmdArgs.push_back("--no-whole-archive");
if (IsShared) {
@@ -541,40 +570,6 @@ static bool addSanitizerDynamicList(const ToolChain &TC, const ArgList &Args,
return false;
}
-static void addSanitizerLibPath(const ToolChain &TC, const ArgList &Args,
- ArgStringList &CmdArgs, StringRef Name) {
- for (const auto &LibPath : TC.getLibraryPaths()) {
- if (!LibPath.empty()) {
- SmallString<128> P(LibPath);
- llvm::sys::path::append(P, Name);
- if (TC.getVFS().exists(P))
- CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + P));
- }
- }
-}
-
-void tools::addSanitizerPathLibArgs(const ToolChain &TC, const ArgList &Args,
- ArgStringList &CmdArgs) {
- const SanitizerArgs &SanArgs = TC.getSanitizerArgs();
- if (SanArgs.needsAsanRt()) {
- addSanitizerLibPath(TC, Args, CmdArgs, "asan");
- }
- if (SanArgs.needsHwasanRt()) {
- addSanitizerLibPath(TC, Args, CmdArgs, "hwasan");
- }
- if (SanArgs.needsLsanRt()) {
- addSanitizerLibPath(TC, Args, CmdArgs, "lsan");
- }
- if (SanArgs.needsMsanRt()) {
- addSanitizerLibPath(TC, Args, CmdArgs, "msan");
- }
- if (SanArgs.needsTsanRt()) {
- addSanitizerLibPath(TC, Args, CmdArgs, "tsan");
- }
-}
-
-
-
void tools::linkSanitizerRuntimeDeps(const ToolChain &TC,
ArgStringList &CmdArgs) {
// Force linking against the system libraries sanitizers depends on
@@ -689,8 +684,6 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
NonWholeStaticRuntimes.push_back("stats");
RequiredSymbols.push_back("__sanitizer_stats_register");
}
- if (SanArgs.needsEsanRt())
- StaticRuntimes.push_back("esan");
if (SanArgs.needsScudoRt()) {
if (SanArgs.requiresMinimalRuntime()) {
StaticRuntimes.push_back("scudo_minimal");
@@ -758,9 +751,9 @@ bool tools::addXRayRuntime(const ToolChain&TC, const ArgList &Args, ArgStringLis
if (TC.getXRayArgs().needsXRayRt()) {
CmdArgs.push_back("-whole-archive");
- CmdArgs.push_back(TC.getCompilerRTArgString(Args, "xray", false));
+ CmdArgs.push_back(TC.getCompilerRTArgString(Args, "xray"));
for (const auto &Mode : TC.getXRayArgs().modeList())
- CmdArgs.push_back(TC.getCompilerRTArgString(Args, Mode, false));
+ CmdArgs.push_back(TC.getCompilerRTArgString(Args, Mode));
CmdArgs.push_back("-no-whole-archive");
return true;
}
@@ -789,18 +782,26 @@ bool tools::areOptimizationsEnabled(const ArgList &Args) {
return false;
}
-const char *tools::SplitDebugName(const ArgList &Args,
+const char *tools::SplitDebugName(const ArgList &Args, const InputInfo &Input,
const InputInfo &Output) {
- SmallString<128> F(Output.isFilename()
- ? Output.getFilename()
- : llvm::sys::path::stem(Output.getBaseInput()));
-
if (Arg *A = Args.getLastArg(options::OPT_gsplit_dwarf_EQ))
if (StringRef(A->getValue()) == "single")
- return Args.MakeArgString(F);
+ return Args.MakeArgString(Output.getFilename());
- llvm::sys::path::replace_extension(F, "dwo");
- return Args.MakeArgString(F);
+ Arg *FinalOutput = Args.getLastArg(options::OPT_o);
+ if (FinalOutput && Args.hasArg(options::OPT_c)) {
+ SmallString<128> T(FinalOutput->getValue());
+ llvm::sys::path::replace_extension(T, "dwo");
+ return Args.MakeArgString(T);
+ } else {
+ // Use the compilation dir.
+ SmallString<128> T(
+ Args.getLastArgValue(options::OPT_fdebug_compilation_dir));
+ SmallString<128> F(llvm::sys::path::stem(Input.getBaseInput()));
+ llvm::sys::path::replace_extension(F, "dwo");
+ T += F;
+ return Args.MakeArgString(F);
+ }
}
void tools::SplitDebugInfo(const ToolChain &TC, Compilation &C, const Tool &T,
@@ -1132,46 +1133,72 @@ bool tools::isObjCAutoRefCount(const ArgList &Args) {
return Args.hasFlag(options::OPT_fobjc_arc, options::OPT_fno_objc_arc, false);
}
-static void AddLibgcc(const llvm::Triple &Triple, const Driver &D,
- ArgStringList &CmdArgs, const ArgList &Args) {
- bool isAndroid = Triple.isAndroid();
- bool isCygMing = Triple.isOSCygMing();
- bool IsIAMCU = Triple.isOSIAMCU();
- bool StaticLibgcc = Args.hasArg(options::OPT_static_libgcc) ||
- Args.hasArg(options::OPT_static);
-
- bool SharedLibgcc = Args.hasArg(options::OPT_shared_libgcc);
- bool UnspecifiedLibgcc = !StaticLibgcc && !SharedLibgcc;
+enum class LibGccType { UnspecifiedLibGcc, StaticLibGcc, SharedLibGcc };
- // Gcc adds libgcc arguments in various ways:
- //
- // gcc <none>: -lgcc --as-needed -lgcc_s --no-as-needed
- // g++ <none>: -lgcc_s -lgcc
- // gcc shared: -lgcc_s -lgcc
- // g++ shared: -lgcc_s -lgcc
- // gcc static: -lgcc -lgcc_eh
- // g++ static: -lgcc -lgcc_eh
- //
- // Also, certain targets need additional adjustments.
+static LibGccType getLibGccType(const Driver &D, const ArgList &Args) {
+ if (Args.hasArg(options::OPT_static_libgcc) ||
+ Args.hasArg(options::OPT_static) || Args.hasArg(options::OPT_static_pie))
+ return LibGccType::StaticLibGcc;
+ if (Args.hasArg(options::OPT_shared_libgcc) || D.CCCIsCXX())
+ return LibGccType::SharedLibGcc;
+ return LibGccType::UnspecifiedLibGcc;
+}
- bool LibGccFirst = (D.CCCIsCC() && UnspecifiedLibgcc) || StaticLibgcc;
- if (LibGccFirst)
- CmdArgs.push_back("-lgcc");
+// Gcc adds libgcc arguments in various ways:
+//
+// gcc <none>: -lgcc --as-needed -lgcc_s --no-as-needed
+// g++ <none>: -lgcc_s -lgcc
+// gcc shared: -lgcc_s -lgcc
+// g++ shared: -lgcc_s -lgcc
+// gcc static: -lgcc -lgcc_eh
+// g++ static: -lgcc -lgcc_eh
+// gcc static-pie: -lgcc -lgcc_eh
+// g++ static-pie: -lgcc -lgcc_eh
+//
+// Also, certain targets need additional adjustments.
+
+static void AddUnwindLibrary(const ToolChain &TC, const Driver &D,
+ ArgStringList &CmdArgs, const ArgList &Args) {
+ ToolChain::UnwindLibType UNW = TC.GetUnwindLibType(Args);
+ // Targets that don't use unwind libraries.
+ if (TC.getTriple().isAndroid() || TC.getTriple().isOSIAMCU() ||
+ TC.getTriple().isOSBinFormatWasm() ||
+ UNW == ToolChain::UNW_None)
+ return;
- bool AsNeeded = D.CCCIsCC() && UnspecifiedLibgcc && !isAndroid && !isCygMing;
+ LibGccType LGT = getLibGccType(D, Args);
+ bool AsNeeded = LGT == LibGccType::UnspecifiedLibGcc &&
+ !TC.getTriple().isAndroid() && !TC.getTriple().isOSCygMing();
if (AsNeeded)
CmdArgs.push_back("--as-needed");
- if ((UnspecifiedLibgcc || SharedLibgcc) && !isAndroid)
- CmdArgs.push_back("-lgcc_s");
-
- else if (StaticLibgcc && !isAndroid && !IsIAMCU)
- CmdArgs.push_back("-lgcc_eh");
+ switch (UNW) {
+ case ToolChain::UNW_None:
+ return;
+ case ToolChain::UNW_Libgcc: {
+ LibGccType LGT = getLibGccType(D, Args);
+ if (LGT == LibGccType::StaticLibGcc)
+ CmdArgs.push_back("-lgcc_eh");
+ else
+ CmdArgs.push_back("-lgcc_s");
+ break;
+ }
+ case ToolChain::UNW_CompilerRT:
+ CmdArgs.push_back("-lunwind");
+ break;
+ }
if (AsNeeded)
CmdArgs.push_back("--no-as-needed");
+}
- if (!LibGccFirst)
+static void AddLibgcc(const ToolChain &TC, const Driver &D,
+ ArgStringList &CmdArgs, const ArgList &Args) {
+ LibGccType LGT = getLibGccType(D, Args);
+ if (LGT != LibGccType::SharedLibGcc)
+ CmdArgs.push_back("-lgcc");
+ AddUnwindLibrary(TC, D, CmdArgs, Args);
+ if (LGT == LibGccType::SharedLibGcc)
CmdArgs.push_back("-lgcc");
// According to Android ABI, we have to link with libdl if we are
@@ -1179,7 +1206,7 @@ static void AddLibgcc(const llvm::Triple &Triple, const Driver &D,
//
// NOTE: This fixes a link error on Android MIPS as well. The non-static
// libgcc for MIPS relies on _Unwind_Find_FDE and dl_iterate_phdr from libdl.
- if (isAndroid && !StaticLibgcc)
+ if (TC.getTriple().isAndroid() && LGT != LibGccType::StaticLibGcc)
CmdArgs.push_back("-ldl");
}
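
To summarize the unwinder selection introduced above: a compact, hypothetical sketch of which library AddUnwindLibrary links for each combination, ignoring the --as-needed wrapping and the targets (Android, IAMCU, wasm) that skip unwind libraries entirely.

enum class UnwindLib { None, Libgcc, CompilerRT };
enum class GccLib { Unspecified, Static, Shared };

// Simplified model of AddUnwindLibrary; the real code takes the ToolChain,
// Driver, and ArgList rather than these pre-computed enums.
const char *unwindLibArg(UnwindLib UNW, GccLib LGT) {
  switch (UNW) {
  case UnwindLib::None:       return nullptr;    // nothing to link
  case UnwindLib::Libgcc:     return LGT == GccLib::Static ? "-lgcc_eh" : "-lgcc_s";
  case UnwindLib::CompilerRT: return "-lunwind";
  }
  return nullptr;
}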
@@ -1191,6 +1218,7 @@ void tools::AddRunTimeLibs(const ToolChain &TC, const Driver &D,
switch (RLT) {
case ToolChain::RLT_CompilerRT:
CmdArgs.push_back(TC.getCompilerRTArgString(Args, "builtins"));
+ AddUnwindLibrary(TC, D, CmdArgs, Args);
break;
case ToolChain::RLT_Libgcc:
// Make sure libgcc is not used under MSVC environment by default
@@ -1202,7 +1230,7 @@ void tools::AddRunTimeLibs(const ToolChain &TC, const Driver &D,
<< Args.getLastArg(options::OPT_rtlib_EQ)->getValue() << "MSVC";
}
} else
- AddLibgcc(TC.getTriple(), D, CmdArgs, Args);
+ AddLibgcc(TC, D, CmdArgs, Args);
break;
}
}
@@ -1463,3 +1491,8 @@ SmallString<128> tools::getStatsFileName(const llvm::opt::ArgList &Args,
llvm::sys::path::replace_extension(StatsFile, "stats");
return StatsFile;
}
+
+void tools::addMultilibFlag(bool Enabled, const char *const Flag,
+ Multilib::flags_list &Flags) {
+ Flags.push_back(std::string(Enabled ? "+" : "-") + Flag);
+}
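
A hedged usage sketch for the addMultilibFlag helper added above: toolchains call it once per feature to build the "+flag"/"-flag" list that Multilib selection matches against. The flag spellings in the comments are illustrative, not taken from any specific toolchain.

#include <string>
#include <vector>

using flags_list = std::vector<std::string>;   // stand-in for Multilib::flags_list

// Mirrors the helper above: '+' when the feature is enabled, '-' otherwise.
void addMultilibFlag(bool Enabled, const char *const Flag, flags_list &Flags) {
  Flags.push_back(std::string(Enabled ? "+" : "-") + Flag);
}

// Example: addMultilibFlag(IsThumb, "mthumb", Flags) followed by
// addMultilibFlag(IsHardFloat, "mfloat-abi=hard", Flags) yields
// {"+mthumb", "-mfloat-abi=hard"} for a soft-float Thumb configuration.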
diff --git a/lib/Driver/ToolChains/CommonArgs.h b/lib/Driver/ToolChains/CommonArgs.h
index 3704b2e01b54..9a311708f3ae 100644
--- a/lib/Driver/ToolChains/CommonArgs.h
+++ b/lib/Driver/ToolChains/CommonArgs.h
@@ -1,9 +1,8 @@
//===--- CommonArgs.h - Args handling for multiple toolchains ---*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -12,6 +11,7 @@
#include "InputInfo.h"
#include "clang/Driver/Driver.h"
+#include "clang/Driver/Multilib.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
#include "llvm/Support/CodeGen.h"
@@ -32,10 +32,6 @@ void claimNoWarnArgs(const llvm::opt::ArgList &Args);
bool addSanitizerRuntimes(const ToolChain &TC, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs);
-void addSanitizerPathLibArgs(const ToolChain &TC,
- const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs);
-
void linkSanitizerRuntimeDeps(const ToolChain &TC,
llvm::opt::ArgStringList &CmdArgs);
@@ -63,7 +59,7 @@ void AddHIPLinkerScript(const ToolChain &TC, Compilation &C,
const Tool &T);
const char *SplitDebugName(const llvm::opt::ArgList &Args,
- const InputInfo &Output);
+ const InputInfo &Input, const InputInfo &Output);
void SplitDebugInfo(const ToolChain &TC, Compilation &C, const Tool &T,
const JobAction &JA, const llvm::opt::ArgList &Args,
@@ -122,6 +118,12 @@ void handleTargetFeaturesGroup(const llvm::opt::ArgList &Args,
SmallString<128> getStatsFileName(const llvm::opt::ArgList &Args,
const InputInfo &Output,
const InputInfo &Input, const Driver &D);
+
+/// \p Flag must be a flag accepted by the driver with its leading '-' removed,
+// otherwise '-print-multi-lib' will not emit them correctly.
+void addMultilibFlag(bool Enabled, const char *const Flag,
+ Multilib::flags_list &Flags);
+
} // end namespace tools
} // end namespace driver
} // end namespace clang
diff --git a/lib/Driver/ToolChains/Contiki.cpp b/lib/Driver/ToolChains/Contiki.cpp
index 7f74bfcb4b01..5dda1b1b09fb 100644
--- a/lib/Driver/ToolChains/Contiki.cpp
+++ b/lib/Driver/ToolChains/Contiki.cpp
@@ -1,9 +1,8 @@
//===--- Contiki.cpp - Contiki ToolChain Implementations --------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Contiki.h b/lib/Driver/ToolChains/Contiki.h
index 86d59ac92b16..627d80bdda09 100644
--- a/lib/Driver/ToolChains/Contiki.h
+++ b/lib/Driver/ToolChains/Contiki.h
@@ -1,9 +1,8 @@
//===--- Contiki.h - Contiki ToolChain Implementations ----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/CrossWindows.cpp b/lib/Driver/ToolChains/CrossWindows.cpp
index 795356026fbe..bd3a6e11c928 100644
--- a/lib/Driver/ToolChains/CrossWindows.cpp
+++ b/lib/Driver/ToolChains/CrossWindows.cpp
@@ -1,9 +1,8 @@
-//===--- CrossWindowsToolChain.cpp - Cross Windows Tool Chain -------------===//
+//===-- CrossWindows.cpp - Cross Windows Tool Chain -----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -278,6 +277,8 @@ AddCXXStdlibLibArgs(const llvm::opt::ArgList &DriverArgs,
clang::SanitizerMask CrossWindowsToolChain::getSupportedSanitizers() const {
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
+ Res |= SanitizerKind::PointerCompare;
+ Res |= SanitizerKind::PointerSubtract;
return Res;
}
diff --git a/lib/Driver/ToolChains/CrossWindows.h b/lib/Driver/ToolChains/CrossWindows.h
index 2f66446ec732..7267a35d48b9 100644
--- a/lib/Driver/ToolChains/CrossWindows.h
+++ b/lib/Driver/ToolChains/CrossWindows.h
@@ -1,9 +1,8 @@
//===--- CrossWindows.h - CrossWindows ToolChain Implementation -*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Cuda.cpp b/lib/Driver/ToolChains/Cuda.cpp
index 57b8d4340e3b..96f8c513bb56 100644
--- a/lib/Driver/ToolChains/Cuda.cpp
+++ b/lib/Driver/ToolChains/Cuda.cpp
@@ -1,9 +1,8 @@
//===--- Cuda.cpp - Cuda Tool and ToolChain Implementations -----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -61,6 +60,8 @@ static CudaVersion ParseCudaVersionFile(llvm::StringRef V) {
return CudaVersion::CUDA_92;
if (Major == 10 && Minor == 0)
return CudaVersion::CUDA_100;
+ if (Major == 10 && Minor == 1)
+ return CudaVersion::CUDA_101;
return CudaVersion::UNKNOWN;
}
@@ -453,7 +454,8 @@ void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
assert(TC.getTriple().isNVPTX() && "Wrong platform");
ArgStringList CmdArgs;
- CmdArgs.push_back("--cuda");
+ if (TC.CudaInstallation.version() <= CudaVersion::CUDA_100)
+ CmdArgs.push_back("--cuda");
CmdArgs.push_back(TC.getTriple().isArch64Bit() ? "-64" : "-32");
CmdArgs.push_back(Args.MakeArgString("--create"));
CmdArgs.push_back(Args.MakeArgString(Output.getFilename()));
@@ -643,28 +645,41 @@ void CudaToolChain::addClangTargetOptions(
CC1Args.push_back("-mlink-builtin-bitcode");
CC1Args.push_back(DriverArgs.MakeArgString(LibDeviceFile));
- // Libdevice in CUDA-7.0 requires PTX version that's more recent than LLVM
- // defaults to. Use PTX4.2 by default, which is the PTX version that came with
- // CUDA-7.0.
- const char *PtxFeature = "+ptx42";
- // TODO(tra): CUDA-10+ needs PTX 6.3 to support new features. However that
- // requires fair amount of work on LLVM side. We'll keep using PTX 6.1 until
- // all prerequisites are in place.
- if (CudaInstallation.version() >= CudaVersion::CUDA_91) {
- // CUDA-9.1 uses new instructions that are only available in PTX6.1+
- PtxFeature = "+ptx61";
- } else if (CudaInstallation.version() >= CudaVersion::CUDA_90) {
- // CUDA-9.0 uses new instructions that are only available in PTX6.0+
- PtxFeature = "+ptx60";
+  // New CUDA versions often introduce new instructions that are only supported
+  // by a newer PTX version, so we need to raise the PTX level to enable them in
+  // the NVPTX back-end.
+ const char *PtxFeature = nullptr;
+ switch(CudaInstallation.version()) {
+ case CudaVersion::CUDA_101:
+ PtxFeature = "+ptx64";
+ break;
+ case CudaVersion::CUDA_100:
+ PtxFeature = "+ptx63";
+ break;
+ case CudaVersion::CUDA_92:
+ PtxFeature = "+ptx61";
+ break;
+ case CudaVersion::CUDA_91:
+ PtxFeature = "+ptx61";
+ break;
+ case CudaVersion::CUDA_90:
+ PtxFeature = "+ptx60";
+ break;
+ default:
+ PtxFeature = "+ptx42";
}
CC1Args.append({"-target-feature", PtxFeature});
if (DriverArgs.hasFlag(options::OPT_fcuda_short_ptr,
options::OPT_fno_cuda_short_ptr, false))
CC1Args.append({"-mllvm", "--nvptx-short-ptr"});
+ if (CudaInstallation.version() >= CudaVersion::UNKNOWN)
+ CC1Args.push_back(DriverArgs.MakeArgString(
+ Twine("-target-sdk-version=") +
+ CudaVersionToString(CudaInstallation.version())));
+
if (DeviceOffloadingKind == Action::OFK_OpenMP) {
SmallVector<StringRef, 8> LibraryPaths;
-
if (const Arg *A = DriverArgs.getLastArg(options::OPT_libomptarget_nvptx_path_EQ))
LibraryPaths.push_back(A->getValue());
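The Cuda.cpp hunk above replaces the chained if/else with a switch that selects the PTX feature each CUDA release requires, falling back to PTX 4.2 for older installations. A standalone sketch of that mapping, with a plain enum standing in for clang's CudaVersion:

    // Standalone sketch; the enum is an illustrative stand-in for clang's CudaVersion.
    #include <iostream>

    enum class CudaVersion { UNKNOWN, CUDA_90, CUDA_91, CUDA_92, CUDA_100, CUDA_101 };

    // Same shape as the switch in CudaToolChain::addClangTargetOptions: newer CUDA
    // releases need a newer PTX ISA; anything older falls back to PTX 4.2.
    static const char *ptxFeatureFor(CudaVersion V) {
      switch (V) {
      case CudaVersion::CUDA_101: return "+ptx64";
      case CudaVersion::CUDA_100: return "+ptx63";
      case CudaVersion::CUDA_92:
      case CudaVersion::CUDA_91:  return "+ptx61";
      case CudaVersion::CUDA_90:  return "+ptx60";
      default:                    return "+ptx42";
      }
    }

    int main() {
      std::cout << ptxFeatureFor(CudaVersion::CUDA_100) << "\n"; // prints "+ptx63"
      return 0;
    }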
diff --git a/lib/Driver/ToolChains/Cuda.h b/lib/Driver/ToolChains/Cuda.h
index 1d63ede41155..4ee8b6f1fea9 100644
--- a/lib/Driver/ToolChains/Cuda.h
+++ b/lib/Driver/ToolChains/Cuda.h
@@ -1,9 +1,8 @@
//===--- Cuda.h - Cuda ToolChain Implementations ----------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Darwin.cpp b/lib/Driver/ToolChains/Darwin.cpp
index c395c9a4430e..5de7d7132df8 100644
--- a/lib/Driver/ToolChains/Darwin.cpp
+++ b/lib/Driver/ToolChains/Darwin.cpp
@@ -1,9 +1,8 @@
//===--- Darwin.cpp - Darwin Tool and ToolChain Implementations -*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -12,6 +11,7 @@
#include "CommonArgs.h"
#include "clang/Basic/AlignedAllocation.h"
#include "clang/Basic/ObjCRuntime.h"
+#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
@@ -462,6 +462,7 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// For LTO, pass the name of the optimization record file and other
// opt-remarks flags.
if (Args.hasFlag(options::OPT_fsave_optimization_record,
+ options::OPT_fsave_optimization_record_EQ,
options::OPT_fno_save_optimization_record, false)) {
CmdArgs.push_back("-mllvm");
CmdArgs.push_back("-lto-pass-remarks-output");
@@ -469,7 +470,13 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
SmallString<128> F;
F = Output.getFilename();
- F += ".opt.yaml";
+ F += ".opt.";
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fsave_optimization_record_EQ))
+ F += A->getValue();
+ else
+ F += "yaml";
+
CmdArgs.push_back(Args.MakeArgString(F));
if (getLastProfileUseArg(Args)) {
@@ -484,20 +491,53 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(Opt));
}
}
+
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_foptimization_record_passes_EQ)) {
+ CmdArgs.push_back("-mllvm");
+ std::string Passes =
+ std::string("-lto-pass-remarks-filter=") + A->getValue();
+ CmdArgs.push_back(Args.MakeArgString(Passes));
+ }
+
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fsave_optimization_record_EQ)) {
+ CmdArgs.push_back("-mllvm");
+ std::string Format =
+ std::string("-lto-pass-remarks-format=") + A->getValue();
+ CmdArgs.push_back(Args.MakeArgString(Format));
+ }
}
// Propagate the -moutline flag to the linker in LTO.
- if (Args.hasFlag(options::OPT_moutline, options::OPT_mno_outline, false)) {
- if (getMachOToolChain().getMachOArchName(Args) == "arm64") {
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back("-enable-machine-outliner");
+ if (Arg *A =
+ Args.getLastArg(options::OPT_moutline, options::OPT_mno_outline)) {
+ if (A->getOption().matches(options::OPT_moutline)) {
+ if (getMachOToolChain().getMachOArchName(Args) == "arm64") {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-enable-machine-outliner");
- // Outline from linkonceodr functions by default in LTO.
+ // Outline from linkonceodr functions by default in LTO.
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-enable-linkonceodr-outlining");
+ }
+ } else {
+ // Disable all outlining behaviour if we have mno-outline. We need to do
+ // this explicitly, because targets which support default outlining will
+ // try to do work if we don't.
CmdArgs.push_back("-mllvm");
- CmdArgs.push_back("-enable-linkonceodr-outlining");
+ CmdArgs.push_back("-enable-machine-outliner=never");
}
}
+ // Setup statistics file output.
+ SmallString<128> StatsFile =
+ getStatsFileName(Args, Output, Inputs[0], getToolChain().getDriver());
+ if (!StatsFile.empty()) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back(Args.MakeArgString("-lto-stats-file=" + StatsFile.str()));
+ }
+
// It seems that the 'e' option is completely ignored for dynamic executables
// (the default), and with static executables, the last one wins, as expected.
Args.AddAllArgs(CmdArgs, {options::OPT_d_Flag, options::OPT_s, options::OPT_t,
@@ -569,15 +609,26 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (getToolChain().ShouldLinkCXXStdlib(Args))
getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+
+ bool NoStdOrDefaultLibs =
+ Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs);
+ bool ForceLinkBuiltins = Args.hasArg(options::OPT_fapple_link_rtlib);
+ if (!NoStdOrDefaultLibs || ForceLinkBuiltins) {
// link_ssp spec is empty.
- // Let the tool chain choose which runtime library to link.
- getMachOToolChain().AddLinkRuntimeLibArgs(Args, CmdArgs);
+ // If we have both -nostdlib/nodefaultlibs and -fapple-link-rtlib then
+ // we just want to link the builtins, not the other libs like libSystem.
+ if (NoStdOrDefaultLibs && ForceLinkBuiltins) {
+ getMachOToolChain().AddLinkRuntimeLib(Args, CmdArgs, "builtins");
+ } else {
+ // Let the tool chain choose which runtime library to link.
+ getMachOToolChain().AddLinkRuntimeLibArgs(Args, CmdArgs,
+ ForceLinkBuiltins);
- // No need to do anything for pthreads. Claim argument to avoid warning.
- Args.ClaimAllArgs(options::OPT_pthread);
- Args.ClaimAllArgs(options::OPT_pthreads);
+ // No need to do anything for pthreads. Claim argument to avoid warning.
+ Args.ClaimAllArgs(options::OPT_pthread);
+ Args.ClaimAllArgs(options::OPT_pthreads);
+ }
}
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
@@ -869,6 +920,18 @@ void DarwinClang::addClangWarningOptions(ArgStringList &CC1Args) const {
}
}
+/// Take a path that speculatively points into Xcode and return the
+/// `XCODE/Contents/Developer` path if it is an Xcode path, or an empty path
+/// otherwise.
+static StringRef getXcodeDeveloperPath(StringRef PathIntoXcode) {
+ static constexpr llvm::StringLiteral XcodeAppSuffix(
+ ".app/Contents/Developer");
+ size_t Index = PathIntoXcode.find(XcodeAppSuffix);
+ if (Index == StringRef::npos)
+ return "";
+ return PathIntoXcode.take_front(Index + XcodeAppSuffix.size());
+}
+
void DarwinClang::AddLinkARCArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
// Avoid linking compatibility stubs on i386 mac.
@@ -881,10 +944,27 @@ void DarwinClang::AddLinkARCArgs(const ArgList &Args,
runtime.hasSubscripting())
return;
- CmdArgs.push_back("-force_load");
SmallString<128> P(getDriver().ClangExecutable);
llvm::sys::path::remove_filename(P); // 'clang'
llvm::sys::path::remove_filename(P); // 'bin'
+
+ // 'libarclite' usually lives in the same toolchain as 'clang'. However, the
+ // Swift open source toolchains for macOS distribute Clang without libarclite.
+ // In that case, to allow the linker to find 'libarclite', we point to the
+ // 'libarclite' in the XcodeDefault toolchain instead.
+ if (getXcodeDeveloperPath(P).empty()) {
+ if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
+ // Try to infer the path to 'libarclite' in the toolchain from the
+ // specified SDK path.
+ StringRef XcodePathForSDK = getXcodeDeveloperPath(A->getValue());
+ if (!XcodePathForSDK.empty()) {
+ P = XcodePathForSDK;
+ llvm::sys::path::append(P, "Toolchains/XcodeDefault.xctoolchain/usr");
+ }
+ }
+ }
+
+ CmdArgs.push_back("-force_load");
llvm::sys::path::append(P, "lib", "arc", "libarclite_");
// Mash in the platform.
if (isTargetWatchOSSimulator())
@@ -1049,7 +1129,6 @@ void Darwin::addProfileRTLibs(const ArgList &Args,
addExportedSymbol(CmdArgs, "___llvm_profile_filename");
addExportedSymbol(CmdArgs, "___llvm_profile_raw_version");
addExportedSymbol(CmdArgs, "_lprofCurFilename");
- addExportedSymbol(CmdArgs, "_lprofMergeValueProfData");
}
addExportedSymbol(CmdArgs, "_lprofDirMode");
}
@@ -1076,7 +1155,8 @@ ToolChain::RuntimeLibType DarwinClang::GetRuntimeLibType(
}
void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
- ArgStringList &CmdArgs) const {
+ ArgStringList &CmdArgs,
+ bool ForceLinkBuiltinRT) const {
// Call once to ensure diagnostic is printed if wrong value was specified
GetRuntimeLibType(Args);
@@ -1084,8 +1164,11 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
// libraries with -static.
if (Args.hasArg(options::OPT_static) ||
Args.hasArg(options::OPT_fapple_kext) ||
- Args.hasArg(options::OPT_mkernel))
+ Args.hasArg(options::OPT_mkernel)) {
+ if (ForceLinkBuiltinRT)
+ AddLinkRuntimeLib(Args, CmdArgs, "builtins");
return;
+ }
// Reject -static-libgcc for now, we can deal with this when and if someone
// cares. This is useful in situations where someone wants to statically link
@@ -1117,8 +1200,6 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
AddLinkRuntimeLib(Args, CmdArgs, "stats_client", RLO_AlwaysLink);
AddLinkSanitizerLibArgs(Args, CmdArgs, "stats");
}
- if (Sanitize.needsEsanRt())
- AddLinkSanitizerLibArgs(Args, CmdArgs, "esan");
const XRayArgs &XRay = getXRayArgs();
if (XRay.needsXRayRt()) {
@@ -1739,6 +1820,92 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
}
}
+// Returns the effective header sysroot path to use. This comes either from
+// -isysroot or --sysroot.
+llvm::StringRef DarwinClang::GetHeaderSysroot(const llvm::opt::ArgList &DriverArgs) const {
+ if(DriverArgs.hasArg(options::OPT_isysroot))
+ return DriverArgs.getLastArgValue(options::OPT_isysroot);
+ if (!getDriver().SysRoot.empty())
+ return getDriver().SysRoot;
+ return "/";
+}
+
+void DarwinClang::AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+
+ llvm::StringRef Sysroot = GetHeaderSysroot(DriverArgs);
+
+ bool NoStdInc = DriverArgs.hasArg(options::OPT_nostdinc);
+ bool NoStdlibInc = DriverArgs.hasArg(options::OPT_nostdlibinc);
+ bool NoBuiltinInc = DriverArgs.hasArg(options::OPT_nobuiltininc);
+
+ // Add <sysroot>/usr/local/include
+ if (!NoStdInc && !NoStdlibInc) {
+ SmallString<128> P(Sysroot);
+ llvm::sys::path::append(P, "usr", "local", "include");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ }
+
+ // Add the Clang builtin headers (<resource>/include)
+ if (!NoStdInc && !NoBuiltinInc) {
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "include");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ }
+
+ if (NoStdInc || NoStdlibInc)
+ return;
+
+ // Check for configure-time C include directories.
+ llvm::StringRef CIncludeDirs(C_INCLUDE_DIRS);
+ if (!CIncludeDirs.empty()) {
+ llvm::SmallVector<llvm::StringRef, 5> dirs;
+ CIncludeDirs.split(dirs, ":");
+ for (llvm::StringRef dir : dirs) {
+ llvm::StringRef Prefix =
+ llvm::sys::path::is_absolute(dir) ? llvm::StringRef(Sysroot) : "";
+ addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
+ }
+ } else {
+ // Otherwise, add <sysroot>/usr/include.
+ SmallString<128> P(Sysroot);
+ llvm::sys::path::append(P, "usr", "include");
+ addExternCSystemInclude(DriverArgs, CC1Args, P.str());
+ }
+}
+
+bool DarwinClang::AddGnuCPlusPlusIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ llvm::SmallString<128> Base,
+ llvm::StringRef Version,
+ llvm::StringRef ArchDir,
+ llvm::StringRef BitDir) const {
+ llvm::sys::path::append(Base, Version);
+
+ // Add the base dir
+ addSystemInclude(DriverArgs, CC1Args, Base);
+
+ // Add the multilib dirs
+ {
+ llvm::SmallString<128> P = Base;
+ if (!ArchDir.empty())
+ llvm::sys::path::append(P, ArchDir);
+ if (!BitDir.empty())
+ llvm::sys::path::append(P, BitDir);
+ addSystemInclude(DriverArgs, CC1Args, P);
+ }
+
+ // Add the backward dir
+ {
+ llvm::SmallString<128> P = Base;
+ llvm::sys::path::append(P, "backward");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ }
+
+ return getVFS().exists(Base);
+}
+
void DarwinClang::AddClangCXXStdlibIncludeArgs(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
@@ -1746,29 +1913,90 @@ void DarwinClang::AddClangCXXStdlibIncludeArgs(
// CC1Args.
// FIXME: this should not be necessary, remove usages in the frontend
// (e.g. HeaderSearchOptions::UseLibcxx) and don't pipe -stdlib.
+ // Also check whether this is used for setting library search paths.
ToolChain::AddClangCXXStdlibIncludeArgs(DriverArgs, CC1Args);
if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
DriverArgs.hasArg(options::OPT_nostdincxx))
return;
+ llvm::StringRef Sysroot = GetHeaderSysroot(DriverArgs);
+
switch (GetCXXStdlibType(DriverArgs)) {
case ToolChain::CST_Libcxx: {
- llvm::StringRef InstallDir = getDriver().getInstalledDir();
- if (InstallDir.empty())
- break;
- // On Darwin, libc++ may be installed alongside the compiler in
- // include/c++/v1.
- // Get from 'foo/bin' to 'foo/include/c++/v1'.
- SmallString<128> P = InstallDir;
- // Note that InstallDir can be relative, so we have to '..' and not
- // parent_path.
- llvm::sys::path::append(P, "..", "include", "c++", "v1");
- addSystemInclude(DriverArgs, CC1Args, P);
+ // On Darwin, libc++ is installed alongside the compiler in
+ // include/c++/v1, so get from '<install>/bin' to '<install>/include/c++/v1'.
+ {
+ llvm::SmallString<128> P = llvm::StringRef(getDriver().getInstalledDir());
+ // Note that P can be relative, so we have to '..' and not parent_path.
+ llvm::sys::path::append(P, "..", "include", "c++", "v1");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ }
+ // Also add <sysroot>/usr/include/c++/v1 unless -nostdinc is used,
+ // to match the legacy behavior in CC1.
+ if (!DriverArgs.hasArg(options::OPT_nostdinc)) {
+ llvm::SmallString<128> P = Sysroot;
+ llvm::sys::path::append(P, "usr", "include", "c++", "v1");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ }
break;
}
+
case ToolChain::CST_Libstdcxx:
- // FIXME: should we do something about it?
+ llvm::SmallString<128> UsrIncludeCxx = Sysroot;
+ llvm::sys::path::append(UsrIncludeCxx, "usr", "include", "c++");
+
+ llvm::Triple::ArchType arch = getTriple().getArch();
+ bool IsBaseFound = true;
+ switch (arch) {
+ default: break;
+
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ IsBaseFound = AddGnuCPlusPlusIncludePaths(DriverArgs, CC1Args, UsrIncludeCxx,
+ "4.2.1",
+ "powerpc-apple-darwin10",
+ arch == llvm::Triple::ppc64 ? "ppc64" : "");
+ IsBaseFound |= AddGnuCPlusPlusIncludePaths(DriverArgs, CC1Args, UsrIncludeCxx,
+ "4.0.0", "powerpc-apple-darwin10",
+ arch == llvm::Triple::ppc64 ? "ppc64" : "");
+ break;
+
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ IsBaseFound = AddGnuCPlusPlusIncludePaths(DriverArgs, CC1Args, UsrIncludeCxx,
+ "4.2.1",
+ "i686-apple-darwin10",
+ arch == llvm::Triple::x86_64 ? "x86_64" : "");
+ IsBaseFound |= AddGnuCPlusPlusIncludePaths(DriverArgs, CC1Args, UsrIncludeCxx,
+ "4.0.0", "i686-apple-darwin8",
+ "");
+ break;
+
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ IsBaseFound = AddGnuCPlusPlusIncludePaths(DriverArgs, CC1Args, UsrIncludeCxx,
+ "4.2.1",
+ "arm-apple-darwin10",
+ "v7");
+ IsBaseFound |= AddGnuCPlusPlusIncludePaths(DriverArgs, CC1Args, UsrIncludeCxx,
+ "4.2.1",
+ "arm-apple-darwin10",
+ "v6");
+ break;
+
+ case llvm::Triple::aarch64:
+ IsBaseFound = AddGnuCPlusPlusIncludePaths(DriverArgs, CC1Args, UsrIncludeCxx,
+ "4.2.1",
+ "arm64-apple-darwin10",
+ "");
+ break;
+ }
+
+ if (!IsBaseFound) {
+ getDriver().Diag(diag::warn_drv_libstdcxx_not_found);
+ }
+
break;
}
}
@@ -2056,7 +2284,8 @@ DerivedArgList *MachO::TranslateArgs(const DerivedArgList &Args,
}
void MachO::AddLinkRuntimeLibArgs(const ArgList &Args,
- ArgStringList &CmdArgs) const {
+ ArgStringList &CmdArgs,
+ bool ForceLinkBuiltinRT) const {
// Embedded targets are simple at the moment, not supporting sanitizers and
// with different libraries for each member of the product { static, PIC } x
// { hard-float, soft-float }
@@ -2290,22 +2519,27 @@ void Darwin::addStartObjectFileArgs(const ArgList &Args,
}
} else {
if (Args.hasArg(options::OPT_pg) && SupportsProfiling()) {
- if (Args.hasArg(options::OPT_static) ||
- Args.hasArg(options::OPT_object) ||
- Args.hasArg(options::OPT_preload)) {
- CmdArgs.push_back("-lgcrt0.o");
- } else {
- CmdArgs.push_back("-lgcrt1.o");
+ if (isTargetMacOS() && isMacosxVersionLT(10, 9)) {
+ if (Args.hasArg(options::OPT_static) ||
+ Args.hasArg(options::OPT_object) ||
+ Args.hasArg(options::OPT_preload)) {
+ CmdArgs.push_back("-lgcrt0.o");
+ } else {
+ CmdArgs.push_back("-lgcrt1.o");
- // darwin_crt2 spec is empty.
+ // darwin_crt2 spec is empty.
+ }
+ // By default on OS X 10.8 and later, we don't link with a crt1.o
+ // file and the linker knows to use _main as the entry point. But,
+ // when compiling with -pg, we need to link with the gcrt1.o file,
+ // so pass the -no_new_main option to tell the linker to use the
+ // "start" symbol as the entry point.
+ if (isTargetMacOS() && !isMacosxVersionLT(10, 8))
+ CmdArgs.push_back("-no_new_main");
+ } else {
+ getDriver().Diag(diag::err_drv_clang_unsupported_opt_pg_darwin)
+ << isTargetMacOS();
}
- // By default on OS X 10.8 and later, we don't link with a crt1.o
- // file and the linker knows to use _main as the entry point. But,
- // when compiling with -pg, we need to link with the gcrt1.o file,
- // so pass the -no_new_main option to tell the linker to use the
- // "start" symbol as the entry point.
- if (isTargetMacOS() && !isMacosxVersionLT(10, 8))
- CmdArgs.push_back("-no_new_main");
} else {
if (Args.hasArg(options::OPT_static) ||
Args.hasArg(options::OPT_object) ||
@@ -2357,6 +2591,8 @@ SanitizerMask Darwin::getSupportedSanitizers() const {
const bool IsX86_64 = getTriple().getArch() == llvm::Triple::x86_64;
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
+ Res |= SanitizerKind::PointerCompare;
+ Res |= SanitizerKind::PointerSubtract;
Res |= SanitizerKind::Leak;
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::FuzzerNoLink;
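Among the Darwin.cpp changes above, the libarclite fallback is the least obvious: when Clang is not installed inside Xcode, the driver derives the Xcode developer directory from the -isysroot value and points the linker at the XcodeDefault toolchain. A standalone sketch of that path derivation, using std::string in place of llvm::StringRef and the path utilities; the SDK path below is purely illustrative:

    // Standalone sketch of the getXcodeDeveloperPath() logic added above.
    #include <iostream>
    #include <string>

    // Keep everything up to and including ".app/Contents/Developer",
    // or return "" if the path does not point into an Xcode installation.
    static std::string xcodeDeveloperPath(const std::string &PathIntoXcode) {
      const std::string Suffix = ".app/Contents/Developer";
      std::string::size_type Index = PathIntoXcode.find(Suffix);
      if (Index == std::string::npos)
        return "";
      return PathIntoXcode.substr(0, Index + Suffix.size());
    }

    int main() {
      // E.g. an -isysroot value pointing at an SDK shipped inside Xcode.
      std::string SDK = "/Applications/Xcode.app/Contents/Developer/Platforms/"
                        "MacOSX.platform/Developer/SDKs/MacOSX.sdk";
      std::string Dev = xcodeDeveloperPath(SDK);
      if (!Dev.empty())
        // The driver then looks for libarclite under the default toolchain.
        std::cout << Dev + "/Toolchains/XcodeDefault.xctoolchain/usr" << "\n";
      return 0;
    }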
diff --git a/lib/Driver/ToolChains/Darwin.h b/lib/Driver/ToolChains/Darwin.h
index d753f8967a61..2dc7c85880f7 100644
--- a/lib/Driver/ToolChains/Darwin.h
+++ b/lib/Driver/ToolChains/Darwin.h
@@ -1,9 +1,8 @@
//===--- Darwin.h - Darwin ToolChain Implementations ------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -158,7 +157,8 @@ public:
/// FIXME: This API is intended for use with embedded libraries only, and is
/// misleadingly named.
virtual void AddLinkRuntimeLibArgs(const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs) const;
+ llvm::opt::ArgStringList &CmdArgs,
+ bool ForceLinkBuiltinRT = false) const;
virtual void addStartObjectFileArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const {
@@ -496,12 +496,16 @@ public:
RuntimeLibType GetRuntimeLibType(const llvm::opt::ArgList &Args) const override;
void AddLinkRuntimeLibArgs(const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs) const override;
+ llvm::opt::ArgStringList &CmdArgs,
+ bool ForceLinkBuiltinRT = false) const override;
void AddClangCXXStdlibIncludeArgs(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
@@ -528,6 +532,15 @@ private:
llvm::opt::ArgStringList &CmdArgs,
StringRef Sanitizer,
bool shared = true) const;
+
+ bool AddGnuCPlusPlusIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ llvm::SmallString<128> Base,
+ llvm::StringRef Version,
+ llvm::StringRef ArchDir,
+ llvm::StringRef BitDir) const;
+
+ llvm::StringRef GetHeaderSysroot(const llvm::opt::ArgList &DriverArgs) const;
};
} // end namespace toolchains
diff --git a/lib/Driver/ToolChains/DragonFly.cpp b/lib/Driver/ToolChains/DragonFly.cpp
index 648469e4cec5..0a7c8b1615e7 100644
--- a/lib/Driver/ToolChains/DragonFly.cpp
+++ b/lib/Driver/ToolChains/DragonFly.cpp
@@ -1,9 +1,8 @@
//===--- DragonFly.cpp - DragonFly ToolChain Implementations ----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/DragonFly.h b/lib/Driver/ToolChains/DragonFly.h
index 9a06fbd0d3c2..7e76904f1055 100644
--- a/lib/Driver/ToolChains/DragonFly.h
+++ b/lib/Driver/ToolChains/DragonFly.h
@@ -1,9 +1,8 @@
//===--- DragonFly.h - DragonFly ToolChain Implementations ------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/FreeBSD.cpp b/lib/Driver/ToolChains/FreeBSD.cpp
index 7a176d260aee..3a0bab8d07f5 100644
--- a/lib/Driver/ToolChains/FreeBSD.cpp
+++ b/lib/Driver/ToolChains/FreeBSD.cpp
@@ -1,9 +1,8 @@
//===--- FreeBSD.cpp - FreeBSD ToolChain Implementations --------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -411,6 +410,8 @@ SanitizerMask FreeBSD::getSupportedSanitizers() const {
const bool IsMIPS64 = getTriple().isMIPS64();
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
+ Res |= SanitizerKind::PointerCompare;
+ Res |= SanitizerKind::PointerSubtract;
Res |= SanitizerKind::Vptr;
if (IsX86_64 || IsMIPS64) {
Res |= SanitizerKind::Leak;
diff --git a/lib/Driver/ToolChains/FreeBSD.h b/lib/Driver/ToolChains/FreeBSD.h
index 2943e1cacfbb..adfe21da372f 100644
--- a/lib/Driver/ToolChains/FreeBSD.h
+++ b/lib/Driver/ToolChains/FreeBSD.h
@@ -1,9 +1,8 @@
//===--- FreeBSD.h - FreeBSD ToolChain Implementations ----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Fuchsia.cpp b/lib/Driver/ToolChains/Fuchsia.cpp
index de2c7411c5e4..1f5ec9ebb16d 100644
--- a/lib/Driver/ToolChains/Fuchsia.cpp
+++ b/lib/Driver/ToolChains/Fuchsia.cpp
@@ -1,9 +1,8 @@
//===--- Fuchsia.cpp - Fuchsia ToolChain Implementations --------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -16,7 +15,9 @@
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
using namespace clang::driver;
using namespace clang::driver::toolchains;
@@ -24,6 +25,8 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
+using tools::addMultilibFlag;
+
void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -99,8 +102,6 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
Args.AddAllArgs(CmdArgs, options::OPT_u);
- addSanitizerPathLibArgs(ToolChain, Args, CmdArgs);
-
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
if (D.isUsingLTO()) {
@@ -149,7 +150,8 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_fsplit_stack))
CmdArgs.push_back("--wrap=pthread_create");
- CmdArgs.push_back("-lc");
+ if (!Args.hasArg(options::OPT_nolibc))
+ CmdArgs.push_back("-lc");
}
C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
@@ -169,6 +171,52 @@ Fuchsia::Fuchsia(const Driver &D, const llvm::Triple &Triple,
llvm::sys::path::append(P, "lib");
getFilePaths().push_back(P.str());
}
+
+ auto FilePaths = [&](const Multilib &M) -> std::vector<std::string> {
+ std::vector<std::string> FP;
+ if (D.CCCIsCXX()) {
+ if (auto CXXStdlibPath = getCXXStdlibPath()) {
+ SmallString<128> P(*CXXStdlibPath);
+ llvm::sys::path::append(P, M.gccSuffix());
+ FP.push_back(P.str());
+ }
+ }
+ return FP;
+ };
+
+ Multilibs.push_back(Multilib());
+ // Use the noexcept variant with -fno-exceptions to avoid the extra overhead.
+ Multilibs.push_back(Multilib("noexcept", {}, {}, 1)
+ .flag("-fexceptions")
+ .flag("+fno-exceptions"));
+  // ASan has higher priority because we always want the instrumented version.
+ Multilibs.push_back(Multilib("asan", {}, {}, 2)
+ .flag("+fsanitize=address"));
+ // Use the asan+noexcept variant with ASan and -fno-exceptions.
+ Multilibs.push_back(Multilib("asan+noexcept", {}, {}, 3)
+ .flag("+fsanitize=address")
+ .flag("-fexceptions")
+ .flag("+fno-exceptions"));
+ Multilibs.FilterOut([&](const Multilib &M) {
+ std::vector<std::string> RD = FilePaths(M);
+ return std::all_of(RD.begin(), RD.end(), [&](std::string P) {
+ return !getVFS().exists(P);
+ });
+ });
+
+ Multilib::flags_list Flags;
+ addMultilibFlag(
+ Args.hasFlag(options::OPT_fexceptions, options::OPT_fno_exceptions, true),
+ "fexceptions", Flags);
+ addMultilibFlag(getSanitizerArgs().needsAsanRt(), "fsanitize=address", Flags);
+ Multilibs.setFilePathsCallback(FilePaths);
+
+ if (Multilibs.select(Flags, SelectedMultilib))
+ if (!SelectedMultilib.isDefault())
+ if (const auto &PathsCallback = Multilibs.filePathsCallback())
+ for (const auto &Path : PathsCallback(SelectedMultilib))
+          // Prepend the multilib path to ensure it takes precedence.
+ getFilePaths().insert(getFilePaths().begin(), Path);
}
std::string Fuchsia::ComputeEffectiveClangTriple(const ArgList &Args,
@@ -257,8 +305,8 @@ void Fuchsia::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
switch (GetCXXStdlibType(DriverArgs)) {
case ToolChain::CST_Libcxx: {
- SmallString<128> P(getDriver().ResourceDir);
- llvm::sys::path::append(P, "include", "c++", "v1");
+ SmallString<128> P(getDriver().Dir);
+ llvm::sys::path::append(P, "..", "include", "c++", "v1");
addSystemInclude(DriverArgs, CC1Args, P.str());
break;
}
@@ -283,6 +331,8 @@ void Fuchsia::AddCXXStdlibLibArgs(const ArgList &Args,
SanitizerMask Fuchsia::getSupportedSanitizers() const {
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
+ Res |= SanitizerKind::PointerCompare;
+ Res |= SanitizerKind::PointerSubtract;
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::FuzzerNoLink;
Res |= SanitizerKind::SafeStack;
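The Fuchsia.cpp hunk above registers 'noexcept', 'asan' and 'asan+noexcept' multilib variants and then selects one against the '+'/'-' flag strings built with addMultilibFlag. A simplified standalone sketch of that selection step; the real Multilib machinery additionally handles priorities, gcc suffixes and file-path callbacks, which this sketch approximates with registration order:

    // Standalone sketch; Variant is an illustrative stand-in for clang::driver::Multilib.
    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    struct Variant {
      std::string Suffix;                // e.g. "asan+noexcept"
      std::vector<std::string> Required; // e.g. {"+fsanitize=address", "-fexceptions"}
    };

    // Pick the last registered variant whose required flags are all present.
    static const Variant *select(const std::vector<Variant> &Variants,
                                 const std::vector<std::string> &Flags) {
      const Variant *Best = nullptr;
      for (const Variant &V : Variants)
        if (std::all_of(V.Required.begin(), V.Required.end(),
                        [&](const std::string &R) {
                          return std::find(Flags.begin(), Flags.end(), R) != Flags.end();
                        }))
          Best = &V;
      return Best;
    }

    int main() {
      std::vector<Variant> Variants = {
          {"", {}},
          {"noexcept", {"-fexceptions"}},
          {"asan", {"+fsanitize=address"}},
          {"asan+noexcept", {"+fsanitize=address", "-fexceptions"}},
      };
      // Flag strings as addMultilibFlag would record them for
      // '-fno-exceptions -fsanitize=address'.
      std::vector<std::string> Flags = {"-fexceptions", "+fsanitize=address"};
      if (const Variant *V = select(Variants, Flags))
        std::cout << (V->Suffix.empty() ? "<default>" : V->Suffix) << "\n"; // asan+noexcept
      return 0;
    }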
diff --git a/lib/Driver/ToolChains/Fuchsia.h b/lib/Driver/ToolChains/Fuchsia.h
index e61eddc2aad1..dd7c5c650352 100644
--- a/lib/Driver/ToolChains/Fuchsia.h
+++ b/lib/Driver/ToolChains/Fuchsia.h
@@ -1,9 +1,8 @@
//===--- Fuchsia.h - Fuchsia ToolChain Implementations ----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Gnu.cpp b/lib/Driver/ToolChains/Gnu.cpp
index 2ad45097dce8..33cdd3585c29 100644
--- a/lib/Driver/ToolChains/Gnu.cpp
+++ b/lib/Driver/ToolChains/Gnu.cpp
@@ -1,9 +1,8 @@
//===--- Gnu.cpp - Gnu Tool and ToolChain Implementations -------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -22,6 +21,7 @@
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Path.h"
@@ -34,6 +34,8 @@ using namespace clang::driver::toolchains;
using namespace clang;
using namespace llvm::opt;
+using tools::addMultilibFlag;
+
void tools::GnuTool::anchor() {}
static bool forwardToGCC(const Option &O) {
@@ -309,7 +311,7 @@ static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
static bool getPIE(const ArgList &Args, const toolchains::Linux &ToolChain) {
if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_static) ||
- Args.hasArg(options::OPT_r))
+ Args.hasArg(options::OPT_r) || Args.hasArg(options::OPT_static_pie))
return false;
Arg *A = Args.getLastArg(options::OPT_pie, options::OPT_no_pie,
@@ -319,6 +321,26 @@ static bool getPIE(const ArgList &Args, const toolchains::Linux &ToolChain) {
return A->getOption().matches(options::OPT_pie);
}
+static bool getStaticPIE(const ArgList &Args,
+ const toolchains::Linux &ToolChain) {
+ bool HasStaticPIE = Args.hasArg(options::OPT_static_pie);
+ // -no-pie is an alias for -nopie. So, handling -nopie takes care of
+ // -no-pie as well.
+ if (HasStaticPIE && Args.hasArg(options::OPT_nopie)) {
+ const Driver &D = ToolChain.getDriver();
+ const llvm::opt::OptTable &Opts = D.getOpts();
+ const char *StaticPIEName = Opts.getOptionName(options::OPT_static_pie);
+ const char *NoPIEName = Opts.getOptionName(options::OPT_nopie);
+ D.Diag(diag::err_drv_cannot_mix_options) << StaticPIEName << NoPIEName;
+ }
+ return HasStaticPIE;
+}
+
+static bool getStatic(const ArgList &Args) {
+ return Args.hasArg(options::OPT_static) &&
+ !Args.hasArg(options::OPT_static_pie);
+}
+
void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -334,6 +356,8 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const bool isAndroid = ToolChain.getTriple().isAndroid();
const bool IsIAMCU = ToolChain.getTriple().isOSIAMCU();
const bool IsPIE = getPIE(Args, ToolChain);
+ const bool IsStaticPIE = getStaticPIE(Args, ToolChain);
+ const bool IsStatic = getStatic(Args);
const bool HasCRTBeginEndFiles =
ToolChain.getTriple().hasEnvironment() ||
(ToolChain.getTriple().getVendor() != llvm::Triple::MipsTechnologies);
@@ -354,6 +378,19 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (IsPIE)
CmdArgs.push_back("-pie");
+ if (IsStaticPIE) {
+ CmdArgs.push_back("-static");
+ CmdArgs.push_back("-pie");
+ CmdArgs.push_back("--no-dynamic-linker");
+ CmdArgs.push_back("-z");
+ CmdArgs.push_back("text");
+ }
+
+ if (ToolChain.isNoExecStackDefault()) {
+ CmdArgs.push_back("-z");
+ CmdArgs.push_back("noexecstack");
+ }
+
if (Args.hasArg(options::OPT_rdynamic))
CmdArgs.push_back("-export-dynamic");
@@ -376,6 +413,11 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("--fix-cortex-a53-843419");
}
+ // Android does not allow shared text relocations. Emit a warning if the
+ // user's code contains any.
+ if (isAndroid)
+ CmdArgs.push_back("--warn-shared-textrel");
+
for (const auto &Opt : ToolChain.ExtraOpts)
CmdArgs.push_back(Opt.c_str());
@@ -389,7 +431,7 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
return;
}
- if (Args.hasArg(options::OPT_static)) {
+ if (IsStatic) {
if (Arch == llvm::Triple::arm || Arch == llvm::Triple::armeb ||
Arch == llvm::Triple::thumb || Arch == llvm::Triple::thumbeb)
CmdArgs.push_back("-Bstatic");
@@ -399,11 +441,11 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-shared");
}
- if (!Args.hasArg(options::OPT_static)) {
+ if (!IsStatic) {
if (Args.hasArg(options::OPT_rdynamic))
CmdArgs.push_back("-export-dynamic");
- if (!Args.hasArg(options::OPT_shared)) {
+ if (!Args.hasArg(options::OPT_shared) && !IsStaticPIE) {
const std::string Loader =
D.DyldPrefix + ToolChain.getDynamicLinker(Args);
CmdArgs.push_back("-dynamic-linker");
@@ -422,6 +464,8 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
crt1 = "gcrt1.o";
else if (IsPIE)
crt1 = "Scrt1.o";
+ else if (IsStaticPIE)
+ crt1 = "rcrt1.o";
else
crt1 = "crt1.o";
}
@@ -433,20 +477,29 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (IsIAMCU)
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o")));
- else {
- const char *crtbegin;
- if (Args.hasArg(options::OPT_static))
- crtbegin = isAndroid ? "crtbegin_static.o" : "crtbeginT.o";
- else if (Args.hasArg(options::OPT_shared))
- crtbegin = isAndroid ? "crtbegin_so.o" : "crtbeginS.o";
- else if (IsPIE)
- crtbegin = isAndroid ? "crtbegin_dynamic.o" : "crtbeginS.o";
- else
- crtbegin = isAndroid ? "crtbegin_dynamic.o" : "crtbegin.o";
-
- if (HasCRTBeginEndFiles)
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
- }
+ else if (HasCRTBeginEndFiles) {
+ std::string P;
+ if (ToolChain.GetRuntimeLibType(Args) == ToolChain::RLT_CompilerRT &&
+ !isAndroid) {
+ std::string crtbegin = ToolChain.getCompilerRT(Args, "crtbegin",
+ ToolChain::FT_Object);
+ if (ToolChain.getVFS().exists(crtbegin))
+ P = crtbegin;
+ }
+ if (P.empty()) {
+ const char *crtbegin;
+ if (IsStatic)
+ crtbegin = isAndroid ? "crtbegin_static.o" : "crtbeginT.o";
+ else if (Args.hasArg(options::OPT_shared))
+ crtbegin = isAndroid ? "crtbegin_so.o" : "crtbeginS.o";
+ else if (IsPIE || IsStaticPIE)
+ crtbegin = isAndroid ? "crtbegin_dynamic.o" : "crtbeginS.o";
+ else
+ crtbegin = isAndroid ? "crtbegin_dynamic.o" : "crtbegin.o";
+ P = ToolChain.GetFilePath(crtbegin);
+ }
+ CmdArgs.push_back(Args.MakeArgString(P));
+ }
// Add crtfastmath.o if available and fast math is enabled.
ToolChain.AddFastMathRuntimeIfAvailable(Args, CmdArgs);
@@ -490,7 +543,7 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib)) {
if (!Args.hasArg(options::OPT_nodefaultlibs)) {
- if (Args.hasArg(options::OPT_static))
+ if (IsStatic || IsStaticPIE)
CmdArgs.push_back("--start-group");
if (NeedsSanitizerDeps)
@@ -519,13 +572,14 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_fsplit_stack))
CmdArgs.push_back("--wrap=pthread_create");
- CmdArgs.push_back("-lc");
+ if (!Args.hasArg(options::OPT_nolibc))
+ CmdArgs.push_back("-lc");
// Add IAMCU specific libs, if needed.
if (IsIAMCU)
CmdArgs.push_back("-lgloss");
- if (Args.hasArg(options::OPT_static))
+ if (IsStatic || IsStaticPIE)
CmdArgs.push_back("--end-group");
else
AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
@@ -539,16 +593,27 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
if (!Args.hasArg(options::OPT_nostartfiles) && !IsIAMCU) {
- const char *crtend;
- if (Args.hasArg(options::OPT_shared))
- crtend = isAndroid ? "crtend_so.o" : "crtendS.o";
- else if (IsPIE)
- crtend = isAndroid ? "crtend_android.o" : "crtendS.o";
- else
- crtend = isAndroid ? "crtend_android.o" : "crtend.o";
-
- if (HasCRTBeginEndFiles)
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend)));
+ if (HasCRTBeginEndFiles) {
+ std::string P;
+ if (ToolChain.GetRuntimeLibType(Args) == ToolChain::RLT_CompilerRT &&
+ !isAndroid) {
+ std::string crtend = ToolChain.getCompilerRT(Args, "crtend",
+ ToolChain::FT_Object);
+ if (ToolChain.getVFS().exists(crtend))
+ P = crtend;
+ }
+ if (P.empty()) {
+ const char *crtend;
+ if (Args.hasArg(options::OPT_shared))
+ crtend = isAndroid ? "crtend_so.o" : "crtendS.o";
+ else if (IsPIE || IsStaticPIE)
+ crtend = isAndroid ? "crtend_android.o" : "crtendS.o";
+ else
+ crtend = isAndroid ? "crtend_android.o" : "crtend.o";
+ P = ToolChain.GetFilePath(crtend);
+ }
+ CmdArgs.push_back(Args.MakeArgString(P));
+ }
if (!isAndroid)
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
}
@@ -585,14 +650,12 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
if (const Arg *A = Args.getLastArg(options::OPT_gz, options::OPT_gz_EQ)) {
if (A->getOption().getID() == options::OPT_gz) {
- CmdArgs.push_back("-compress-debug-sections");
+ CmdArgs.push_back("--compress-debug-sections");
} else {
StringRef Value = A->getValue();
- if (Value == "none") {
- CmdArgs.push_back("-compress-debug-sections=none");
- } else if (Value == "zlib" || Value == "zlib-gnu") {
+ if (Value == "none" || Value == "zlib" || Value == "zlib-gnu") {
CmdArgs.push_back(
- Args.MakeArgString("-compress-debug-sections=" + Twine(Value)));
+ Args.MakeArgString("--compress-debug-sections=" + Twine(Value)));
} else {
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getOption().getName() << Value;
@@ -600,6 +663,10 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
}
}
+ if (getToolChain().isNoExecStackDefault()) {
+ CmdArgs.push_back("--noexecstack");
+ }
+
switch (getToolChain().getArch()) {
default:
break;
@@ -652,14 +719,16 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
case llvm::Triple::sparcel: {
CmdArgs.push_back("-32");
std::string CPU = getCPUName(Args, getToolChain().getTriple());
- CmdArgs.push_back(sparc::getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
+ CmdArgs.push_back(
+ sparc::getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
break;
}
case llvm::Triple::sparcv9: {
CmdArgs.push_back("-64");
std::string CPU = getCPUName(Args, getToolChain().getTriple());
- CmdArgs.push_back(sparc::getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
+ CmdArgs.push_back(
+ sparc::getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
break;
}
@@ -817,7 +886,7 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
if (Args.hasArg(options::OPT_gsplit_dwarf) &&
getToolChain().getTriple().isOSLinux())
SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output,
- SplitDebugName(Args, Output));
+ SplitDebugName(Args, Inputs[0], Output));
}
namespace {
@@ -846,16 +915,6 @@ static bool isSoftFloatABI(const ArgList &Args) {
A->getValue() == StringRef("soft"));
}
-/// \p Flag must be a flag accepted by the driver with its leading '-' removed,
-// otherwise '-print-multi-lib' will not emit them correctly.
-static void addMultilibFlag(bool Enabled, const char *const Flag,
- std::vector<std::string> &Flags) {
- if (Enabled)
- Flags.push_back(std::string("+") + Flag);
- else
- Flags.push_back(std::string("-") + Flag);
-}
-
static bool isArmOrThumbArch(llvm::Triple::ArchType Arch) {
return Arch == llvm::Triple::arm || Arch == llvm::Triple::thumb;
}
@@ -874,10 +933,6 @@ static bool isMicroMips(const ArgList &Args) {
return A && A->getOption().matches(options::OPT_mmicromips);
}
-static bool isRISCV(llvm::Triple::ArchType Arch) {
- return Arch == llvm::Triple::riscv32 || Arch == llvm::Triple::riscv64;
-}
-
static bool isMSP430(llvm::Triple::ArchType Arch) {
return Arch == llvm::Triple::msp430;
}
@@ -1850,6 +1905,7 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
// Non-Solaris is much simpler - most systems just go with "/usr".
if (SysRoot.empty() && TargetTriple.getOS() == llvm::Triple::Linux) {
// Yet, still look for RHEL devtoolsets.
+ Prefixes.push_back("/opt/rh/devtoolset-8/root/usr");
Prefixes.push_back("/opt/rh/devtoolset-7/root/usr");
Prefixes.push_back("/opt/rh/devtoolset-6/root/usr");
Prefixes.push_back("/opt/rh/devtoolset-4/root/usr");
@@ -1889,6 +1945,9 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
static const char *const ARMebHFTriples[] = {
"armeb-linux-gnueabihf", "armebv7hl-redhat-linux-gnueabi"};
+ static const char *const AVRLibDirs[] = {"/lib"};
+ static const char *const AVRTriples[] = {"avr"};
+
static const char *const X86_64LibDirs[] = {"/lib64", "/lib"};
static const char *const X86_64Triples[] = {
"x86_64-linux-gnu", "x86_64-unknown-linux-gnu",
@@ -1951,10 +2010,14 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
"powerpc64le-linux-gnu", "powerpc64le-unknown-linux-gnu",
"powerpc64le-suse-linux", "ppc64le-redhat-linux"};
- static const char *const RISCV32LibDirs[] = {"/lib", "/lib32"};
- static const char *const RISCVTriples[] = {"riscv32-unknown-linux-gnu",
- "riscv64-unknown-linux-gnu",
- "riscv32-unknown-elf"};
+ static const char *const RISCV32LibDirs[] = {"/lib32", "/lib"};
+ static const char *const RISCV32Triples[] = {"riscv32-unknown-linux-gnu",
+ "riscv32-linux-gnu",
+ "riscv32-unknown-elf"};
+ static const char *const RISCV64LibDirs[] = {"/lib64", "/lib"};
+ static const char *const RISCV64Triples[] = {"riscv64-unknown-linux-gnu",
+ "riscv64-linux-gnu",
+ "riscv64-unknown-elf"};
static const char *const SPARCv8LibDirs[] = {"/lib32", "/lib"};
static const char *const SPARCv8Triples[] = {"sparc-linux-gnu",
@@ -2105,6 +2168,10 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
TripleAliases.append(begin(ARMebTriples), end(ARMebTriples));
}
break;
+ case llvm::Triple::avr:
+ LibDirs.append(begin(AVRLibDirs), end(AVRLibDirs));
+ TripleAliases.append(begin(AVRTriples), end(AVRTriples));
+ break;
case llvm::Triple::x86_64:
LibDirs.append(begin(X86_64LibDirs), end(X86_64LibDirs));
TripleAliases.append(begin(X86_64Triples), end(X86_64Triples));
@@ -2184,9 +2251,15 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
break;
case llvm::Triple::riscv32:
LibDirs.append(begin(RISCV32LibDirs), end(RISCV32LibDirs));
+ TripleAliases.append(begin(RISCV32Triples), end(RISCV32Triples));
+ BiarchLibDirs.append(begin(RISCV64LibDirs), end(RISCV64LibDirs));
+ BiarchTripleAliases.append(begin(RISCV64Triples), end(RISCV64Triples));
+ break;
+ case llvm::Triple::riscv64:
+ LibDirs.append(begin(RISCV64LibDirs), end(RISCV64LibDirs));
+ TripleAliases.append(begin(RISCV64Triples), end(RISCV64Triples));
BiarchLibDirs.append(begin(RISCV32LibDirs), end(RISCV32LibDirs));
- TripleAliases.append(begin(RISCVTriples), end(RISCVTriples));
- BiarchTripleAliases.append(begin(RISCVTriples), end(RISCVTriples));
+ BiarchTripleAliases.append(begin(RISCV32Triples), end(RISCV32Triples));
break;
case llvm::Triple::sparc:
case llvm::Triple::sparcel:
@@ -2235,10 +2308,12 @@ bool Generic_GCC::GCCInstallationDetector::ScanGCCForMultilibs(
} else if (TargetTriple.isMIPS()) {
if (!findMIPSMultilibs(D, TargetTriple, Path, Args, Detected))
return false;
- } else if (isRISCV(TargetArch)) {
+ } else if (TargetTriple.isRISCV()) {
findRISCVMultilibs(D, TargetTriple, Path, Args, Detected);
} else if (isMSP430(TargetArch)) {
findMSP430Multilibs(D, TargetTriple, Path, Args, Detected);
+ } else if (TargetArch == llvm::Triple::avr) {
+ // AVR has no multilibs.
} else if (!findBiarchMultilibs(D, TargetTriple, Path, Args,
NeedsBiarchSuffix, Detected)) {
return false;
@@ -2512,7 +2587,8 @@ bool Generic_GCC::IsIntegratedAssemblerDefault() const {
case llvm::Triple::sparc:
case llvm::Triple::sparcel:
case llvm::Triple::sparcv9:
- if (getTriple().isOSSolaris() || getTriple().isOSOpenBSD())
+ if (getTriple().isOSFreeBSD() || getTriple().isOSOpenBSD() ||
+ getTriple().isOSSolaris())
return true;
return false;
default:
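The largest functional change in the Gnu.cpp hunk above is -static-pie support: it is rejected in combination with -nopie, it suppresses the dynamic linker, and it selects rcrt1.o together with '-static -pie --no-dynamic-linker -z text'. A standalone sketch of how those decisions turn into linker arguments, with plain booleans standing in for the Args.hasArg() queries:

    // Standalone sketch; booleans stand in for ArgList queries and a plain
    // vector of strings stands in for the ArgStringList being built.
    #include <iostream>
    #include <string>
    #include <vector>

    struct LinkRequest {
      bool Static = false, StaticPIE = false, PIE = false, Shared = false;
    };

    // Mirrors the mode and startup-object selection added for -static-pie.
    static std::vector<std::string> linkModeArgs(const LinkRequest &R) {
      std::vector<std::string> Args;
      if (R.PIE)
        Args.push_back("-pie");
      if (R.StaticPIE)
        Args.insert(Args.end(),
                    {"-static", "-pie", "--no-dynamic-linker", "-z", "text"});
      // Startup object: Scrt1.o for PIE, rcrt1.o for static PIE, else crt1.o
      // (the real code also picks gcrt1.o when profiling with -pg).
      Args.push_back(R.StaticPIE ? "rcrt1.o" : (R.PIE ? "Scrt1.o" : "crt1.o"));
      if (!R.Static && !R.StaticPIE && !R.Shared)
        Args.push_back("-dynamic-linker"); // followed by the loader path in the real code
      return Args;
    }

    int main() {
      LinkRequest R;
      R.StaticPIE = true;
      for (const std::string &A : linkModeArgs(R))
        std::cout << A << " ";
      std::cout << "\n"; // -static -pie --no-dynamic-linker -z text rcrt1.o
      return 0;
    }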
diff --git a/lib/Driver/ToolChains/Gnu.h b/lib/Driver/ToolChains/Gnu.h
index e8e74e4d80fd..3bb38c498b35 100644
--- a/lib/Driver/ToolChains/Gnu.h
+++ b/lib/Driver/ToolChains/Gnu.h
@@ -1,9 +1,8 @@
//===--- Gnu.h - Gnu Tool and ToolChain Implementations ---------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/HIP.cpp b/lib/Driver/ToolChains/HIP.cpp
index 868765cf88e5..2ec97e798fd0 100644
--- a/lib/Driver/ToolChains/HIP.cpp
+++ b/lib/Driver/ToolChains/HIP.cpp
@@ -1,9 +1,8 @@
//===--- HIP.cpp - HIP Tool and ToolChain Implementations -------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -32,7 +31,7 @@ using namespace llvm::opt;
namespace {
-static void addBCLib(Compilation &C, const ArgList &Args,
+static void addBCLib(const Driver &D, const ArgList &Args,
ArgStringList &CmdArgs, ArgStringList LibraryPaths,
StringRef BCName) {
StringRef FullName;
@@ -41,11 +40,12 @@ static void addBCLib(Compilation &C, const ArgList &Args,
llvm::sys::path::append(Path, BCName);
FullName = Path;
if (llvm::sys::fs::exists(FullName)) {
+ CmdArgs.push_back("-mlink-builtin-bitcode");
CmdArgs.push_back(Args.MakeArgString(FullName));
return;
}
}
- C.getDriver().Diag(diag::err_drv_no_such_file) << BCName;
+ D.Diag(diag::err_drv_no_such_file) << BCName;
}
} // namespace
@@ -59,44 +59,6 @@ const char *AMDGCN::Linker::constructLLVMLinkCommand(
for (const auto &II : Inputs)
CmdArgs.push_back(II.getFilename());
- ArgStringList LibraryPaths;
-
- // Find in --hip-device-lib-path and HIP_LIBRARY_PATH.
- for (auto Path : Args.getAllArgValues(options::OPT_hip_device_lib_path_EQ))
- LibraryPaths.push_back(Args.MakeArgString(Path));
-
- addDirectoryList(Args, LibraryPaths, "-L", "HIP_DEVICE_LIB_PATH");
-
- llvm::SmallVector<std::string, 10> BCLibs;
-
- // Add bitcode library in --hip-device-lib.
- for (auto Lib : Args.getAllArgValues(options::OPT_hip_device_lib_EQ)) {
- BCLibs.push_back(Args.MakeArgString(Lib));
- }
-
- // If --hip-device-lib is not set, add the default bitcode libraries.
- if (BCLibs.empty()) {
- // Get the bc lib file name for ISA version. For example,
- // gfx803 => oclc_isa_version_803.amdgcn.bc.
- std::string ISAVerBC =
- "oclc_isa_version_" + SubArchName.drop_front(3).str() + ".amdgcn.bc";
-
- llvm::StringRef FlushDenormalControlBC;
- if (Args.hasArg(options::OPT_fcuda_flush_denormals_to_zero))
- FlushDenormalControlBC = "oclc_daz_opt_on.amdgcn.bc";
- else
- FlushDenormalControlBC = "oclc_daz_opt_off.amdgcn.bc";
-
- BCLibs.append({"hip.amdgcn.bc", "opencl.amdgcn.bc",
- "ocml.amdgcn.bc", "ockl.amdgcn.bc",
- "oclc_finite_only_off.amdgcn.bc",
- FlushDenormalControlBC,
- "oclc_correctly_rounded_sqrt_on.amdgcn.bc",
- "oclc_unsafe_math_off.amdgcn.bc", ISAVerBC});
- }
- for (auto Lib : BCLibs)
- addBCLib(C, Args, CmdArgs, LibraryPaths, Lib);
-
// Add an intermediate output file.
CmdArgs.push_back("-o");
std::string TmpName =
@@ -141,6 +103,11 @@ const char *AMDGCN::Linker::constructOptCommand(
}
OptArgs.push_back("-mtriple=amdgcn-amd-amdhsa");
OptArgs.push_back(Args.MakeArgString("-mcpu=" + SubArchName));
+
+ for (const Arg *A : Args.filtered(options::OPT_mllvm)) {
+ OptArgs.push_back(A->getValue(0));
+ }
+
OptArgs.push_back("-o");
std::string TmpFileName = C.getDriver().GetTemporaryPath(
OutputFilePrefix.str() + "-optimized", "bc");
@@ -160,8 +127,30 @@ const char *AMDGCN::Linker::constructLlcCommand(
llvm::StringRef OutputFilePrefix, const char *InputFileName) const {
// Construct llc command.
ArgStringList LlcArgs{InputFileName, "-mtriple=amdgcn-amd-amdhsa",
- "-filetype=obj", "-mattr=-code-object-v3",
- Args.MakeArgString("-mcpu=" + SubArchName), "-o"};
+ "-filetype=obj",
+ Args.MakeArgString("-mcpu=" + SubArchName)};
+
+ // Extract all the -m options
+ std::vector<llvm::StringRef> Features;
+ handleTargetFeaturesGroup(
+ Args, Features, options::OPT_m_amdgpu_Features_Group);
+
+ // Add features to mattr such as xnack
+ std::string MAttrString = "-mattr=";
+ for(auto OneFeature : Features) {
+ MAttrString.append(Args.MakeArgString(OneFeature));
+ if (OneFeature != Features.back())
+ MAttrString.append(",");
+ }
+ if(!Features.empty())
+ LlcArgs.push_back(Args.MakeArgString(MAttrString));
+
+ for (const Arg *A : Args.filtered(options::OPT_mllvm)) {
+ LlcArgs.push_back(A->getValue(0));
+ }
+
+ // Add output filename
+ LlcArgs.push_back("-o");
std::string LlcOutputFileName =
C.getDriver().GetTemporaryPath(OutputFilePrefix, "o");
const char *LlcOutputFile =
@@ -181,9 +170,8 @@ void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
const char *InputFileName) const {
// Construct lld command.
// The output from ld.lld is an HSA code object file.
- ArgStringList LldArgs{"-flavor", "gnu", "--no-undefined",
- "-shared", "-o", Output.getFilename(),
- InputFileName};
+ ArgStringList LldArgs{
+ "-flavor", "gnu", "-shared", "-o", Output.getFilename(), InputFileName};
SmallString<128> LldPath(C.getDriver().Dir);
llvm::sys::path::append(LldPath, "lld");
const char *Lld = Args.MakeArgString(LldPath);
@@ -294,8 +282,54 @@ void HIPToolChain::addClangTargetOptions(
// Default to "hidden" visibility, as object level linking will not be
// supported for the foreseeable future.
if (!DriverArgs.hasArg(options::OPT_fvisibility_EQ,
- options::OPT_fvisibility_ms_compat))
+ options::OPT_fvisibility_ms_compat)) {
CC1Args.append({"-fvisibility", "hidden"});
+ CC1Args.push_back("-fapply-global-visibility-to-externs");
+ }
+ ArgStringList LibraryPaths;
+
+ // Find in --hip-device-lib-path and HIP_LIBRARY_PATH.
+ for (auto Path :
+ DriverArgs.getAllArgValues(options::OPT_hip_device_lib_path_EQ))
+ LibraryPaths.push_back(DriverArgs.MakeArgString(Path));
+
+ addDirectoryList(DriverArgs, LibraryPaths, "-L", "HIP_DEVICE_LIB_PATH");
+
+ llvm::SmallVector<std::string, 10> BCLibs;
+
+ // Add bitcode library in --hip-device-lib.
+ for (auto Lib : DriverArgs.getAllArgValues(options::OPT_hip_device_lib_EQ)) {
+ BCLibs.push_back(DriverArgs.MakeArgString(Lib));
+ }
+
+ // If --hip-device-lib is not set, add the default bitcode libraries.
+ if (BCLibs.empty()) {
+ // Get the bc lib file name for ISA version. For example,
+ // gfx803 => oclc_isa_version_803.amdgcn.bc.
+ std::string GFXVersion = GpuArch.drop_front(3).str();
+ std::string ISAVerBC = "oclc_isa_version_" + GFXVersion + ".amdgcn.bc";
+
+ llvm::StringRef FlushDenormalControlBC;
+ if (DriverArgs.hasArg(options::OPT_fcuda_flush_denormals_to_zero))
+ FlushDenormalControlBC = "oclc_daz_opt_on.amdgcn.bc";
+ else
+ FlushDenormalControlBC = "oclc_daz_opt_off.amdgcn.bc";
+
+ llvm::StringRef WaveFrontSizeBC;
+ if (stoi(GFXVersion) < 1000)
+ WaveFrontSizeBC = "oclc_wavefrontsize64_on.amdgcn.bc";
+ else
+ WaveFrontSizeBC = "oclc_wavefrontsize64_off.amdgcn.bc";
+
+ BCLibs.append({"hip.amdgcn.bc", "opencl.amdgcn.bc", "ocml.amdgcn.bc",
+ "ockl.amdgcn.bc", "oclc_finite_only_off.amdgcn.bc",
+ FlushDenormalControlBC,
+ "oclc_correctly_rounded_sqrt_on.amdgcn.bc",
+ "oclc_unsafe_math_off.amdgcn.bc", ISAVerBC,
+ WaveFrontSizeBC});
+ }
+ for (auto Lib : BCLibs)
+ addBCLib(getDriver(), DriverArgs, CC1Args, LibraryPaths, Lib);
}
llvm::opt::DerivedArgList *
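
With this change the device bitcode libraries are chosen in HIPToolChain::addClangTargetOptions and handed to the cc1 job through addBCLib, rather than being linked by the separate llvm-link step removed above. A standalone sketch of how the default list comes out for a hypothetical gfx906 target with default denormal handling (plain C++17, mirroring the selection logic only):

#include <iostream>
#include <string>
#include <vector>

int main() {
  // Hypothetical inputs: GpuArch "gfx906", no -fcuda-flush-denormals-to-zero.
  std::string GFXVersion = "906";
  bool FlushDenormals = false;

  std::string ISAVerBC = "oclc_isa_version_" + GFXVersion + ".amdgcn.bc";
  std::string DazBC = FlushDenormals ? "oclc_daz_opt_on.amdgcn.bc"
                                     : "oclc_daz_opt_off.amdgcn.bc";
  // Wavefront size 64 for pre-gfx10 ISAs (GFX version below 1000).
  std::string WaveBC = std::stoi(GFXVersion) < 1000
                           ? "oclc_wavefrontsize64_on.amdgcn.bc"
                           : "oclc_wavefrontsize64_off.amdgcn.bc";

  std::vector<std::string> BCLibs = {
      "hip.amdgcn.bc", "opencl.amdgcn.bc", "ocml.amdgcn.bc", "ockl.amdgcn.bc",
      "oclc_finite_only_off.amdgcn.bc", DazBC,
      "oclc_correctly_rounded_sqrt_on.amdgcn.bc",
      "oclc_unsafe_math_off.amdgcn.bc", ISAVerBC, WaveBC};

  for (const std::string &Lib : BCLibs)
    std::cout << Lib << '\n';
}
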
diff --git a/lib/Driver/ToolChains/HIP.h b/lib/Driver/ToolChains/HIP.h
index 3af19d44dae0..a650095d054d 100644
--- a/lib/Driver/ToolChains/HIP.h
+++ b/lib/Driver/ToolChains/HIP.h
@@ -1,9 +1,8 @@
//===--- HIP.h - HIP ToolChain Implementations ------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Haiku.cpp b/lib/Driver/ToolChains/Haiku.cpp
index 12461ec9c4bd..18f550c9ceca 100644
--- a/lib/Driver/ToolChains/Haiku.cpp
+++ b/lib/Driver/ToolChains/Haiku.cpp
@@ -1,9 +1,8 @@
//===--- Haiku.cpp - Haiku ToolChain Implementations ------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Haiku.h b/lib/Driver/ToolChains/Haiku.h
index a12a48e00976..2bc98322bebf 100644
--- a/lib/Driver/ToolChains/Haiku.h
+++ b/lib/Driver/ToolChains/Haiku.h
@@ -1,9 +1,8 @@
//===--- Haiku.h - Haiku ToolChain Implementations --------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Hexagon.cpp b/lib/Driver/ToolChains/Hexagon.cpp
index d302a3e24d8b..f2c3ea11a9f3 100644
--- a/lib/Driver/ToolChains/Hexagon.cpp
+++ b/lib/Driver/ToolChains/Hexagon.cpp
@@ -1,9 +1,8 @@
//===--- Hexagon.cpp - Hexagon ToolChain Implementations --------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -131,11 +130,11 @@ void hexagon::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const Driver &D = HTC.getDriver();
ArgStringList CmdArgs;
- CmdArgs.push_back("-march=hexagon");
+ CmdArgs.push_back("--arch=hexagon");
RenderExtraToolArgs(JA, CmdArgs);
- const char *AsName = "hexagon-llvm-mc";
+ const char *AsName = "llvm-mc";
CmdArgs.push_back("-filetype=obj");
CmdArgs.push_back(Args.MakeArgString(
"-mcpu=hexagon" +
@@ -431,7 +430,7 @@ void HexagonToolChain::getHexagonLibraryPaths(const ArgList &Args,
std::string TargetDir = getHexagonTargetDir(D.getInstalledDir(),
D.PrefixDirs);
- if (std::find(RootDirs.begin(), RootDirs.end(), TargetDir) == RootDirs.end())
+ if (llvm::find(RootDirs, TargetDir) == RootDirs.end())
RootDirs.push_back(TargetDir);
bool HasPIC = Args.hasArg(options::OPT_fpic, options::OPT_fPIC);
diff --git a/lib/Driver/ToolChains/Hexagon.h b/lib/Driver/ToolChains/Hexagon.h
index a9e599de7ae5..d7b4a13d3a4f 100644
--- a/lib/Driver/ToolChains/Hexagon.h
+++ b/lib/Driver/ToolChains/Hexagon.h
@@ -1,9 +1,8 @@
//===--- Hexagon.h - Hexagon ToolChain Implementations ----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Hurd.cpp b/lib/Driver/ToolChains/Hurd.cpp
index ff7b685dae3f..92b0a7f2483f 100644
--- a/lib/Driver/ToolChains/Hurd.cpp
+++ b/lib/Driver/ToolChains/Hurd.cpp
@@ -1,9 +1,8 @@
//===--- Hurd.cpp - Hurd ToolChain Implementations --------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Hurd.h b/lib/Driver/ToolChains/Hurd.h
index d14619f0e2ce..a2c3d074e9f9 100644
--- a/lib/Driver/ToolChains/Hurd.h
+++ b/lib/Driver/ToolChains/Hurd.h
@@ -1,9 +1,8 @@
//===--- Hurd.h - Hurd ToolChain Implementations ----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Lanai.h b/lib/Driver/ToolChains/Lanai.h
index bb92bfaea7e2..dc04b0cfe2ee 100644
--- a/lib/Driver/ToolChains/Lanai.h
+++ b/lib/Driver/ToolChains/Lanai.h
@@ -1,9 +1,8 @@
//===--- Lanai.h - Lanai ToolChain Implementations --------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Linux.cpp b/lib/Driver/ToolChains/Linux.cpp
index 65ab9b2daf54..d900508ad938 100644
--- a/lib/Driver/ToolChains/Linux.cpp
+++ b/lib/Driver/ToolChains/Linux.cpp
@@ -1,9 +1,8 @@
//===--- Linux.h - Linux ToolChain Implementations --------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -45,6 +44,7 @@ static std::string getMultiarchTriple(const Driver &D,
TargetTriple.getEnvironment();
bool IsAndroid = TargetTriple.isAndroid();
bool IsMipsR6 = TargetTriple.getSubArch() == llvm::Triple::MipsSubArch_r6;
+ bool IsMipsN32Abi = TargetTriple.getEnvironment() == llvm::Triple::GNUABIN32;
// For most architectures, just use whatever we have rather than trying to be
// clever.
@@ -103,33 +103,37 @@ static std::string getMultiarchTriple(const Driver &D,
return "aarch64_be-linux-gnu";
break;
case llvm::Triple::mips: {
- std::string Arch = IsMipsR6 ? "mipsisa32r6" : "mips";
- if (D.getVFS().exists(SysRoot + "/lib/" + Arch + "-linux-gnu"))
- return Arch + "-linux-gnu";
+ std::string MT = IsMipsR6 ? "mipsisa32r6-linux-gnu" : "mips-linux-gnu";
+ if (D.getVFS().exists(SysRoot + "/lib/" + MT))
+ return MT;
break;
}
case llvm::Triple::mipsel: {
if (IsAndroid)
return "mipsel-linux-android";
- std::string Arch = IsMipsR6 ? "mipsisa32r6el" : "mipsel";
- if (D.getVFS().exists(SysRoot + "/lib/" + Arch + "-linux-gnu"))
- return Arch + "-linux-gnu";
+ std::string MT = IsMipsR6 ? "mipsisa32r6el-linux-gnu" : "mipsel-linux-gnu";
+ if (D.getVFS().exists(SysRoot + "/lib/" + MT))
+ return MT;
break;
}
case llvm::Triple::mips64: {
- std::string Arch = IsMipsR6 ? "mipsisa64r6" : "mips64";
- std::string ABI = llvm::Triple::getEnvironmentTypeName(TargetEnvironment);
- if (D.getVFS().exists(SysRoot + "/lib/" + Arch + "-linux-" + ABI))
- return Arch + "-linux-" + ABI;
+ std::string MT = std::string(IsMipsR6 ? "mipsisa64r6" : "mips64") +
+ "-linux-" + (IsMipsN32Abi ? "gnuabin32" : "gnuabi64");
+ if (D.getVFS().exists(SysRoot + "/lib/" + MT))
+ return MT;
+ if (D.getVFS().exists(SysRoot + "/lib/mips64-linux-gnu"))
+ return "mips64-linux-gnu";
break;
}
case llvm::Triple::mips64el: {
if (IsAndroid)
return "mips64el-linux-android";
- std::string Arch = IsMipsR6 ? "mipsisa64r6el" : "mips64el";
- std::string ABI = llvm::Triple::getEnvironmentTypeName(TargetEnvironment);
- if (D.getVFS().exists(SysRoot + "/lib/" + Arch + "-linux-" + ABI))
- return Arch + "-linux-" + ABI;
+ std::string MT = std::string(IsMipsR6 ? "mipsisa64r6el" : "mips64el") +
+ "-linux-" + (IsMipsN32Abi ? "gnuabin32" : "gnuabi64");
+ if (D.getVFS().exists(SysRoot + "/lib/" + MT))
+ return MT;
+ if (D.getVFS().exists(SysRoot + "/lib/mips64el-linux-gnu"))
+ return "mips64el-linux-gnu";
break;
}
case llvm::Triple::ppc:
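
The mips64/mips64el cases now fold both the R6 sub-arch and the N32 environment into the multiarch directory name, and fall back to the plain mips64(el)-linux-gnu directory if the exact one is absent from the sysroot. A small standalone C++17 sketch of the name computation (string logic only, no VFS probing):

#include <iostream>
#include <string>

// Compute the multiarch directory probed under <sysroot>/lib for 64-bit MIPS.
static std::string mips64MultiarchName(bool IsR6, bool IsN32, bool IsLittleEndian) {
  std::string Arch = IsR6 ? "mipsisa64r6" : "mips64";
  if (IsLittleEndian)
    Arch += "el";
  return Arch + "-linux-" + (IsN32 ? "gnuabin32" : "gnuabi64");
}

int main() {
  std::cout << mips64MultiarchName(false, false, true) << '\n'; // mips64el-linux-gnuabi64
  std::cout << mips64MultiarchName(true, true, false) << '\n';  // mipsisa64r6-linux-gnuabin32
}
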
@@ -230,9 +234,11 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
// used to target i386.
// FIXME: This seems unlikely to be Linux-specific.
ToolChain::path_list &PPaths = getProgramPaths();
- PPaths.push_back(Twine(GCCInstallation.getParentLibPath() + "/../" +
- GCCInstallation.getTriple().str() + "/bin")
- .str());
+ if (GCCInstallation.isValid()) {
+ PPaths.push_back(Twine(GCCInstallation.getParentLibPath() + "/../" +
+ GCCInstallation.getTriple().str() + "/bin")
+ .str());
+ }
Distro Distro(D.getVFS());
@@ -270,8 +276,7 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
const bool IsAndroid = Triple.isAndroid();
const bool IsMips = Triple.isMIPS();
const bool IsHexagon = Arch == llvm::Triple::hexagon;
- const bool IsRISCV =
- Arch == llvm::Triple::riscv32 || Arch == llvm::Triple::riscv64;
+ const bool IsRISCV = Triple.isRISCV();
if (IsMips && !SysRoot.empty())
ExtraOpts.push_back("--sysroot=" + SysRoot);
@@ -295,9 +300,6 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
ExtraOpts.push_back("--hash-style=both");
}
- if (Distro.IsRedhat() && Distro != Distro::RHEL5 && Distro != Distro::RHEL6)
- ExtraOpts.push_back("--no-add-needed");
-
#ifdef ENABLE_LINKER_BUILD_ID
ExtraOpts.push_back("--build-id");
#endif
@@ -326,8 +328,9 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
// Sourcery CodeBench MIPS toolchain holds some libraries under
// a biarch-like suffix of the GCC installation.
- addPathIfExists(D, GCCInstallation.getInstallPath() + SelectedMultilib.gccSuffix(),
- Paths);
+ addPathIfExists(
+ D, GCCInstallation.getInstallPath() + SelectedMultilib.gccSuffix(),
+ Paths);
// GCC cross compiling toolchains will install target libraries which ship
// as part of the toolchain under <prefix>/<triple>/<libdir> rather than as
@@ -637,8 +640,9 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
}
}
- if (Distro == Distro::Exherbo && (Triple.getVendor() == llvm::Triple::UnknownVendor ||
- Triple.getVendor() == llvm::Triple::PC))
+ if (Distro == Distro::Exherbo &&
+ (Triple.getVendor() == llvm::Triple::UnknownVendor ||
+ Triple.getVendor() == llvm::Triple::PC))
return "/usr/" + Triple.str() + "/lib/" + Loader;
return "/" + LibDir + "/" + Loader;
}
@@ -858,12 +862,13 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/include");
}
-static std::string DetectLibcxxIncludePath(StringRef base) {
+static std::string DetectLibcxxIncludePath(llvm::vfs::FileSystem &vfs,
+ StringRef base) {
std::error_code EC;
int MaxVersion = 0;
std::string MaxVersionString = "";
- for (llvm::sys::fs::directory_iterator LI(base, EC), LE; !EC && LI != LE;
- LI = LI.increment(EC)) {
+ for (llvm::vfs::directory_iterator LI = vfs.dir_begin(base, EC), LE;
+ !EC && LI != LE; LI = LI.increment(EC)) {
StringRef VersionText = llvm::sys::path::filename(LI->path());
int Version;
if (VersionText[0] == 'v' &&
@@ -881,13 +886,12 @@ void Linux::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
const std::string& SysRoot = computeSysRoot();
const std::string LibCXXIncludePathCandidates[] = {
- DetectLibcxxIncludePath(getDriver().ResourceDir + "/include/c++"),
- DetectLibcxxIncludePath(getDriver().Dir + "/../include/c++"),
+ DetectLibcxxIncludePath(getVFS(), getDriver().Dir + "/../include/c++"),
// If this is a development, non-installed clang, libcxx will
// not be found at ../include/c++, but it is likely to be found at
// one of the following two locations:
- DetectLibcxxIncludePath(SysRoot + "/usr/local/include/c++"),
- DetectLibcxxIncludePath(SysRoot + "/usr/include/c++") };
+ DetectLibcxxIncludePath(getVFS(), SysRoot + "/usr/local/include/c++"),
+ DetectLibcxxIncludePath(getVFS(), SysRoot + "/usr/include/c++") };
for (const auto &IncludePath : LibCXXIncludePathCandidates) {
if (IncludePath.empty() || !getVFS().exists(IncludePath))
continue;
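
DetectLibcxxIncludePath now walks the directory through the driver's virtual file system instead of llvm::sys::fs, so the probing also sees VFS overlays. A rough standalone equivalent of the "pick the highest v<N> subdirectory" scan, using std::filesystem as a stand-in for llvm::vfs:

#include <cstdlib>
#include <filesystem>
#include <iostream>
#include <string>
#include <system_error>

// Return Base + "/v<N>" for the largest N among subdirectories named v1, v2, ...
static std::string detectLibcxxIncludePath(const std::string &Base) {
  namespace fs = std::filesystem;
  int MaxVersion = 0;
  std::string Best;
  std::error_code EC;
  for (const fs::directory_entry &E : fs::directory_iterator(Base, EC)) {
    std::string Name = E.path().filename().string();
    if (Name.size() > 1 && Name[0] == 'v') {
      int Version = std::atoi(Name.c_str() + 1);
      if (Version > MaxVersion) {
        MaxVersion = Version;
        Best = Base + "/" + Name;
      }
    }
  }
  return MaxVersion ? Best : std::string();
}

int main() {
  // With /usr/include/c++ containing a "v1" directory, this prints .../c++/v1.
  std::cout << detectLibcxxIncludePath("/usr/include/c++") << '\n';
}
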
@@ -972,6 +976,10 @@ bool Linux::isPIEDefault() const {
getTriple().isMusl() || getSanitizerArgs().requiresPIE();
}
+bool Linux::isNoExecStackDefault() const {
+ return getTriple().isAndroid();
+}
+
bool Linux::IsMathErrnoDefault() const {
if (getTriple().isAndroid())
return false;
@@ -993,6 +1001,8 @@ SanitizerMask Linux::getSupportedSanitizers() const {
getTriple().getArch() == llvm::Triple::thumbeb;
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
+ Res |= SanitizerKind::PointerCompare;
+ Res |= SanitizerKind::PointerSubtract;
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::FuzzerNoLink;
Res |= SanitizerKind::KernelAddress;
@@ -1007,8 +1017,6 @@ SanitizerMask Linux::getSupportedSanitizers() const {
Res |= SanitizerKind::Thread;
if (IsX86_64)
Res |= SanitizerKind::KernelMemory;
- if (IsX86_64 || IsMIPS64)
- Res |= SanitizerKind::Efficiency;
if (IsX86 || IsX86_64)
Res |= SanitizerKind::Function;
if (IsX86_64 || IsMIPS64 || IsAArch64 || IsX86 || IsMIPS || IsArmArch ||
@@ -1018,6 +1026,8 @@ SanitizerMask Linux::getSupportedSanitizers() const {
Res |= SanitizerKind::HWAddress;
Res |= SanitizerKind::KernelHWAddress;
}
+ if (IsAArch64)
+ Res |= SanitizerKind::MemTag;
return Res;
}
@@ -1027,7 +1037,8 @@ void Linux::addProfileRTLibs(const llvm::opt::ArgList &Args,
// Add linker option -u__llvm_runtime_variable to cause runtime
// initialization module to be linked in.
- if ((!Args.hasArg(options::OPT_coverage)) && (!Args.hasArg(options::OPT_ftest_coverage)))
+ if ((!Args.hasArg(options::OPT_coverage)) &&
+ (!Args.hasArg(options::OPT_ftest_coverage)))
CmdArgs.push_back(Args.MakeArgString(
Twine("-u", llvm::getInstrProfRuntimeHookVarName())));
ToolChain::addProfileRTLibs(Args, CmdArgs);
diff --git a/lib/Driver/ToolChains/Linux.h b/lib/Driver/ToolChains/Linux.h
index 4a662cb4b427..4c61994691c7 100644
--- a/lib/Driver/ToolChains/Linux.h
+++ b/lib/Driver/ToolChains/Linux.h
@@ -1,9 +1,8 @@
//===--- Linux.h - Linux ToolChain Implementations --------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -39,6 +38,7 @@ public:
llvm::opt::ArgStringList &CC1Args) const override;
CXXStdlibType GetDefaultCXXStdlibType() const override;
bool isPIEDefault() const override;
+ bool isNoExecStackDefault() const override;
bool IsMathErrnoDefault() const override;
SanitizerMask getSupportedSanitizers() const override;
void addProfileRTLibs(const llvm::opt::ArgList &Args,
diff --git a/lib/Driver/ToolChains/MSP430.cpp b/lib/Driver/ToolChains/MSP430.cpp
index b2ff88dbd021..fc6048f17d78 100644
--- a/lib/Driver/ToolChains/MSP430.cpp
+++ b/lib/Driver/ToolChains/MSP430.cpp
@@ -1,9 +1,8 @@
//===--- MSP430.cpp - MSP430 Helpers for Tools ------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/MSP430.h b/lib/Driver/ToolChains/MSP430.h
index 0fdceb75b963..b5308a8dd687 100644
--- a/lib/Driver/ToolChains/MSP430.h
+++ b/lib/Driver/ToolChains/MSP430.h
@@ -1,9 +1,8 @@
//===--- MSP430.h - MSP430-specific Tool Helpers ----------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -37,6 +36,10 @@ public:
llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind) const override;
+ bool isPICDefault() const override { return false; }
+ bool isPIEDefault() const override { return false; }
+ bool isPICDefaultForced() const override { return true; }
+
protected:
Tool *buildLinker() const override;
diff --git a/lib/Driver/ToolChains/MSVC.cpp b/lib/Driver/ToolChains/MSVC.cpp
index 7e34b0df5c8c..6ed80a8f4752 100644
--- a/lib/Driver/ToolChains/MSVC.cpp
+++ b/lib/Driver/ToolChains/MSVC.cpp
@@ -1,9 +1,8 @@
-//===--- ToolChains.cpp - ToolChain Implementations -----------------------===//
+//===-- MSVC.cpp - MSVC ToolChain Implementations -------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -378,7 +377,7 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_shared))
CmdArgs.push_back(
Args.MakeArgString(std::string("-wholearchive:") +
- TC.getCompilerRTArgString(Args, "fuzzer", false)));
+ TC.getCompilerRTArgString(Args, "fuzzer")));
CmdArgs.push_back(Args.MakeArgString("-debug"));
// Prevent the linker from padding sections we use for instrumentation
// arrays.
@@ -489,15 +488,25 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// their own link.exe which may come first.
linkPath = FindVisualStudioExecutable(TC, "link.exe");
- if (!TC.FoundMSVCInstall() && !llvm::sys::fs::can_execute(linkPath))
- C.getDriver().Diag(clang::diag::warn_drv_msvc_not_found);
+ if (!TC.FoundMSVCInstall() && !llvm::sys::fs::can_execute(linkPath)) {
+ llvm::SmallString<128> ClPath;
+ ClPath = TC.GetProgramPath("cl.exe");
+ if (llvm::sys::fs::can_execute(ClPath)) {
+ linkPath = llvm::sys::path::parent_path(ClPath);
+ llvm::sys::path::append(linkPath, "link.exe");
+ if (!llvm::sys::fs::can_execute(linkPath))
+ C.getDriver().Diag(clang::diag::warn_drv_msvc_not_found);
+ } else {
+ C.getDriver().Diag(clang::diag::warn_drv_msvc_not_found);
+ }
+ }
#ifdef _WIN32
// When cross-compiling with VS2017 or newer, link.exe expects to have
// its containing bin directory at the top of PATH, followed by the
// native target bin directory.
// e.g. when compiling for x86 on an x64 host, PATH should start with:
- // /bin/HostX64/x86;/bin/HostX64/x64
+ // /bin/Hostx64/x86;/bin/Hostx64/x64
// This doesn't attempt to handle ToolsetLayout::DevDivInternal.
if (TC.getIsVS2017OrNewer() &&
llvm::Triple(llvm::sys::getProcessTriple()).getArch() != TC.getArch()) {
@@ -617,11 +626,11 @@ std::unique_ptr<Command> visualstudio::Compiler::GetCommand(
// FIXME: How can we ensure this stays in sync with relevant clang-cl options?
if (Args.hasFlag(options::OPT__SLASH_GR_, options::OPT__SLASH_GR,
- /*default=*/false))
+ /*Default=*/false))
CmdArgs.push_back("/GR-");
if (Args.hasFlag(options::OPT__SLASH_GS_, options::OPT__SLASH_GS,
- /*default=*/false))
+ /*Default=*/false))
CmdArgs.push_back("/GS-");
if (Arg *A = Args.getLastArg(options::OPT_ffunction_sections,
@@ -839,7 +848,7 @@ MSVCToolChain::getSubDirectoryPath(SubDirectoryType Type,
if (VSLayout == ToolsetLayout::VS2017OrNewer) {
const bool HostIsX64 =
llvm::Triple(llvm::sys::getProcessTriple()).isArch64Bit();
- const char *const HostName = HostIsX64 ? "HostX64" : "HostX86";
+ const char *const HostName = HostIsX64 ? "Hostx64" : "Hostx86";
llvm::sys::path::append(Path, "bin", HostName, SubdirName);
} else { // OlderVS or DevDivInternal
llvm::sys::path::append(Path, "bin", SubdirName);
@@ -1318,6 +1327,8 @@ MSVCToolChain::ComputeEffectiveClangTriple(const ArgList &Args,
SanitizerMask MSVCToolChain::getSupportedSanitizers() const {
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
+ Res |= SanitizerKind::PointerCompare;
+ Res |= SanitizerKind::PointerSubtract;
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::FuzzerNoLink;
Res &= ~SanitizerKind::CFIMFCall;
@@ -1408,10 +1419,10 @@ static void TranslateOptArg(Arg *A, llvm::opt::DerivedArgList &DAL,
DAL.AddFlagArg(
A, Opts.getOption(options::OPT_fno_omit_frame_pointer));
} else {
- // Don't warn about /Oy- in 64-bit builds (where
+ // Don't warn about /Oy- in x86-64 builds (where
// SupportsForcingFramePointer is false). The flag having no effect
// there is a compiler-internal optimization, and people shouldn't have
- // to special-case their build files for 64-bit clang-cl.
+ // to special-case their build files for x86-64 clang-cl.
A->claim();
}
break;
@@ -1442,8 +1453,8 @@ MSVCToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs());
const OptTable &Opts = getDriver().getOpts();
- // /Oy and /Oy- only has an effect under X86-32.
- bool SupportsForcingFramePointer = getArch() == llvm::Triple::x86;
+ // /Oy and /Oy- don't have an effect on X86-64
+ bool SupportsForcingFramePointer = getArch() != llvm::Triple::x86_64;
// The -O[12xd] flag actually expands to several flags. We must desugar the
// flags so that options embedded can be negated. For example, the '-O2' flag
diff --git a/lib/Driver/ToolChains/MSVC.h b/lib/Driver/ToolChains/MSVC.h
index ebca0018bb85..aba9417c9727 100644
--- a/lib/Driver/ToolChains/MSVC.h
+++ b/lib/Driver/ToolChains/MSVC.h
@@ -1,9 +1,8 @@
//===--- MSVC.h - MSVC ToolChain Implementations ----------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/MinGW.cpp b/lib/Driver/ToolChains/MinGW.cpp
index 2d5217d03d3a..0e1873cce25b 100644
--- a/lib/Driver/ToolChains/MinGW.cpp
+++ b/lib/Driver/ToolChains/MinGW.cpp
@@ -1,9 +1,8 @@
//===--- MinGW.cpp - MinGWToolChain Implementation ------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -54,7 +53,7 @@ void tools::MinGW::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_gsplit_dwarf))
SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output,
- SplitDebugName(Args, Output));
+ SplitDebugName(Args, Inputs[0], Output));
}
void tools::MinGW::Linker::AddLibGCC(const ArgList &Args,
@@ -249,22 +248,24 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Sanitize.needsAsanRt()) {
// MinGW always links against a shared MSVCRT.
- CmdArgs.push_back(
- TC.getCompilerRTArgString(Args, "asan_dynamic", true));
+ CmdArgs.push_back(TC.getCompilerRTArgString(Args, "asan_dynamic",
+ ToolChain::FT_Shared));
CmdArgs.push_back(
TC.getCompilerRTArgString(Args, "asan_dynamic_runtime_thunk"));
- CmdArgs.push_back(Args.MakeArgString("--require-defined"));
- CmdArgs.push_back(Args.MakeArgString(TC.getArch() == llvm::Triple::x86
- ? "___asan_seh_interceptor"
- : "__asan_seh_interceptor"));
+ CmdArgs.push_back("--require-defined");
+ CmdArgs.push_back(TC.getArch() == llvm::Triple::x86
+ ? "___asan_seh_interceptor"
+ : "__asan_seh_interceptor");
// Make sure the linker considers all object files from the dynamic
// runtime thunk.
- CmdArgs.push_back(Args.MakeArgString("--whole-archive"));
- CmdArgs.push_back(Args.MakeArgString(
- TC.getCompilerRT(Args, "asan_dynamic_runtime_thunk")));
- CmdArgs.push_back(Args.MakeArgString("--no-whole-archive"));
+ CmdArgs.push_back("--whole-archive");
+ CmdArgs.push_back(
+ TC.getCompilerRTArgString(Args, "asan_dynamic_runtime_thunk"));
+ CmdArgs.push_back("--no-whole-archive");
}
+ TC.addProfileRTLibs(Args, CmdArgs);
+
if (!HasWindowsApp) {
// Add system libraries. If linking to libwindowsapp.a, that import
// library replaces all these and we shouldn't accidentally try to
@@ -435,7 +436,8 @@ bool toolchains::MinGW::IsUnwindTablesDefault(const ArgList &Args) const {
if (ExceptionArg &&
ExceptionArg->getOption().matches(options::OPT_fseh_exceptions))
return true;
- return getArch() == llvm::Triple::x86_64;
+ return getArch() == llvm::Triple::x86_64 ||
+ getArch() == llvm::Triple::aarch64;
}
bool toolchains::MinGW::isPICDefault() const {
@@ -450,7 +452,7 @@ bool toolchains::MinGW::isPICDefaultForced() const {
llvm::ExceptionHandling
toolchains::MinGW::GetExceptionModel(const ArgList &Args) const {
- if (getArch() == llvm::Triple::x86_64)
+ if (getArch() == llvm::Triple::x86_64 || getArch() == llvm::Triple::aarch64)
return llvm::ExceptionHandling::WinEH;
return llvm::ExceptionHandling::DwarfCFI;
}
@@ -458,6 +460,8 @@ toolchains::MinGW::GetExceptionModel(const ArgList &Args) const {
SanitizerMask toolchains::MinGW::getSupportedSanitizers() const {
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
+ Res |= SanitizerKind::PointerCompare;
+ Res |= SanitizerKind::PointerSubtract;
return Res;
}
diff --git a/lib/Driver/ToolChains/MinGW.h b/lib/Driver/ToolChains/MinGW.h
index 04d23006ee75..08298e910ebb 100644
--- a/lib/Driver/ToolChains/MinGW.h
+++ b/lib/Driver/ToolChains/MinGW.h
@@ -1,9 +1,8 @@
//===--- MinGW.h - MinGW ToolChain Implementations --------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Minix.cpp b/lib/Driver/ToolChains/Minix.cpp
index 7fadcb129d46..dbcc1f8908c4 100644
--- a/lib/Driver/ToolChains/Minix.cpp
+++ b/lib/Driver/ToolChains/Minix.cpp
@@ -1,9 +1,8 @@
//===--- Minix.cpp - Minix ToolChain Implementations ------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Minix.h b/lib/Driver/ToolChains/Minix.h
index 6fd71850ad06..1ed6acebab9c 100644
--- a/lib/Driver/ToolChains/Minix.h
+++ b/lib/Driver/ToolChains/Minix.h
@@ -1,9 +1,8 @@
//===--- Minix.h - Minix ToolChain Implementations --------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/MipsLinux.cpp b/lib/Driver/ToolChains/MipsLinux.cpp
index 9f23996b764a..cfda7f4bb4df 100644
--- a/lib/Driver/ToolChains/MipsLinux.cpp
+++ b/lib/Driver/ToolChains/MipsLinux.cpp
@@ -1,9 +1,8 @@
-//===--- Mips.cpp - Mips ToolChain Implementations --------------*- C++ -*-===//
+//===-- MipsLinux.cpp - Mips ToolChain Implementations ----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -119,11 +118,23 @@ void MipsLLVMToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
std::string MipsLLVMToolChain::getCompilerRT(const ArgList &Args,
StringRef Component,
- bool Shared) const {
+ FileType Type) const {
SmallString<128> Path(getDriver().ResourceDir);
llvm::sys::path::append(Path, SelectedMultilib.osSuffix(), "lib" + LibSuffix,
getOS());
- llvm::sys::path::append(Path, Twine("libclang_rt." + Component + "-" +
- "mips" + (Shared ? ".so" : ".a")));
+ const char *Suffix;
+ switch (Type) {
+ case ToolChain::FT_Object:
+ Suffix = ".o";
+ break;
+ case ToolChain::FT_Static:
+ Suffix = ".a";
+ break;
+ case ToolChain::FT_Shared:
+ Suffix = ".so";
+ break;
+ }
+ llvm::sys::path::append(
+ Path, Twine("libclang_rt." + Component + "-" + "mips" + Suffix));
return Path.str();
}
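
getCompilerRT now takes a FileType instead of a bare Shared flag, so object, static, and shared runtime flavours each get their own suffix. A tiny sketch of the resulting naming scheme for this MIPS toolchain (the component names are illustrative):

#include <iostream>
#include <string>

enum FileType { FT_Object, FT_Static, FT_Shared };

// Mirror the suffix selection: .o / .a / .so appended to libclang_rt.<comp>-mips.
static std::string compilerRTName(const std::string &Component, FileType Type) {
  const char *Suffix = Type == FT_Object ? ".o" : Type == FT_Static ? ".a" : ".so";
  return "libclang_rt." + Component + "-mips" + Suffix;
}

int main() {
  std::cout << compilerRTName("builtins", FT_Static) << '\n'; // libclang_rt.builtins-mips.a
  std::cout << compilerRTName("asan", FT_Shared) << '\n';     // libclang_rt.asan-mips.so
}
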
diff --git a/lib/Driver/ToolChains/MipsLinux.h b/lib/Driver/ToolChains/MipsLinux.h
index edf58a62b95c..31b547c0063c 100644
--- a/lib/Driver/ToolChains/MipsLinux.h
+++ b/lib/Driver/ToolChains/MipsLinux.h
@@ -1,9 +1,8 @@
//===--- Mips.h - Mips ToolChain Implementations ----------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -38,8 +37,9 @@ public:
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
- std::string getCompilerRT(const llvm::opt::ArgList &Args, StringRef Component,
- bool Shared = false) const override;
+ std::string
+ getCompilerRT(const llvm::opt::ArgList &Args, StringRef Component,
+ FileType Type = ToolChain::FT_Static) const override;
std::string computeSysRoot() const override;
diff --git a/lib/Driver/ToolChains/Myriad.cpp b/lib/Driver/ToolChains/Myriad.cpp
index 2b4c1d165576..16eea1f13b14 100644
--- a/lib/Driver/ToolChains/Myriad.cpp
+++ b/lib/Driver/ToolChains/Myriad.cpp
@@ -1,9 +1,8 @@
//===--- Myriad.cpp - Myriad ToolChain Implementations ----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Myriad.h b/lib/Driver/ToolChains/Myriad.h
index 33307c3f871a..9f5225fbc62c 100644
--- a/lib/Driver/ToolChains/Myriad.h
+++ b/lib/Driver/ToolChains/Myriad.h
@@ -1,9 +1,8 @@
//===--- Myriad.h - Myriad ToolChain Implementations ------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/NaCl.cpp b/lib/Driver/ToolChains/NaCl.cpp
index 89a18944c319..984afc1758b1 100644
--- a/lib/Driver/ToolChains/NaCl.cpp
+++ b/lib/Driver/ToolChains/NaCl.cpp
@@ -1,9 +1,8 @@
//===--- NaCl.cpp - Native Client ToolChain Implementations -----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/NaCl.h b/lib/Driver/ToolChains/NaCl.h
index e0885b526d70..ab243f8087bb 100644
--- a/lib/Driver/ToolChains/NaCl.h
+++ b/lib/Driver/ToolChains/NaCl.h
@@ -1,9 +1,8 @@
//===--- NaCl.h - Native Client ToolChain Implementations -------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/NetBSD.cpp b/lib/Driver/ToolChains/NetBSD.cpp
index b1321cacaf7a..3219a5d1e4f4 100644
--- a/lib/Driver/ToolChains/NetBSD.cpp
+++ b/lib/Driver/ToolChains/NetBSD.cpp
@@ -1,9 +1,8 @@
//===--- NetBSD.cpp - NetBSD ToolChain Implementations ----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -17,6 +16,7 @@
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Support/VirtualFileSystem.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -256,6 +256,13 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
bool NeedsXRayDeps = addXRayRuntime(ToolChain, Args, CmdArgs);
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
+ const SanitizerArgs &SanArgs = ToolChain.getSanitizerArgs();
+ if (SanArgs.needsSharedRt()) {
+ CmdArgs.push_back("-rpath");
+ CmdArgs.push_back(Args.MakeArgString(
+ ToolChain.getCompilerRTPath().c_str()));
+ }
+
unsigned Major, Minor, Micro;
ToolChain.getTriple().getOSVersion(Major, Minor, Micro);
bool useLibgcc = true;
@@ -416,8 +423,23 @@ ToolChain::CXXStdlibType NetBSD::GetDefaultCXXStdlibType() const {
void NetBSD::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
- addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/usr/include/c++/");
+ const std::string Candidates[] = {
+ // directory relative to build tree
+ getDriver().Dir + "/../include/c++/v1",
+ // system install with full upstream path
+ getDriver().SysRoot + "/usr/include/c++/v1",
+ // system install from src
+ getDriver().SysRoot + "/usr/include/c++",
+ };
+
+ for (const auto &IncludePath : Candidates) {
+ if (!getVFS().exists(IncludePath + "/__config"))
+ continue;
+
+ // Use the first candidate that looks valid.
+ addSystemInclude(DriverArgs, CC1Args, IncludePath);
+ return;
+ }
}
void NetBSD::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
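
Rather than hard-coding <sysroot>/usr/include/c++/, the NetBSD toolchain now probes an ordered candidate list and uses the first directory that actually contains libc++'s __config header. A simplified sketch of that probe, with std::filesystem standing in for the driver's VFS and hypothetical driver/sysroot paths:

#include <filesystem>
#include <iostream>
#include <string>
#include <system_error>
#include <vector>

// Return the first candidate directory containing "__config", or "" if none do.
static std::string firstLibcxxDir(const std::vector<std::string> &Candidates) {
  std::error_code EC;
  for (const std::string &Dir : Candidates)
    if (std::filesystem::exists(Dir + "/__config", EC))
      return Dir;
  return "";
}

int main() {
  std::vector<std::string> Candidates = {
      "/opt/llvm/bin/../include/c++/v1", // relative to a hypothetical build tree
      "/usr/include/c++/v1",             // full upstream install path
      "/usr/include/c++",                // install from NetBSD src
  };
  std::cout << firstLibcxxDir(Candidates) << '\n';
}
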
@@ -441,6 +463,8 @@ SanitizerMask NetBSD::getSupportedSanitizers() const {
SanitizerMask Res = ToolChain::getSupportedSanitizers();
if (IsX86 || IsX86_64) {
Res |= SanitizerKind::Address;
+ Res |= SanitizerKind::PointerCompare;
+ Res |= SanitizerKind::PointerSubtract;
Res |= SanitizerKind::Function;
Res |= SanitizerKind::Leak;
Res |= SanitizerKind::SafeStack;
@@ -449,7 +473,6 @@ SanitizerMask NetBSD::getSupportedSanitizers() const {
}
if (IsX86_64) {
Res |= SanitizerKind::DataFlow;
- Res |= SanitizerKind::Efficiency;
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::FuzzerNoLink;
Res |= SanitizerKind::HWAddress;
diff --git a/lib/Driver/ToolChains/NetBSD.h b/lib/Driver/ToolChains/NetBSD.h
index ae0865fd6573..6d404263f625 100644
--- a/lib/Driver/ToolChains/NetBSD.h
+++ b/lib/Driver/ToolChains/NetBSD.h
@@ -1,9 +1,8 @@
//===--- NetBSD.h - NetBSD ToolChain Implementations ------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/OpenBSD.cpp b/lib/Driver/ToolChains/OpenBSD.cpp
index 3d35d37b7db3..8441b83c29a9 100644
--- a/lib/Driver/ToolChains/OpenBSD.cpp
+++ b/lib/Driver/ToolChains/OpenBSD.cpp
@@ -1,9 +1,8 @@
//===--- OpenBSD.cpp - OpenBSD ToolChain Implementations --------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -189,11 +188,11 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lm");
}
if (NeedsSanitizerDeps) {
- CmdArgs.push_back(ToolChain.getCompilerRTArgString(Args, "builtins", false));
+ CmdArgs.push_back(ToolChain.getCompilerRTArgString(Args, "builtins"));
linkSanitizerRuntimeDeps(ToolChain, CmdArgs);
}
if (NeedsXRayDeps) {
- CmdArgs.push_back(ToolChain.getCompilerRTArgString(Args, "builtins", false));
+ CmdArgs.push_back(ToolChain.getCompilerRTArgString(Args, "builtins"));
linkXRayRuntimeDeps(ToolChain, CmdArgs);
}
// FIXME: For some reason GCC passes -lgcc before adding
@@ -227,9 +226,7 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend)));
}
- const char *Exec = Args.MakeArgString(
- !NeedsSanitizerDeps ? ToolChain.GetLinkerPath()
- : ToolChain.GetProgramPath("ld.lld"));
+ const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
diff --git a/lib/Driver/ToolChains/OpenBSD.h b/lib/Driver/ToolChains/OpenBSD.h
index 1912abdb95bc..c92d109b7c16 100644
--- a/lib/Driver/ToolChains/OpenBSD.h
+++ b/lib/Driver/ToolChains/OpenBSD.h
@@ -1,9 +1,8 @@
//===--- OpenBSD.h - OpenBSD ToolChain Implementations ----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/PPCLinux.cpp b/lib/Driver/ToolChains/PPCLinux.cpp
new file mode 100644
index 000000000000..5221e5d0e22c
--- /dev/null
+++ b/lib/Driver/ToolChains/PPCLinux.cpp
@@ -0,0 +1,31 @@
+//===-- PPCLinux.cpp - PowerPC ToolChain Implementations --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "PPCLinux.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Options.h"
+#include "llvm/Support/Path.h"
+
+using namespace clang::driver::toolchains;
+using namespace llvm::opt;
+
+void PPCLinuxToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ // PPC wrapper headers are an implementation of the x86 intrinsics on PowerPC,
+ // which is not supported on the PPC32 platform.
+ if (getArch() != llvm::Triple::ppc &&
+ !DriverArgs.hasArg(clang::driver::options::OPT_nostdinc) &&
+ !DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ const Driver &D = getDriver();
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "include", "ppc_wrappers");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ }
+
+ Linux::AddClangSystemIncludeArgs(DriverArgs, CC1Args);
+}
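
The new PPCLinuxToolChain only adds one thing on top of the generic Linux toolchain: the ppc_wrappers resource headers, which provide PowerPC implementations of the x86 intrinsic headers, skipped on 32-bit ppc and when -nostdinc or -nobuiltininc is given. A minimal sketch of the directory that ends up on the system include list, with a hypothetical resource directory:

#include <filesystem>
#include <iostream>

int main() {
  // Hypothetical clang resource directory (D.ResourceDir in the driver).
  std::filesystem::path ResourceDir = "/usr/lib/clang/9.0.0";
  std::filesystem::path P = ResourceDir / "include" / "ppc_wrappers";
  std::cout << P.string() << '\n'; // /usr/lib/clang/9.0.0/include/ppc_wrappers
}
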
diff --git a/lib/Driver/ToolChains/PPCLinux.h b/lib/Driver/ToolChains/PPCLinux.h
new file mode 100644
index 000000000000..b3ef7b61dc3a
--- /dev/null
+++ b/lib/Driver/ToolChains/PPCLinux.h
@@ -0,0 +1,33 @@
+//===--- PPCLinux.h - PowerPC ToolChain Implementations ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_PPC_LINUX_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_PPC_LINUX_H
+
+#include "Linux.h"
+
+namespace clang {
+namespace driver {
+namespace toolchains {
+
+class LLVM_LIBRARY_VISIBILITY PPCLinuxToolChain : public Linux {
+public:
+ PPCLinuxToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args)
+ : Linux(D, Triple, Args) {}
+
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+};
+
+} // end namespace toolchains
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_PPC_LINUX_H
diff --git a/lib/Driver/ToolChains/PS4CPU.cpp b/lib/Driver/ToolChains/PS4CPU.cpp
index 0708d25fe45c..7be471365668 100644
--- a/lib/Driver/ToolChains/PS4CPU.cpp
+++ b/lib/Driver/ToolChains/PS4CPU.cpp
@@ -1,9 +1,8 @@
//===--- PS4CPU.cpp - PS4CPU ToolChain Implementations ----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -426,6 +425,8 @@ bool toolchains::PS4CPU::HasNativeLLVMSupport() const { return true; }
SanitizerMask toolchains::PS4CPU::getSupportedSanitizers() const {
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
+ Res |= SanitizerKind::PointerCompare;
+ Res |= SanitizerKind::PointerSubtract;
Res |= SanitizerKind::Vptr;
return Res;
}
diff --git a/lib/Driver/ToolChains/PS4CPU.h b/lib/Driver/ToolChains/PS4CPU.h
index bd0a44352f4d..e9f0891c1194 100644
--- a/lib/Driver/ToolChains/PS4CPU.h
+++ b/lib/Driver/ToolChains/PS4CPU.h
@@ -1,9 +1,8 @@
//===--- PS4CPU.h - PS4CPU ToolChain Implementations ------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/RISCVToolchain.cpp b/lib/Driver/ToolChains/RISCVToolchain.cpp
index e787c82b28a8..c5fdd129c3a8 100644
--- a/lib/Driver/ToolChains/RISCVToolchain.cpp
+++ b/lib/Driver/ToolChains/RISCVToolchain.cpp
@@ -1,9 +1,8 @@
//===--- RISCVToolchain.cpp - RISCV ToolChain Implementations ---*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/RISCVToolchain.h b/lib/Driver/ToolChains/RISCVToolchain.h
index 4b38690b1b61..b2b56b066efd 100644
--- a/lib/Driver/ToolChains/RISCVToolchain.h
+++ b/lib/Driver/ToolChains/RISCVToolchain.h
@@ -1,9 +1,8 @@
//===--- RISCVToolchain.h - RISCV ToolChain Implementations -----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/Solaris.cpp b/lib/Driver/ToolChains/Solaris.cpp
index b48edbb08ee6..38f24d4cf7e7 100644
--- a/lib/Driver/ToolChains/Solaris.cpp
+++ b/lib/Driver/ToolChains/Solaris.cpp
@@ -1,9 +1,8 @@
//===--- Solaris.cpp - Solaris ToolChain Implementations --------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -66,10 +65,6 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-Bdynamic");
if (Args.hasArg(options::OPT_shared)) {
CmdArgs.push_back("-shared");
- } else {
- CmdArgs.push_back("--dynamic-linker");
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("ld.so.1")));
}
// libpthread has been folded into libc since Solaris 10, no need to do
@@ -97,13 +92,6 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o")));
}
- // Provide __start___sancov_guards. Solaris ld doesn't automatically create
- // __start_SECNAME labels.
- CmdArgs.push_back("--whole-archive");
- CmdArgs.push_back(
- getToolChain().getCompilerRTArgString(Args, "sancov_begin", false));
- CmdArgs.push_back("--no-whole-archive");
-
getToolChain().AddFilePathLibArgs(Args, CmdArgs);
Args.AddAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
@@ -132,13 +120,6 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
linkSanitizerRuntimeDeps(getToolChain(), CmdArgs);
}
- // Provide __stop___sancov_guards. Solaris ld doesn't automatically create
- // __stop_SECNAME labels.
- CmdArgs.push_back("--whole-archive");
- CmdArgs.push_back(
- getToolChain().getCompilerRTArgString(Args, "sancov_end", false));
- CmdArgs.push_back("--no-whole-archive");
-
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
CmdArgs.push_back(
Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
@@ -200,6 +181,8 @@ SanitizerMask Solaris::getSupportedSanitizers() const {
// FIXME: Omit X86_64 until 64-bit support is figured out.
if (IsX86) {
Res |= SanitizerKind::Address;
+ Res |= SanitizerKind::PointerCompare;
+ Res |= SanitizerKind::PointerSubtract;
}
Res |= SanitizerKind::Vptr;
return Res;
diff --git a/lib/Driver/ToolChains/Solaris.h b/lib/Driver/ToolChains/Solaris.h
index 4d9c828b5c6b..b79e626ef38d 100644
--- a/lib/Driver/ToolChains/Solaris.h
+++ b/lib/Driver/ToolChains/Solaris.h
@@ -1,9 +1,8 @@
//===--- Solaris.h - Solaris ToolChain Implementations ----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/TCE.cpp b/lib/Driver/ToolChains/TCE.cpp
index ae8a1c806485..33a81c54bd42 100644
--- a/lib/Driver/ToolChains/TCE.cpp
+++ b/lib/Driver/ToolChains/TCE.cpp
@@ -1,9 +1,8 @@
//===--- TCE.cpp - TCE ToolChain Implementations ----------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/TCE.h b/lib/Driver/ToolChains/TCE.h
index 4644f4eedb0e..72933dae965e 100644
--- a/lib/Driver/ToolChains/TCE.h
+++ b/lib/Driver/ToolChains/TCE.h
@@ -1,9 +1,8 @@
//===--- TCE.h - TCE Tool and ToolChain Implementations ---------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/WebAssembly.cpp b/lib/Driver/ToolChains/WebAssembly.cpp
index 6310d5fabaec..7a40c13c065a 100644
--- a/lib/Driver/ToolChains/WebAssembly.cpp
+++ b/lib/Driver/ToolChains/WebAssembly.cpp
@@ -1,18 +1,20 @@
//===--- WebAssembly.cpp - WebAssembly ToolChain Implementation -*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "WebAssembly.h"
#include "CommonArgs.h"
+#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
#include "llvm/Option/ArgList.h"
using namespace clang::driver;
@@ -21,9 +23,6 @@ using namespace clang::driver::toolchains;
using namespace clang;
using namespace llvm::opt;
-wasm::Linker::Linker(const ToolChain &TC)
- : GnuTool("wasm::Linker", "lld", TC) {}
-
/// Following the conventions in https://wiki.debian.org/Multiarch/Tuples,
/// we remove the vendor field to form the multiarch triple.
static std::string getMultiarchTriple(const Driver &D,
@@ -33,9 +32,24 @@ static std::string getMultiarchTriple(const Driver &D,
TargetTriple.getOSAndEnvironmentName()).str();
}
-bool wasm::Linker::isLinkJob() const { return true; }
+std::string wasm::Linker::getLinkerPath(const ArgList &Args) const {
+ const ToolChain &ToolChain = getToolChain();
+ if (const Arg* A = Args.getLastArg(options::OPT_fuse_ld_EQ)) {
+ StringRef UseLinker = A->getValue();
+ if (!UseLinker.empty()) {
+ if (llvm::sys::path::is_absolute(UseLinker) &&
+ llvm::sys::fs::can_execute(UseLinker))
+ return UseLinker;
+
+      // Accept 'lld' and 'ld' as aliases for the default linker
+ if (UseLinker != "lld" && UseLinker != "ld")
+ ToolChain.getDriver().Diag(diag::err_drv_invalid_linker_name)
+ << A->getAsString(Args);
+ }
+ }
-bool wasm::Linker::hasIntegratedCPP() const { return false; }
+ return ToolChain.GetProgramPath(ToolChain.getDefaultLinker());
+}
void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
@@ -44,7 +58,7 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *LinkingOutput) const {
const ToolChain &ToolChain = getToolChain();
- const char *Linker = Args.MakeArgString(ToolChain.GetLinkerPath());
+ const char *Linker = Args.MakeArgString(getLinkerPath(Args));
ArgStringList CmdArgs;
if (Args.hasArg(options::OPT_s))
@@ -63,8 +77,10 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (ToolChain.ShouldLinkCXXStdlib(Args))
ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
- if (Args.hasArg(options::OPT_pthread))
+ if (Args.hasArg(options::OPT_pthread)) {
CmdArgs.push_back("-lpthread");
+ CmdArgs.push_back("--shared-memory");
+ }
CmdArgs.push_back("-lc");
AddRunTimeLibs(ToolChain, ToolChain.getDriver(), CmdArgs, Args);
@@ -124,6 +140,32 @@ void WebAssembly::addClangTargetOptions(const ArgList &DriverArgs,
if (DriverArgs.hasFlag(clang::driver::options::OPT_fuse_init_array,
options::OPT_fno_use_init_array, true))
CC1Args.push_back("-fuse-init-array");
+
+ // '-pthread' implies atomics, bulk-memory, and mutable-globals
+ if (DriverArgs.hasFlag(options::OPT_pthread, options::OPT_no_pthread,
+ false)) {
+ if (DriverArgs.hasFlag(options::OPT_mno_atomics, options::OPT_matomics,
+ false))
+ getDriver().Diag(diag::err_drv_argument_not_allowed_with)
+ << "-pthread"
+ << "-mno-atomics";
+ if (DriverArgs.hasFlag(options::OPT_mno_bulk_memory,
+ options::OPT_mbulk_memory, false))
+ getDriver().Diag(diag::err_drv_argument_not_allowed_with)
+ << "-pthread"
+ << "-mno-bulk-memory";
+ if (DriverArgs.hasFlag(options::OPT_mno_mutable_globals,
+ options::OPT_mmutable_globals, false))
+ getDriver().Diag(diag::err_drv_argument_not_allowed_with)
+ << "-pthread"
+ << "-mno-mutable-globals";
+ CC1Args.push_back("-target-feature");
+ CC1Args.push_back("+atomics");
+ CC1Args.push_back("-target-feature");
+ CC1Args.push_back("+bulk-memory");
+ CC1Args.push_back("-target-feature");
+ CC1Args.push_back("+mutable-globals");
+ }
}
ToolChain::RuntimeLibType WebAssembly::GetDefaultRuntimeLibType() const {
@@ -143,14 +185,39 @@ WebAssembly::GetCXXStdlibType(const ArgList &Args) const {
void WebAssembly::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- if (!DriverArgs.hasArg(options::OPT_nostdinc)) {
- if (getTriple().getOS() != llvm::Triple::UnknownOS) {
- const std::string MultiarchTriple =
- getMultiarchTriple(getDriver(), getTriple(), getDriver().SysRoot);
- addSystemInclude(DriverArgs, CC1Args, getDriver().SysRoot + "/include/" + MultiarchTriple);
+ if (DriverArgs.hasArg(clang::driver::options::OPT_nostdinc))
+ return;
+
+ const Driver &D = getDriver();
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "include");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ // Check for configure-time C include directories.
+ StringRef CIncludeDirs(C_INCLUDE_DIRS);
+ if (CIncludeDirs != "") {
+ SmallVector<StringRef, 5> dirs;
+ CIncludeDirs.split(dirs, ":");
+ for (StringRef dir : dirs) {
+ StringRef Prefix =
+ llvm::sys::path::is_absolute(dir) ? StringRef(D.SysRoot) : "";
+ addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
}
- addSystemInclude(DriverArgs, CC1Args, getDriver().SysRoot + "/include");
+ return;
}
+
+ if (getTriple().getOS() != llvm::Triple::UnknownOS) {
+ const std::string MultiarchTriple =
+ getMultiarchTriple(D, getTriple(), D.SysRoot);
+ addSystemInclude(DriverArgs, CC1Args, D.SysRoot + "/include/" + MultiarchTriple);
+ }
+ addSystemInclude(DriverArgs, CC1Args, D.SysRoot + "/include");
}
void WebAssembly::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
@@ -161,7 +228,8 @@ void WebAssembly::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
const std::string MultiarchTriple =
getMultiarchTriple(getDriver(), getTriple(), getDriver().SysRoot);
addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/include/" + MultiarchTriple + "/c++/v1");
+ getDriver().SysRoot + "/include/" + MultiarchTriple +
+ "/c++/v1");
}
addSystemInclude(DriverArgs, CC1Args,
getDriver().SysRoot + "/include/c++/v1");
@@ -181,12 +249,12 @@ void WebAssembly::AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
}
}
-std::string WebAssembly::getThreadModel() const {
- // The WebAssembly MVP does not yet support threads; for now, use the
- // "single" threading model, which lowers atomics to non-atomic operations.
- // When threading support is standardized and implemented in popular engines,
- // this override should be removed.
- return "single";
+SanitizerMask WebAssembly::getSupportedSanitizers() const {
+ SanitizerMask Res = ToolChain::getSupportedSanitizers();
+ if (getTriple().isOSEmscripten()) {
+ Res |= SanitizerKind::Vptr | SanitizerKind::Leak | SanitizerKind::Address;
+ }
+ return Res;
}
Tool *WebAssembly::buildLinker() const {
diff --git a/lib/Driver/ToolChains/WebAssembly.h b/lib/Driver/ToolChains/WebAssembly.h
index d795bad90020..67d5fce84576 100644
--- a/lib/Driver/ToolChains/WebAssembly.h
+++ b/lib/Driver/ToolChains/WebAssembly.h
@@ -1,9 +1,8 @@
//===--- WebAssembly.h - WebAssembly ToolChain Implementations --*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -21,9 +20,11 @@ namespace wasm {
class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
public:
- explicit Linker(const ToolChain &TC);
- bool isLinkJob() const override;
- bool hasIntegratedCPP() const override;
+ explicit Linker(const ToolChain &TC)
+ : GnuTool("wasm::Linker", "linker", TC) {}
+ bool isLinkJob() const override { return true; }
+ bool hasIntegratedCPP() const override { return false; }
+ std::string getLinkerPath(const llvm::opt::ArgList &Args) const;
void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output, const InputInfoList &Inputs,
const llvm::opt::ArgList &TCArgs,
@@ -65,7 +66,7 @@ private:
llvm::opt::ArgStringList &CC1Args) const override;
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
- std::string getThreadModel() const override;
+ SanitizerMask getSupportedSanitizers() const override;
const char *getDefaultLinker() const override { return "wasm-ld"; }
diff --git a/lib/Driver/ToolChains/XCore.cpp b/lib/Driver/ToolChains/XCore.cpp
index 43175ad7d632..477cdb760914 100644
--- a/lib/Driver/ToolChains/XCore.cpp
+++ b/lib/Driver/ToolChains/XCore.cpp
@@ -1,9 +1,8 @@
//===--- XCore.cpp - XCore ToolChain Implementations ------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/ToolChains/XCore.h b/lib/Driver/ToolChains/XCore.h
index 00c89bd15f78..41dce08454c0 100644
--- a/lib/Driver/ToolChains/XCore.h
+++ b/lib/Driver/ToolChains/XCore.h
@@ -1,9 +1,8 @@
//===--- XCore.h - XCore ToolChain Implementations --------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/Types.cpp b/lib/Driver/Types.cpp
index 9d2737bbc719..96937678ac6b 100644
--- a/lib/Driver/Types.cpp
+++ b/lib/Driver/Types.cpp
@@ -1,9 +1,8 @@
//===--- Types.cpp - Driver input & temporary type information ------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Driver/XRayArgs.cpp b/lib/Driver/XRayArgs.cpp
index 1a48493d7dc7..45ef96e12b3b 100644
--- a/lib/Driver/XRayArgs.cpp
+++ b/lib/Driver/XRayArgs.cpp
@@ -1,9 +1,8 @@
//===--- XRayArgs.cpp - Arguments for XRay --------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "clang/Driver/XRayArgs.h"
diff --git a/lib/Edit/Commit.cpp b/lib/Edit/Commit.cpp
index e72b13cf8198..7c5aea6e5069 100644
--- a/lib/Edit/Commit.cpp
+++ b/lib/Edit/Commit.cpp
@@ -1,9 +1,8 @@
//===- Commit.cpp - A unit of edits ---------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Edit/EditedSource.cpp b/lib/Edit/EditedSource.cpp
index b38f8fd0d9cb..b3bc63a903a7 100644
--- a/lib/Edit/EditedSource.cpp
+++ b/lib/Edit/EditedSource.cpp
@@ -1,9 +1,8 @@
//===- EditedSource.cpp - Collection of source edits ----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -61,7 +60,7 @@ void EditedSource::finishedCommit() {
MacroArgUse ArgUse;
std::tie(ExpLoc, ArgUse) = ExpArg;
auto &ArgUses = ExpansionToArgMap[ExpLoc.getRawEncoding()];
- if (std::find(ArgUses.begin(), ArgUses.end(), ArgUse) == ArgUses.end())
+ if (llvm::find(ArgUses, ArgUse) == ArgUses.end())
ArgUses.push_back(ArgUse);
}
CurrCommitMacroArgExps.clear();
diff --git a/lib/Edit/RewriteObjCFoundationAPI.cpp b/lib/Edit/RewriteObjCFoundationAPI.cpp
index 7c9ab170093f..6f4a880b649a 100644
--- a/lib/Edit/RewriteObjCFoundationAPI.cpp
+++ b/lib/Edit/RewriteObjCFoundationAPI.cpp
@@ -1,9 +1,8 @@
//===--- RewriteObjCFoundationAPI.cpp - Foundation API Rewriter -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -1043,6 +1042,7 @@ static bool rewriteToNumericBoxedExpression(const ObjCMessageExpr *Msg,
case CK_Dependent:
case CK_BitCast:
case CK_LValueBitCast:
+ case CK_LValueToRValueBitCast:
case CK_BaseToDerived:
case CK_DerivedToBase:
case CK_UncheckedDerivedToBase:
@@ -1087,6 +1087,8 @@ static bool rewriteToNumericBoxedExpression(const ObjCMessageExpr *Msg,
case CK_FixedPointCast:
case CK_FixedPointToBoolean:
+ case CK_FixedPointToIntegral:
+ case CK_IntegralToFixedPoint:
llvm_unreachable("Fixed point types are disabled for Objective-C");
}
}
diff --git a/lib/Format/AffectedRangeManager.cpp b/lib/Format/AffectedRangeManager.cpp
index b14316a14cd9..7ad1f7070d0a 100644
--- a/lib/Format/AffectedRangeManager.cpp
+++ b/lib/Format/AffectedRangeManager.cpp
@@ -1,9 +1,8 @@
//===--- AffectedRangeManager.cpp - Format C++ code -----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/Format/AffectedRangeManager.h b/lib/Format/AffectedRangeManager.h
index b0c9dd259fb8..8cf39443fd41 100644
--- a/lib/Format/AffectedRangeManager.h
+++ b/lib/Format/AffectedRangeManager.h
@@ -1,9 +1,8 @@
//===--- AffectedRangeManager.h - Format C++ code ---------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/Format/BreakableToken.cpp b/lib/Format/BreakableToken.cpp
index e6ce01b520b5..72886ed00736 100644
--- a/lib/Format/BreakableToken.cpp
+++ b/lib/Format/BreakableToken.cpp
@@ -1,9 +1,8 @@
//===--- BreakableToken.cpp - Format C++ code -----------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -63,12 +62,11 @@ static StringRef getLineCommentIndentPrefix(StringRef Comment,
return LongestPrefix;
}
-static BreakableToken::Split getCommentSplit(StringRef Text,
- unsigned ContentStartColumn,
- unsigned ColumnLimit,
- unsigned TabWidth,
- encoding::Encoding Encoding,
- const FormatStyle &Style) {
+static BreakableToken::Split
+getCommentSplit(StringRef Text, unsigned ContentStartColumn,
+ unsigned ColumnLimit, unsigned TabWidth,
+ encoding::Encoding Encoding, const FormatStyle &Style,
+ bool DecorationEndsWithStar = false) {
LLVM_DEBUG(llvm::dbgs() << "Comment split: \"" << Text
<< "\", Column limit: " << ColumnLimit
<< ", Content start: " << ContentStartColumn << "\n");
@@ -126,7 +124,10 @@ static BreakableToken::Split getCommentSplit(StringRef Text,
if (SpaceOffset == 1 && Text[SpaceOffset - 1] == '*')
return BreakableToken::Split(StringRef::npos, 0);
StringRef BeforeCut = Text.substr(0, SpaceOffset).rtrim(Blanks);
- StringRef AfterCut = Text.substr(SpaceOffset).ltrim(Blanks);
+ StringRef AfterCut = Text.substr(SpaceOffset);
+ // Don't trim the leading blanks if it would create a */ after the break.
+ if (!DecorationEndsWithStar || AfterCut.size() <= 1 || AfterCut[1] != '/')
+ AfterCut = AfterCut.ltrim(Blanks);
return BreakableToken::Split(BeforeCut.size(),
AfterCut.begin() - BeforeCut.end());
}
@@ -192,7 +193,7 @@ bool switchesFormatting(const FormatToken &Token) {
unsigned
BreakableToken::getLengthAfterCompression(unsigned RemainingTokenColumns,
- Split Split) const {
+ Split Split) const {
// Example: consider the content
// lala lala
// - RemainingTokenColumns is the original number of columns, 10;
@@ -332,7 +333,7 @@ static bool mayReflowContent(StringRef Content) {
BreakableBlockComment::BreakableBlockComment(
const FormatToken &Token, unsigned StartColumn,
unsigned OriginalStartColumn, bool FirstInLine, bool InPPDirective,
- encoding::Encoding Encoding, const FormatStyle &Style)
+ encoding::Encoding Encoding, const FormatStyle &Style, bool UseCRLF)
: BreakableComment(Token, StartColumn, InPPDirective, Encoding, Style),
DelimitersOnNewline(false),
UnbreakableTailLength(Token.UnbreakableTailLength) {
@@ -341,7 +342,8 @@ BreakableBlockComment::BreakableBlockComment(
StringRef TokenText(Tok.TokenText);
assert(TokenText.startswith("/*") && TokenText.endswith("*/"));
- TokenText.substr(2, TokenText.size() - 4).split(Lines, "\n");
+ TokenText.substr(2, TokenText.size() - 4).split(Lines,
+ UseCRLF ? "\r\n" : "\n");
int IndentDelta = StartColumn - OriginalStartColumn;
Content.resize(Lines.size());
@@ -454,6 +456,18 @@ BreakableBlockComment::BreakableBlockComment(
});
}
+BreakableToken::Split
+BreakableBlockComment::getSplit(unsigned LineIndex, unsigned TailOffset,
+ unsigned ColumnLimit, unsigned ContentStartColumn,
+ llvm::Regex &CommentPragmasRegex) const {
+ // Don't break lines matching the comment pragmas regex.
+ if (CommentPragmasRegex.match(Content[LineIndex]))
+ return Split(StringRef::npos, 0);
+ return getCommentSplit(Content[LineIndex].substr(TailOffset),
+ ContentStartColumn, ColumnLimit, Style.TabWidth,
+ Encoding, Style, Decoration.endswith("*"));
+}
+
void BreakableBlockComment::adjustWhitespace(unsigned LineIndex,
int IndentDelta) {
// When in a preprocessor directive, the trailing backslash in a block comment
@@ -475,7 +489,7 @@ void BreakableBlockComment::adjustWhitespace(unsigned LineIndex,
// Calculate the start of the non-whitespace text in the current line.
size_t StartOfLine = Lines[LineIndex].find_first_not_of(Blanks);
if (StartOfLine == StringRef::npos)
- StartOfLine = Lines[LineIndex].rtrim("\r\n").size();
+ StartOfLine = Lines[LineIndex].size();
StringRef Whitespace = Lines[LineIndex].substr(0, StartOfLine);
// Adjust Lines to only contain relevant text.
@@ -871,23 +885,20 @@ void BreakableLineCommentSection::reflow(unsigned LineIndex,
// the next line.
unsigned WhitespaceLength =
Lines[LineIndex].data() - tokenAt(LineIndex).TokenText.data() - Offset;
- Whitespaces.replaceWhitespaceInToken(*Tokens[LineIndex],
- Offset,
+ Whitespaces.replaceWhitespaceInToken(*Tokens[LineIndex], Offset,
/*ReplaceChars=*/WhitespaceLength,
/*PreviousPostfix=*/"",
/*CurrentPrefix=*/"",
/*InPPDirective=*/false,
/*Newlines=*/0,
/*Spaces=*/0);
-
}
// Replace the indent and prefix of the token with the reflow prefix.
unsigned Offset =
Lines[LineIndex].data() - tokenAt(LineIndex).TokenText.data();
unsigned WhitespaceLength =
Content[LineIndex].data() - Lines[LineIndex].data();
- Whitespaces.replaceWhitespaceInToken(*Tokens[LineIndex],
- Offset,
+ Whitespaces.replaceWhitespaceInToken(*Tokens[LineIndex], Offset,
/*ReplaceChars=*/WhitespaceLength,
/*PreviousPostfix=*/"",
/*CurrentPrefix=*/ReflowPrefix,
diff --git a/lib/Format/BreakableToken.h b/lib/Format/BreakableToken.h
index 10e180178021..72852d59f9c4 100644
--- a/lib/Format/BreakableToken.h
+++ b/lib/Format/BreakableToken.h
@@ -1,9 +1,8 @@
//===--- BreakableToken.h - Format C++ code ---------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -147,9 +146,7 @@ public:
// * @param loooooooooooooong line
// * continuation
// */
- virtual unsigned getContentIndent(unsigned LineIndex) const {
- return 0;
- }
+ virtual unsigned getContentIndent(unsigned LineIndex) const { return 0; }
/// Returns a range (offset, length) at which to break the line at
/// \p LineIndex, if previously broken at \p TailOffset. If possible, do not
@@ -203,9 +200,7 @@ public:
/// Returns whether there will be a line break at the start of the
/// token.
- virtual bool introducesBreakBeforeToken() const {
- return false;
- }
+ virtual bool introducesBreakBeforeToken() const { return false; }
/// Replaces the whitespace between \p LineIndex-1 and \p LineIndex.
virtual void adaptStartOfLine(unsigned LineIndex,
@@ -364,8 +359,11 @@ public:
BreakableBlockComment(const FormatToken &Token, unsigned StartColumn,
unsigned OriginalStartColumn, bool FirstInLine,
bool InPPDirective, encoding::Encoding Encoding,
- const FormatStyle &Style);
+ const FormatStyle &Style, bool UseCRLF);
+ Split getSplit(unsigned LineIndex, unsigned TailOffset, unsigned ColumnLimit,
+ unsigned ContentStartColumn,
+ llvm::Regex &CommentPragmasRegex) const override;
unsigned getRangeLength(unsigned LineIndex, unsigned Offset,
StringRef::size_type Length,
unsigned StartColumn) const override;
diff --git a/lib/Format/ContinuationIndenter.cpp b/lib/Format/ContinuationIndenter.cpp
index c369b94b9987..b04ede6fa939 100644
--- a/lib/Format/ContinuationIndenter.cpp
+++ b/lib/Format/ContinuationIndenter.cpp
@@ -1,9 +1,8 @@
//===--- ContinuationIndenter.cpp - Format C++ code -----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -404,8 +403,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
// FIXME: We should find a more generic solution to this problem.
!(State.Column <= NewLineColumn &&
Style.Language == FormatStyle::LK_JavaScript) &&
- !(Previous.closesScopeAfterBlock() &&
- State.Column <= NewLineColumn))
+ !(Previous.closesScopeAfterBlock() && State.Column <= NewLineColumn))
return true;
// If the template declaration spans multiple lines, force wrap before the
@@ -420,7 +418,8 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
if (Style.AlwaysBreakBeforeMultilineStrings &&
(NewLineColumn == State.FirstIndent + Style.ContinuationIndentWidth ||
Previous.is(tok::comma) || Current.NestingLevel < 2) &&
- !Previous.isOneOf(tok::kw_return, tok::lessless, tok::at) &&
+ !Previous.isOneOf(tok::kw_return, tok::lessless, tok::at,
+ Keywords.kw_dollar) &&
!Previous.isOneOf(TT_InlineASMColon, TT_ConditionalExpr) &&
nextIsMultilineString(State))
return true;
@@ -836,8 +835,8 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
// about removing empty lines on closing blocks. Special case them here.
MaxEmptyLinesToKeep = 1;
}
- unsigned Newlines = std::max(
- 1u, std::min(Current.NewlinesBefore, MaxEmptyLinesToKeep));
+ unsigned Newlines =
+ std::max(1u, std::min(Current.NewlinesBefore, MaxEmptyLinesToKeep));
bool ContinuePPDirective =
State.Line->InPPDirective && State.Line->Type != LT_ImportStatement;
Whitespaces.replaceWhitespace(Current, Newlines, State.Column, State.Column,
@@ -882,14 +881,30 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
State.Stack.back().BreakBeforeClosingBrace = true;
if (State.Stack.back().AvoidBinPacking) {
- // If we are breaking after '(', '{', '<', this is not bin packing
- // unless AllowAllParametersOfDeclarationOnNextLine is false or this is a
- // dict/object literal.
- if (!Previous.isOneOf(tok::l_paren, tok::l_brace, TT_BinaryOperator) ||
+ // If we are breaking after '(', '{', '<', or this is the break after a ':'
+    // to start a member initializer list in a constructor, this should not
+ // be considered bin packing unless the relevant AllowAll option is false or
+ // this is a dict/object literal.
+ bool PreviousIsBreakingCtorInitializerColon =
+ Previous.is(TT_CtorInitializerColon) &&
+ Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon;
+ if (!(Previous.isOneOf(tok::l_paren, tok::l_brace, TT_BinaryOperator) ||
+ PreviousIsBreakingCtorInitializerColon) ||
(!Style.AllowAllParametersOfDeclarationOnNextLine &&
State.Line->MustBeDeclaration) ||
+ (!Style.AllowAllArgumentsOnNextLine &&
+ !State.Line->MustBeDeclaration) ||
+ (!Style.AllowAllConstructorInitializersOnNextLine &&
+ PreviousIsBreakingCtorInitializerColon) ||
Previous.is(TT_DictLiteral))
State.Stack.back().BreakBeforeParameter = true;
+
+ // If we are breaking after a ':' to start a member initializer list,
+ // and we allow all arguments on the next line, we should not break
+ // before the next parameter.
+ if (PreviousIsBreakingCtorInitializerColon &&
+ Style.AllowAllConstructorInitializersOnNextLine)
+ State.Stack.back().BreakBeforeParameter = false;
}
return Penalty;
@@ -930,18 +945,24 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
return State.Stack[State.Stack.size() - 2].LastSpace;
return State.FirstIndent;
}
- // Indent a closing parenthesis at the previous level if followed by a semi or
- // opening brace. This allows indentations such as:
+ // Indent a closing parenthesis at the previous level if followed by a semi,
+ // const, or opening brace. This allows indentations such as:
// foo(
// a,
// );
+ // int Foo::getter(
+ // //
+ // ) const {
+ // return foo;
+ // }
// function foo(
// a,
// ) {
// code(); //
// }
if (Current.is(tok::r_paren) && State.Stack.size() > 1 &&
- (!Current.Next || Current.Next->isOneOf(tok::semi, tok::l_brace)))
+ (!Current.Next ||
+ Current.Next->isOneOf(tok::semi, tok::kw_const, tok::l_brace)))
return State.Stack[State.Stack.size() - 2].LastSpace;
if (NextNonComment->is(TT_TemplateString) && NextNonComment->closesScope())
return State.Stack[State.Stack.size() - 2].LastSpace;
@@ -1042,7 +1063,7 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
if (Current.is(TT_ProtoExtensionLSquare))
return State.Stack.back().Indent;
if (State.Stack.back().Indent == State.FirstIndent && PreviousNonComment &&
- PreviousNonComment->isNot(tok::r_brace))
+ !PreviousNonComment->isOneOf(tok::r_brace, TT_CtorInitializerComma))
// Ensure that we fall back to the continuation indent width instead of
// just flushing continuations left.
return State.Stack.back().Indent + Style.ContinuationIndentWidth;
@@ -1103,9 +1124,13 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
? 0
: 2);
State.Stack.back().NestedBlockIndent = State.Stack.back().Indent;
- if (Style.ConstructorInitializerAllOnOneLineOrOnePerLine)
+ if (Style.ConstructorInitializerAllOnOneLineOrOnePerLine) {
State.Stack.back().AvoidBinPacking = true;
- State.Stack.back().BreakBeforeParameter = false;
+ State.Stack.back().BreakBeforeParameter =
+ !Style.AllowAllConstructorInitializersOnNextLine;
+ } else {
+ State.Stack.back().BreakBeforeParameter = false;
+ }
}
if (Current.is(TT_CtorInitializerColon) &&
Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon) {
@@ -1160,6 +1185,8 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
if (Current.is(TT_ObjCStringLiteral) && State.StartOfStringLiteral == 0)
State.StartOfStringLiteral = State.Column + 1;
+ if (Current.is(TT_CSharpStringLiteral) && State.StartOfStringLiteral == 0)
+ State.StartOfStringLiteral = State.Column + 1;
else if (Current.isStringLiteral() && State.StartOfStringLiteral == 0)
State.StartOfStringLiteral = State.Column;
else if (!Current.isOneOf(tok::comment, tok::identifier, tok::hash) &&
@@ -1170,7 +1197,7 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
State.NextToken = State.NextToken->Next;
unsigned Penalty =
- handleEndOfLine(Current, State, DryRun, AllowBreak);
+ handleEndOfLine(Current, State, DryRun, AllowBreak, Newline);
if (Current.Role)
Current.Role->formatFromToken(State, this, DryRun);
@@ -1464,7 +1491,7 @@ static unsigned getLastLineEndColumn(StringRef Text, unsigned StartColumn,
unsigned ContinuationIndenter::reformatRawStringLiteral(
const FormatToken &Current, LineState &State,
- const FormatStyle &RawStringStyle, bool DryRun) {
+ const FormatStyle &RawStringStyle, bool DryRun, bool Newline) {
unsigned StartColumn = State.Column - Current.ColumnWidth;
StringRef OldDelimiter = *getRawStringDelimiter(Current.TokenText);
StringRef NewDelimiter =
@@ -1504,8 +1531,10 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
// source.
bool ContentStartsOnNewline = Current.TokenText[OldPrefixSize] == '\n';
// If this token is the last parameter (checked by looking if it's followed by
- // `)`, the base the indent off the line's nested block indent. Otherwise,
- // base the indent off the arguments indent, so we can achieve:
+  // `)` and is not on a newline, base the indent off the line's nested
+ // block indent. Otherwise, base the indent off the arguments indent, so we
+ // can achieve:
+ //
// fffffffffff(1, 2, 3, R"pb(
// key1: 1 #
// key2: 2)pb");
@@ -1514,11 +1543,18 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
// R"pb(
// key1: 1 #
// key2: 2
+ // )pb");
+ //
+ // fffffffffff(1, 2, 3,
+ // R"pb(
+ // key1: 1 #
+ // key2: 2
// )pb",
// 5);
- unsigned CurrentIndent = (Current.Next && Current.Next->is(tok::r_paren))
- ? State.Stack.back().NestedBlockIndent
- : State.Stack.back().Indent;
+ unsigned CurrentIndent =
+ (!Newline && Current.Next && Current.Next->is(tok::r_paren))
+ ? State.Stack.back().NestedBlockIndent
+ : State.Stack.back().Indent;
unsigned NextStartColumn = ContentStartsOnNewline
? CurrentIndent + Style.IndentWidth
: FirstStartColumn;
@@ -1531,9 +1567,8 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
// that raw string prefix starts, and
// - if the raw string prefix does not start on a newline, it is the current
// indent.
- unsigned LastStartColumn = Current.NewlinesBefore
- ? FirstStartColumn - NewPrefixSize
- : CurrentIndent;
+ unsigned LastStartColumn =
+ Current.NewlinesBefore ? FirstStartColumn - NewPrefixSize : CurrentIndent;
std::pair<tooling::Replacements, unsigned> Fixes = internal::reformat(
RawStringStyle, RawText, {tooling::Range(0, RawText.size())},
@@ -1590,8 +1625,9 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
// have to manually add the penalty for the prefix R"delim( over the column
// limit.
unsigned PrefixExcessCharacters =
- StartColumn + NewPrefixSize > Style.ColumnLimit ?
- StartColumn + NewPrefixSize - Style.ColumnLimit : 0;
+ StartColumn + NewPrefixSize > Style.ColumnLimit
+ ? StartColumn + NewPrefixSize - Style.ColumnLimit
+ : 0;
bool IsMultiline =
ContentStartsOnNewline || (NewCode->find('\n') != std::string::npos);
if (IsMultiline) {
@@ -1620,13 +1656,14 @@ unsigned ContinuationIndenter::addMultilineToken(const FormatToken &Current,
unsigned ContinuationIndenter::handleEndOfLine(const FormatToken &Current,
LineState &State, bool DryRun,
- bool AllowBreak) {
+ bool AllowBreak, bool Newline) {
unsigned Penalty = 0;
// Compute the raw string style to use in case this is a raw string literal
// that can be reformatted.
auto RawStringStyle = getRawStringStyle(Current, State);
if (RawStringStyle && !Current.Finalized) {
- Penalty = reformatRawStringLiteral(Current, State, *RawStringStyle, DryRun);
+ Penalty = reformatRawStringLiteral(Current, State, *RawStringStyle, DryRun,
+ Newline);
} else if (Current.IsMultiline && Current.isNot(TT_BlockComment)) {
// Don't break multi-line tokens other than block comments and raw string
// literals. Instead, just update the state.
@@ -1709,16 +1746,17 @@ ContinuationIndenter::getRawStringStyle(const FormatToken &Current,
return RawStringStyle;
}
-std::unique_ptr<BreakableToken> ContinuationIndenter::createBreakableToken(
- const FormatToken &Current, LineState &State, bool AllowBreak) {
+std::unique_ptr<BreakableToken>
+ContinuationIndenter::createBreakableToken(const FormatToken &Current,
+ LineState &State, bool AllowBreak) {
unsigned StartColumn = State.Column - Current.ColumnWidth;
if (Current.isStringLiteral()) {
- // FIXME: String literal breaking is currently disabled for Java and JS, as
- // it requires strings to be merged using "+" which we don't support.
+    // FIXME: String literal breaking is currently disabled for C#, Java and
+ // JavaScript, as it requires strings to be merged using "+" which we
+ // don't support.
if (Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript ||
- !Style.BreakStringLiterals ||
- !AllowBreak)
+ Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp() ||
+ !Style.BreakStringLiterals || !AllowBreak)
return nullptr;
// Don't break string literals inside preprocessor directives (except for
@@ -1772,7 +1810,7 @@ std::unique_ptr<BreakableToken> ContinuationIndenter::createBreakableToken(
}
return llvm::make_unique<BreakableBlockComment>(
Current, StartColumn, Current.OriginalColumn, !Current.Previous,
- State.Line->InPPDirective, Encoding, Style);
+ State.Line->InPPDirective, Encoding, Style, Whitespaces.useCRLF());
} else if (Current.is(TT_LineComment) &&
(Current.Previous == nullptr ||
Current.Previous->isNot(TT_ImplicitStringLiteral))) {
diff --git a/lib/Format/ContinuationIndenter.h b/lib/Format/ContinuationIndenter.h
index fde89db864b1..11df619e0f40 100644
--- a/lib/Format/ContinuationIndenter.h
+++ b/lib/Format/ContinuationIndenter.h
@@ -1,9 +1,8 @@
//===--- ContinuationIndenter.h - Format C++ code ---------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -112,12 +111,12 @@ private:
unsigned reformatRawStringLiteral(const FormatToken &Current,
LineState &State,
const FormatStyle &RawStringStyle,
- bool DryRun);
+ bool DryRun, bool Newline);
/// If the current token is at the end of the current line, handle
/// the transition to the next line.
unsigned handleEndOfLine(const FormatToken &Current, LineState &State,
- bool DryRun, bool AllowBreak);
+ bool DryRun, bool AllowBreak, bool Newline);
/// If \p Current is a raw string that is configured to be reformatted,
/// return the style to be used.
diff --git a/lib/Format/Encoding.h b/lib/Format/Encoding.h
index 4c877e7e49d5..fe3d5f019858 100644
--- a/lib/Format/Encoding.h
+++ b/lib/Format/Encoding.h
@@ -1,9 +1,8 @@
//===--- Encoding.h - Format C++ code ---------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/Format/Format.cpp b/lib/Format/Format.cpp
index 2c4f8760540a..c48182976b04 100644
--- a/lib/Format/Format.cpp
+++ b/lib/Format/Format.cpp
@@ -1,9 +1,8 @@
//===--- Format.cpp - Format C++ code -------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -62,6 +61,7 @@ template <> struct ScalarEnumerationTraits<FormatStyle::LanguageKind> {
IO.enumCase(Value, "Proto", FormatStyle::LK_Proto);
IO.enumCase(Value, "TableGen", FormatStyle::LK_TableGen);
IO.enumCase(Value, "TextProto", FormatStyle::LK_TextProto);
+ IO.enumCase(Value, "CSharp", FormatStyle::LK_CSharp);
}
};
@@ -107,6 +107,29 @@ template <> struct ScalarEnumerationTraits<FormatStyle::ShortFunctionStyle> {
}
};
+template <> struct ScalarEnumerationTraits<FormatStyle::ShortIfStyle> {
+ static void enumeration(IO &IO, FormatStyle::ShortIfStyle &Value) {
+ IO.enumCase(Value, "Never", FormatStyle::SIS_Never);
+ IO.enumCase(Value, "Always", FormatStyle::SIS_Always);
+ IO.enumCase(Value, "WithoutElse", FormatStyle::SIS_WithoutElse);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "false", FormatStyle::SIS_Never);
+ IO.enumCase(Value, "true", FormatStyle::SIS_WithoutElse);
+ }
+};
+
+template <> struct ScalarEnumerationTraits<FormatStyle::ShortLambdaStyle> {
+ static void enumeration(IO &IO, FormatStyle::ShortLambdaStyle &Value) {
+ IO.enumCase(Value, "None", FormatStyle::SLS_None);
+ IO.enumCase(Value, "false", FormatStyle::SLS_None);
+ IO.enumCase(Value, "Empty", FormatStyle::SLS_Empty);
+ IO.enumCase(Value, "Inline", FormatStyle::SLS_Inline);
+ IO.enumCase(Value, "All", FormatStyle::SLS_All);
+ IO.enumCase(Value, "true", FormatStyle::SLS_All);
+ }
+};
+
template <> struct ScalarEnumerationTraits<FormatStyle::BinPackStyle> {
static void enumeration(IO &IO, FormatStyle::BinPackStyle &Value) {
IO.enumCase(Value, "Auto", FormatStyle::BPS_Auto);
@@ -150,8 +173,8 @@ struct ScalarEnumerationTraits<FormatStyle::BreakConstructorInitializersStyle> {
template <>
struct ScalarEnumerationTraits<FormatStyle::BreakInheritanceListStyle> {
- static void
- enumeration(IO &IO, FormatStyle::BreakInheritanceListStyle &Value) {
+ static void enumeration(IO &IO,
+ FormatStyle::BreakInheritanceListStyle &Value) {
IO.enumCase(Value, "BeforeColon", FormatStyle::BILS_BeforeColon);
IO.enumCase(Value, "BeforeComma", FormatStyle::BILS_BeforeComma);
IO.enumCase(Value, "AfterColon", FormatStyle::BILS_AfterColon);
@@ -163,6 +186,7 @@ struct ScalarEnumerationTraits<FormatStyle::PPDirectiveIndentStyle> {
static void enumeration(IO &IO, FormatStyle::PPDirectiveIndentStyle &Value) {
IO.enumCase(Value, "None", FormatStyle::PPDIS_None);
IO.enumCase(Value, "AfterHash", FormatStyle::PPDIS_AfterHash);
+ IO.enumCase(Value, "BeforeHash", FormatStyle::PPDIS_BeforeHash);
}
};
@@ -180,7 +204,8 @@ struct ScalarEnumerationTraits<FormatStyle::ReturnTypeBreakingStyle> {
template <>
struct ScalarEnumerationTraits<FormatStyle::BreakTemplateDeclarationsStyle> {
- static void enumeration(IO &IO, FormatStyle::BreakTemplateDeclarationsStyle &Value) {
+ static void enumeration(IO &IO,
+ FormatStyle::BreakTemplateDeclarationsStyle &Value) {
IO.enumCase(Value, "No", FormatStyle::BTDS_No);
IO.enumCase(Value, "MultiLine", FormatStyle::BTDS_MultiLine);
IO.enumCase(Value, "Yes", FormatStyle::BTDS_Yes);
@@ -260,6 +285,8 @@ struct ScalarEnumerationTraits<FormatStyle::SpaceBeforeParensOptions> {
IO.enumCase(Value, "Never", FormatStyle::SBPO_Never);
IO.enumCase(Value, "ControlStatements",
FormatStyle::SBPO_ControlStatements);
+ IO.enumCase(Value, "NonEmptyParentheses",
+ FormatStyle::SBPO_NonEmptyParentheses);
IO.enumCase(Value, "Always", FormatStyle::SBPO_Always);
// For backward compatibility.
@@ -274,8 +301,8 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("Language", Style.Language);
if (IO.outputting()) {
- StringRef StylesArray[] = {"LLVM", "Google", "Chromium",
- "Mozilla", "WebKit", "GNU"};
+ StringRef StylesArray[] = {"LLVM", "Google", "Chromium", "Mozilla",
+ "WebKit", "GNU", "Microsoft"};
ArrayRef<StringRef> Styles(StylesArray);
for (size_t i = 0, e = Styles.size(); i < e; ++i) {
StringRef StyleName(Styles[i]);
@@ -314,6 +341,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("AccessModifierOffset", Style.AccessModifierOffset);
IO.mapOptional("AlignAfterOpenBracket", Style.AlignAfterOpenBracket);
+ IO.mapOptional("AlignConsecutiveMacros", Style.AlignConsecutiveMacros);
IO.mapOptional("AlignConsecutiveAssignments",
Style.AlignConsecutiveAssignments);
IO.mapOptional("AlignConsecutiveDeclarations",
@@ -321,6 +349,10 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("AlignEscapedNewlines", Style.AlignEscapedNewlines);
IO.mapOptional("AlignOperands", Style.AlignOperands);
IO.mapOptional("AlignTrailingComments", Style.AlignTrailingComments);
+ IO.mapOptional("AllowAllArgumentsOnNextLine",
+ Style.AllowAllArgumentsOnNextLine);
+ IO.mapOptional("AllowAllConstructorInitializersOnNextLine",
+ Style.AllowAllConstructorInitializersOnNextLine);
IO.mapOptional("AllowAllParametersOfDeclarationOnNextLine",
Style.AllowAllParametersOfDeclarationOnNextLine);
IO.mapOptional("AllowShortBlocksOnASingleLine",
@@ -329,6 +361,8 @@ template <> struct MappingTraits<FormatStyle> {
Style.AllowShortCaseLabelsOnASingleLine);
IO.mapOptional("AllowShortFunctionsOnASingleLine",
Style.AllowShortFunctionsOnASingleLine);
+ IO.mapOptional("AllowShortLambdasOnASingleLine",
+ Style.AllowShortLambdasOnASingleLine);
IO.mapOptional("AllowShortIfStatementsOnASingleLine",
Style.AllowShortIfStatementsOnASingleLine);
IO.mapOptional("AllowShortLoopsOnASingleLine",
@@ -337,6 +371,7 @@ template <> struct MappingTraits<FormatStyle> {
Style.AlwaysBreakAfterDefinitionReturnType);
IO.mapOptional("AlwaysBreakAfterReturnType",
Style.AlwaysBreakAfterReturnType);
+
// If AlwaysBreakAfterDefinitionReturnType was specified but
// AlwaysBreakAfterReturnType was not, initialize the latter from the
// former for backwards compatibility.
@@ -362,10 +397,8 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("BreakBeforeBraces", Style.BreakBeforeBraces);
bool BreakBeforeInheritanceComma = false;
- IO.mapOptional("BreakBeforeInheritanceComma",
- BreakBeforeInheritanceComma);
- IO.mapOptional("BreakInheritanceList",
- Style.BreakInheritanceList);
+ IO.mapOptional("BreakBeforeInheritanceComma", BreakBeforeInheritanceComma);
+ IO.mapOptional("BreakInheritanceList", Style.BreakInheritanceList);
// If BreakBeforeInheritanceComma was specified but
// BreakInheritance was not, initialize the latter from the
// former for backwards compatibility.
@@ -423,6 +456,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("MacroBlockEnd", Style.MacroBlockEnd);
IO.mapOptional("MaxEmptyLinesToKeep", Style.MaxEmptyLinesToKeep);
IO.mapOptional("NamespaceIndentation", Style.NamespaceIndentation);
+ IO.mapOptional("NamespaceMacros", Style.NamespaceMacros);
IO.mapOptional("ObjCBinPackProtocolList", Style.ObjCBinPackProtocolList);
IO.mapOptional("ObjCBlockIndentWidth", Style.ObjCBlockIndentWidth);
IO.mapOptional("ObjCSpaceAfterProperty", Style.ObjCSpaceAfterProperty);
@@ -446,6 +480,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("SortIncludes", Style.SortIncludes);
IO.mapOptional("SortUsingDeclarations", Style.SortUsingDeclarations);
IO.mapOptional("SpaceAfterCStyleCast", Style.SpaceAfterCStyleCast);
+ IO.mapOptional("SpaceAfterLogicalNot", Style.SpaceAfterLogicalNot);
IO.mapOptional("SpaceAfterTemplateKeyword",
Style.SpaceAfterTemplateKeyword);
IO.mapOptional("SpaceBeforeAssignmentOperators",
@@ -472,12 +507,14 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("Standard", Style.Standard);
IO.mapOptional("StatementMacros", Style.StatementMacros);
IO.mapOptional("TabWidth", Style.TabWidth);
+ IO.mapOptional("TypenameMacros", Style.TypenameMacros);
IO.mapOptional("UseTab", Style.UseTab);
}
};
template <> struct MappingTraits<FormatStyle::BraceWrappingFlags> {
static void mapping(IO &IO, FormatStyle::BraceWrappingFlags &Wrapping) {
+ IO.mapOptional("AfterCaseLabel", Wrapping.AfterCaseLabel);
IO.mapOptional("AfterClass", Wrapping.AfterClass);
IO.mapOptional("AfterControlStatement", Wrapping.AfterControlStatement);
IO.mapOptional("AfterEnum", Wrapping.AfterEnum);
@@ -570,7 +607,7 @@ static FormatStyle expandPresets(const FormatStyle &Style) {
if (Style.BreakBeforeBraces == FormatStyle::BS_Custom)
return Style;
FormatStyle Expanded = Style;
- Expanded.BraceWrapping = {false, false, false, false, false,
+ Expanded.BraceWrapping = {false, false, false, false, false, false,
false, false, false, false, false,
false, false, true, true, true};
switch (Style.BreakBeforeBraces) {
@@ -595,6 +632,7 @@ static FormatStyle expandPresets(const FormatStyle &Style) {
Expanded.BraceWrapping.BeforeElse = true;
break;
case FormatStyle::BS_Allman:
+ Expanded.BraceWrapping.AfterCaseLabel = true;
Expanded.BraceWrapping.AfterClass = true;
Expanded.BraceWrapping.AfterControlStatement = true;
Expanded.BraceWrapping.AfterEnum = true;
@@ -608,7 +646,7 @@ static FormatStyle expandPresets(const FormatStyle &Style) {
break;
case FormatStyle::BS_GNU:
Expanded.BraceWrapping = {true, true, true, true, true, true, true, true,
- true, true, true, true, true, true, true};
+ true, true, true, true, true, true, true, true};
break;
case FormatStyle::BS_WebKit:
Expanded.BraceWrapping.AfterFunction = true;
@@ -619,9 +657,9 @@ static FormatStyle expandPresets(const FormatStyle &Style) {
return Expanded;
}
-FormatStyle getLLVMStyle() {
+FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
FormatStyle LLVMStyle;
- LLVMStyle.Language = FormatStyle::LK_Cpp;
+ LLVMStyle.Language = Language;
LLVMStyle.AccessModifierOffset = -2;
LLVMStyle.AlignEscapedNewlines = FormatStyle::ENAS_Right;
LLVMStyle.AlignAfterOpenBracket = FormatStyle::BAS_Align;
@@ -629,11 +667,15 @@ FormatStyle getLLVMStyle() {
LLVMStyle.AlignTrailingComments = true;
LLVMStyle.AlignConsecutiveAssignments = false;
LLVMStyle.AlignConsecutiveDeclarations = false;
+ LLVMStyle.AlignConsecutiveMacros = false;
+ LLVMStyle.AllowAllArgumentsOnNextLine = true;
+ LLVMStyle.AllowAllConstructorInitializersOnNextLine = true;
LLVMStyle.AllowAllParametersOfDeclarationOnNextLine = true;
LLVMStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_All;
LLVMStyle.AllowShortBlocksOnASingleLine = false;
LLVMStyle.AllowShortCaseLabelsOnASingleLine = false;
- LLVMStyle.AllowShortIfStatementsOnASingleLine = false;
+ LLVMStyle.AllowShortIfStatementsOnASingleLine = FormatStyle::SIS_Never;
+ LLVMStyle.AllowShortLambdasOnASingleLine = FormatStyle::SLS_All;
LLVMStyle.AllowShortLoopsOnASingleLine = false;
LLVMStyle.AlwaysBreakAfterReturnType = FormatStyle::RTBS_None;
LLVMStyle.AlwaysBreakAfterDefinitionReturnType = FormatStyle::DRTBS_None;
@@ -644,7 +686,7 @@ FormatStyle getLLVMStyle() {
LLVMStyle.BreakBeforeBinaryOperators = FormatStyle::BOS_None;
LLVMStyle.BreakBeforeTernaryOperators = true;
LLVMStyle.BreakBeforeBraces = FormatStyle::BS_Attach;
- LLVMStyle.BraceWrapping = {false, false, false, false, false,
+ LLVMStyle.BraceWrapping = {false, false, false, false, false, false,
false, false, false, false, false,
false, false, true, true, true};
LLVMStyle.BreakAfterJavaFieldAnnotations = false;
@@ -695,6 +737,7 @@ FormatStyle getLLVMStyle() {
LLVMStyle.SpacesInContainerLiterals = true;
LLVMStyle.SpacesInCStyleCastParentheses = false;
LLVMStyle.SpaceAfterCStyleCast = false;
+ LLVMStyle.SpaceAfterLogicalNot = false;
LLVMStyle.SpaceAfterTemplateKeyword = true;
LLVMStyle.SpaceBeforeCtorInitializerColon = true;
LLVMStyle.SpaceBeforeInheritanceColon = true;
@@ -719,6 +762,11 @@ FormatStyle getLLVMStyle() {
LLVMStyle.StatementMacros.push_back("Q_UNUSED");
LLVMStyle.StatementMacros.push_back("QT_REQUIRE_VERSION");
+ // Defaults that differ when not C++.
+ if (Language == FormatStyle::LK_TableGen) {
+ LLVMStyle.SpacesInContainerLiterals = false;
+ }
+
return LLVMStyle;
}
@@ -730,12 +778,12 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
return GoogleStyle;
}
- FormatStyle GoogleStyle = getLLVMStyle();
- GoogleStyle.Language = Language;
+ FormatStyle GoogleStyle = getLLVMStyle(Language);
GoogleStyle.AccessModifierOffset = -1;
GoogleStyle.AlignEscapedNewlines = FormatStyle::ENAS_Left;
- GoogleStyle.AllowShortIfStatementsOnASingleLine = true;
+ GoogleStyle.AllowShortIfStatementsOnASingleLine =
+ FormatStyle::SIS_WithoutElse;
GoogleStyle.AllowShortLoopsOnASingleLine = true;
GoogleStyle.AlwaysBreakBeforeMultilineStrings = true;
GoogleStyle.AlwaysBreakTemplateDeclarations = FormatStyle::BTDS_Yes;
@@ -744,6 +792,7 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
GoogleStyle.IncludeStyle.IncludeCategories = {
{"^<ext/.*\\.h>", 2}, {"^<.*\\.h>", 1}, {"^<.*", 2}, {".*", 3}};
GoogleStyle.IncludeStyle.IncludeIsMainRegex = "([-_](test|unittest))?$";
+ GoogleStyle.IncludeStyle.IncludeBlocks = tooling::IncludeStyle::IBS_Regroup;
GoogleStyle.IndentCaseLabels = true;
GoogleStyle.KeepEmptyLinesAtTheStartOfBlocks = false;
GoogleStyle.ObjCBinPackProtocolList = FormatStyle::BPS_Never;
@@ -802,7 +851,7 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
GoogleStyle.AlignOperands = false;
GoogleStyle.AlignTrailingComments = false;
GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Empty;
- GoogleStyle.AllowShortIfStatementsOnASingleLine = false;
+ GoogleStyle.AllowShortIfStatementsOnASingleLine = FormatStyle::SIS_Never;
GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
GoogleStyle.BreakBeforeBinaryOperators = FormatStyle::BOS_NonAssignment;
GoogleStyle.ColumnLimit = 100;
@@ -836,6 +885,11 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
} else if (Language == FormatStyle::LK_ObjC) {
GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
GoogleStyle.ColumnLimit = 100;
+ // "Regroup" doesn't work well for ObjC yet (main header heuristic,
+    // relationship between ObjC standard library headers and other headers,
+ // #imports, etc.)
+ GoogleStyle.IncludeStyle.IncludeBlocks =
+ tooling::IncludeStyle::IBS_Preserve;
}
return GoogleStyle;
@@ -844,7 +898,8 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
FormatStyle getChromiumStyle(FormatStyle::LanguageKind Language) {
FormatStyle ChromiumStyle = getGoogleStyle(Language);
if (Language == FormatStyle::LK_Java) {
- ChromiumStyle.AllowShortIfStatementsOnASingleLine = true;
+ ChromiumStyle.AllowShortIfStatementsOnASingleLine =
+ FormatStyle::SIS_WithoutElse;
ChromiumStyle.BreakAfterJavaFieldAnnotations = true;
ChromiumStyle.ContinuationIndentWidth = 8;
ChromiumStyle.IndentWidth = 4;
@@ -852,6 +907,7 @@ FormatStyle getChromiumStyle(FormatStyle::LanguageKind Language) {
// https://chromium.googlesource.com/chromium/src/+/master/styleguide/java/java.md#Import-Order
ChromiumStyle.JavaImportGroups = {
"android",
+ "androidx",
"com",
"dalvik",
"junit",
@@ -863,12 +919,12 @@ FormatStyle getChromiumStyle(FormatStyle::LanguageKind Language) {
};
ChromiumStyle.SortIncludes = true;
} else if (Language == FormatStyle::LK_JavaScript) {
- ChromiumStyle.AllowShortIfStatementsOnASingleLine = false;
+ ChromiumStyle.AllowShortIfStatementsOnASingleLine = FormatStyle::SIS_Never;
ChromiumStyle.AllowShortLoopsOnASingleLine = false;
} else {
ChromiumStyle.AllowAllParametersOfDeclarationOnNextLine = false;
ChromiumStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Inline;
- ChromiumStyle.AllowShortIfStatementsOnASingleLine = false;
+ ChromiumStyle.AllowShortIfStatementsOnASingleLine = FormatStyle::SIS_Never;
ChromiumStyle.AllowShortLoopsOnASingleLine = false;
ChromiumStyle.BinPackParameters = false;
ChromiumStyle.DerivePointerAlignment = false;
@@ -940,6 +996,32 @@ FormatStyle getGNUStyle() {
return Style;
}
+FormatStyle getMicrosoftStyle(FormatStyle::LanguageKind Language) {
+ FormatStyle Style = getLLVMStyle();
+ Style.ColumnLimit = 120;
+ Style.TabWidth = 4;
+ Style.IndentWidth = 4;
+ Style.UseTab = FormatStyle::UT_Never;
+ Style.BreakBeforeBraces = FormatStyle::BS_Custom;
+ Style.BraceWrapping.AfterClass = true;
+ Style.BraceWrapping.AfterControlStatement = true;
+ Style.BraceWrapping.AfterEnum = true;
+ Style.BraceWrapping.AfterFunction = true;
+ Style.BraceWrapping.AfterNamespace = true;
+ Style.BraceWrapping.AfterObjCDeclaration = true;
+ Style.BraceWrapping.AfterStruct = true;
+ Style.BraceWrapping.AfterExternBlock = true;
+ Style.BraceWrapping.BeforeCatch = true;
+ Style.BraceWrapping.BeforeElse = true;
+ Style.PenaltyReturnTypeOnItsOwnLine = 1000;
+ Style.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_None;
+ Style.AllowShortBlocksOnASingleLine = false;
+ Style.AllowShortCaseLabelsOnASingleLine = false;
+ Style.AllowShortIfStatementsOnASingleLine = FormatStyle::SIS_Never;
+ Style.AllowShortLoopsOnASingleLine = false;
+ return Style;
+}
+
FormatStyle getNoStyle() {
FormatStyle NoStyle = getLLVMStyle();
NoStyle.DisableFormat = true;
@@ -951,7 +1033,7 @@ FormatStyle getNoStyle() {
bool getPredefinedStyle(StringRef Name, FormatStyle::LanguageKind Language,
FormatStyle *Style) {
if (Name.equals_lower("llvm")) {
- *Style = getLLVMStyle();
+ *Style = getLLVMStyle(Language);
} else if (Name.equals_lower("chromium")) {
*Style = getChromiumStyle(Language);
} else if (Name.equals_lower("mozilla")) {
@@ -962,6 +1044,8 @@ bool getPredefinedStyle(StringRef Name, FormatStyle::LanguageKind Language,
*Style = getWebKitStyle();
} else if (Name.equals_lower("gnu")) {
*Style = getGNUStyle();
+ } else if (Name.equals_lower("microsoft")) {
+ *Style = getMicrosoftStyle(Language);
} else if (Name.equals_lower("none")) {
*Style = getNoStyle();
} else {
@@ -1060,9 +1144,7 @@ void FormatStyle::FormatStyleSet::Add(FormatStyle Style) {
(*Styles)[Style.Language] = std::move(Style);
}
-void FormatStyle::FormatStyleSet::Clear() {
- Styles.reset();
-}
+void FormatStyle::FormatStyleSet::Clear() { Styles.reset(); }
llvm::Optional<FormatStyle>
FormatStyle::GetLanguageStyle(FormatStyle::LanguageKind Language) const {
@@ -1691,6 +1773,7 @@ FindCursorIndex(const SmallVectorImpl<IncludeDirective> &Includes,
static void sortCppIncludes(const FormatStyle &Style,
const SmallVectorImpl<IncludeDirective> &Includes,
ArrayRef<tooling::Range> Ranges, StringRef FileName,
+ StringRef Code,
tooling::Replacements &Replaces, unsigned *Cursor) {
unsigned IncludesBeginOffset = Includes.front().Offset;
unsigned IncludesEndOffset =
@@ -1701,11 +1784,10 @@ static void sortCppIncludes(const FormatStyle &Style,
SmallVector<unsigned, 16> Indices;
for (unsigned i = 0, e = Includes.size(); i != e; ++i)
Indices.push_back(i);
- std::stable_sort(
- Indices.begin(), Indices.end(), [&](unsigned LHSI, unsigned RHSI) {
- return std::tie(Includes[LHSI].Category, Includes[LHSI].Filename) <
- std::tie(Includes[RHSI].Category, Includes[RHSI].Filename);
- });
+ llvm::stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
+ return std::tie(Includes[LHSI].Category, Includes[LHSI].Filename) <
+ std::tie(Includes[RHSI].Category, Includes[RHSI].Filename);
+ });
// The index of the include on which the cursor will be put after
// sorting/deduplicating.
unsigned CursorIndex;
@@ -1726,6 +1808,10 @@ static void sortCppIncludes(const FormatStyle &Style,
// If the #includes are out of order, we generate a single replacement fixing
// the entire block. Otherwise, no replacement is generated.
+  // In case Style.IncludeStyle.IncludeBlocks != IBS_Preserve, this check is not
+  // enough as additional newlines might be added or removed across #include
+  // blocks. We handle this below by generating the updated #include blocks and
+  // comparing them to the original.
if (Indices.size() == Includes.size() &&
std::is_sorted(Indices.begin(), Indices.end()) &&
Style.IncludeStyle.IncludeBlocks == tooling::IncludeStyle::IBS_Preserve)
@@ -1746,6 +1832,11 @@ static void sortCppIncludes(const FormatStyle &Style,
CurrentCategory = Includes[Index].Category;
}
+ // If the #includes are out of order, we generate a single replacement fixing
+ // the entire range of blocks. Otherwise, no replacement is generated.
+ if (result == Code.substr(IncludesBeginOffset, IncludesBlockSize))
+ return;
+
auto Err = Replaces.add(tooling::Replacement(
FileName, Includes.front().Offset, IncludesBlockSize, result));
// FIXME: better error handling. For now, just skip the replacement for the
@@ -1792,9 +1883,10 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
Code.substr(Prev, (Pos != StringRef::npos ? Pos : Code.size()) - Prev);
StringRef Trimmed = Line.trim();
- if (Trimmed == "// clang-format off")
+ if (Trimmed == "// clang-format off" || Trimmed == "/* clang-format off */")
FormattingOff = true;
- else if (Trimmed == "// clang-format on")
+ else if (Trimmed == "// clang-format on" ||
+ Trimmed == "/* clang-format on */")
FormattingOff = false;
const bool EmptyLineSkipped =
@@ -1813,8 +1905,8 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
MainIncludeFound = true;
IncludesInBlock.push_back({IncludeName, Line, Prev, Category});
} else if (!IncludesInBlock.empty() && !EmptyLineSkipped) {
- sortCppIncludes(Style, IncludesInBlock, Ranges, FileName, Replaces,
- Cursor);
+ sortCppIncludes(Style, IncludesInBlock, Ranges, FileName, Code,
+ Replaces, Cursor);
IncludesInBlock.clear();
FirstIncludeBlock = false;
}
@@ -1824,8 +1916,10 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
break;
SearchFrom = Pos + 1;
}
- if (!IncludesInBlock.empty())
- sortCppIncludes(Style, IncludesInBlock, Ranges, FileName, Replaces, Cursor);
+ if (!IncludesInBlock.empty()) {
+ sortCppIncludes(Style, IncludesInBlock, Ranges, FileName, Code, Replaces,
+ Cursor);
+ }
return Replaces;
}
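
With the change above, include sorting also honors the block-comment form of the markers; a small sketch with hypothetical headers:

    /* clang-format off */
    #include "zebra.h"   // this block is left untouched
    #include "alpha.h"
    /* clang-format on */
    #include "beta.h"    // from here on, includes are sorted as before
    #include "alpha2.h"
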
@@ -1854,7 +1948,7 @@ static unsigned findJavaImportGroup(const FormatStyle &Style,
static void sortJavaImports(const FormatStyle &Style,
const SmallVectorImpl<JavaImportDirective> &Imports,
ArrayRef<tooling::Range> Ranges, StringRef FileName,
- tooling::Replacements &Replaces) {
+ StringRef Code, tooling::Replacements &Replaces) {
unsigned ImportsBeginOffset = Imports.front().Offset;
unsigned ImportsEndOffset =
Imports.back().Offset + Imports.back().Text.size();
@@ -1868,13 +1962,13 @@ static void sortJavaImports(const FormatStyle &Style,
JavaImportGroups.push_back(
findJavaImportGroup(Style, Imports[i].Identifier));
}
- llvm::sort(Indices.begin(), Indices.end(), [&](unsigned LHSI, unsigned RHSI) {
- // Negating IsStatic to push static imports above non-static imports.
- return std::make_tuple(!Imports[LHSI].IsStatic, JavaImportGroups[LHSI],
- Imports[LHSI].Identifier) <
- std::make_tuple(!Imports[RHSI].IsStatic, JavaImportGroups[RHSI],
- Imports[RHSI].Identifier);
- });
+ llvm::sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
+ // Negating IsStatic to push static imports above non-static imports.
+ return std::make_tuple(!Imports[LHSI].IsStatic, JavaImportGroups[LHSI],
+ Imports[LHSI].Identifier) <
+ std::make_tuple(!Imports[RHSI].IsStatic, JavaImportGroups[RHSI],
+ Imports[RHSI].Identifier);
+ });
// Deduplicate imports.
Indices.erase(std::unique(Indices.begin(), Indices.end(),
@@ -1903,6 +1997,11 @@ static void sortJavaImports(const FormatStyle &Style,
CurrentImportGroup = JavaImportGroups[Index];
}
+ // If the imports are out of order, we generate a single replacement fixing
+ // the entire block. Otherwise, no replacement is generated.
+ if (result == Code.substr(Imports.front().Offset, ImportsBlockSize))
+ return;
+
auto Err = Replaces.add(tooling::Replacement(FileName, Imports.front().Offset,
ImportsBlockSize, result));
// FIXME: better error handling. For now, just skip the replacement for the
@@ -1916,7 +2015,7 @@ static void sortJavaImports(const FormatStyle &Style,
namespace {
const char JavaImportRegexPattern[] =
- "^[\t ]*import[\t ]*(static[\t ]*)?([^\t ]*)[\t ]*;";
+ "^[\t ]*import[\t ]+(static[\t ]*)?([^\t ]*)[\t ]*;";
} // anonymous namespace
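
The '+' now requires at least one tab or space after "import", so identifiers that merely start with "import" no longer match. A standalone illustration of the pattern's behavior, using std::regex as a stand-in for the llvm::Regex above:

    #include <cassert>
    #include <regex>

    int main() {
      std::regex JavaImport("^[\t ]*import[\t ]+(static[\t ]*)?([^\t ]*)[\t ]*;");
      assert(std::regex_search("import static foo.Bar;", JavaImport));  // still matches
      assert(!std::regex_search("importantValue;", JavaImport));        // no longer matches
      return 0;
    }
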
@@ -1956,7 +2055,8 @@ tooling::Replacements sortJavaImports(const FormatStyle &Style, StringRef Code,
if (Static.contains("static")) {
IsStatic = true;
}
- ImportsInBlock.push_back({Identifier, Line, Prev, AssociatedCommentLines, IsStatic});
+ ImportsInBlock.push_back(
+ {Identifier, Line, Prev, AssociatedCommentLines, IsStatic});
AssociatedCommentLines.clear();
} else if (Trimmed.size() > 0 && !ImportsInBlock.empty()) {
// Associating comments within the imports with the nearest import below
@@ -1968,7 +2068,7 @@ tooling::Replacements sortJavaImports(const FormatStyle &Style, StringRef Code,
SearchFrom = Pos + 1;
}
if (!ImportsInBlock.empty())
- sortJavaImports(Style, ImportsInBlock, Ranges, FileName, Replaces);
+ sortJavaImports(Style, ImportsInBlock, Ranges, FileName, Code, Replaces);
return Replaces;
}
@@ -2085,7 +2185,6 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
if (HeaderInsertions.empty() && HeadersToDelete.empty())
return Replaces;
-
StringRef FileName = Replaces.begin()->getFilePath();
tooling::HeaderIncludes Includes(FileName, Code, Style.IncludeStyle);
@@ -2118,7 +2217,8 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
auto Err = Result.add(*Replace);
if (Err) {
llvm::consumeError(std::move(Err));
- unsigned NewOffset = Result.getShiftedCodePosition(Replace->getOffset());
+ unsigned NewOffset =
+ Result.getShiftedCodePosition(Replace->getOffset());
auto Shifted = tooling::Replacement(FileName, NewOffset, 0,
Replace->getReplacementText());
Result = Result.merge(tooling::Replacements(Shifted));
@@ -2307,6 +2407,8 @@ static FormatStyle::LanguageKind getLanguageByFileName(StringRef FileName) {
return FormatStyle::LK_TextProto;
if (FileName.endswith_lower(".td"))
return FormatStyle::LK_TableGen;
+ if (FileName.endswith_lower(".cs"))
+ return FormatStyle::LK_CSharp;
return FormatStyle::LK_Cpp;
}
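
A minimal sketch of the effect, assuming guessLanguage() (declared in clang/Format/Format.h) delegates to this file-name mapping:

    #include "clang/Format/Format.h"
    #include <cassert>
    using namespace clang::format;

    int main() {
      // ".cs" files are now recognized as C#.
      assert(guessLanguage("Program.cs", "") == FormatStyle::LK_CSharp);
    }
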
@@ -2339,8 +2441,7 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
if (!FS) {
FS = llvm::vfs::getRealFileSystem().get();
}
- FormatStyle Style = getLLVMStyle();
- Style.Language = guessLanguage(FileName, Code);
+ FormatStyle Style = getLLVMStyle(guessLanguage(FileName, Code));
FormatStyle FallbackStyle = getNoStyle();
if (!getPredefinedStyle(FallbackStyleName, Style.Language, &FallbackStyle))
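
Since getLLVMStyle() now takes the language, callers can get language-aware defaults in one call; a hedged usage sketch (names as in Format.h):

    #include "clang/Format/Format.h"
    using namespace clang::format;

    void configure() {
      // Language-aware LLVM defaults in one call.
      FormatStyle JS = getLLVMStyle(FormatStyle::LK_JavaScript);
      // The new "Microsoft" predefined style is resolved by name, case-insensitively.
      FormatStyle MS;
      bool Ok = getPredefinedStyle("microsoft", FormatStyle::LK_CSharp, &MS);
      (void)JS;
      (void)Ok;
    }
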
diff --git a/lib/Format/FormatInternal.h b/lib/Format/FormatInternal.h
index 5c59e7656eee..3aa616da23d8 100644
--- a/lib/Format/FormatInternal.h
+++ b/lib/Format/FormatInternal.h
@@ -1,9 +1,8 @@
//===--- FormatInternal.h - Format C++ code ---------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/Format/FormatToken.cpp b/lib/Format/FormatToken.cpp
index 62b08c576e05..90d09064bb43 100644
--- a/lib/Format/FormatToken.cpp
+++ b/lib/Format/FormatToken.cpp
@@ -1,9 +1,8 @@
//===--- FormatToken.cpp - Format C++ code --------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/Format/FormatToken.h b/lib/Format/FormatToken.h
index 10390c42911b..df7493742025 100644
--- a/lib/Format/FormatToken.h
+++ b/lib/Format/FormatToken.h
@@ -1,9 +1,8 @@
//===--- FormatToken.h - Format C++ code ------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -61,15 +60,18 @@ namespace format {
TYPE(JsExponentiationEqual) \
TYPE(JsFatArrow) \
TYPE(JsNonNullAssertion) \
+ TYPE(JsPrivateIdentifier) \
TYPE(JsTypeColon) \
TYPE(JsTypeOperator) \
TYPE(JsTypeOptionalQuestion) \
TYPE(LambdaArrow) \
+ TYPE(LambdaLBrace) \
TYPE(LambdaLSquare) \
TYPE(LeadingJavaAnnotation) \
TYPE(LineComment) \
TYPE(MacroBlockBegin) \
TYPE(MacroBlockEnd) \
+ TYPE(NamespaceMacro) \
TYPE(ObjCBlockLBrace) \
TYPE(ObjCBlockLParen) \
TYPE(ObjCDecl) \
@@ -95,7 +97,10 @@ namespace format {
TYPE(TrailingAnnotation) \
TYPE(TrailingReturnArrow) \
TYPE(TrailingUnaryOperator) \
+ TYPE(TypenameMacro) \
TYPE(UnaryOperator) \
+ TYPE(CSharpStringLiteral) \
+ TYPE(CSharpNullCoalescing) \
TYPE(Unknown)
enum TokenType {
@@ -490,8 +495,7 @@ struct FormatToken {
bool opensBlockOrBlockTypeList(const FormatStyle &Style) const {
if (is(TT_TemplateString) && opensScope())
return true;
- return is(TT_ArrayInitializerLSquare) ||
- is(TT_ProtoExtensionLSquare) ||
+ return is(TT_ArrayInitializerLSquare) || is(TT_ProtoExtensionLSquare) ||
(is(tok::l_brace) &&
(BlockKind == BK_Block || is(TT_DictLiteral) ||
(!Style.Cpp11BracedListStyle && NestingLevel == 0))) ||
@@ -528,8 +532,10 @@ struct FormatToken {
// Detect "(inline|export)? namespace" in the beginning of a line.
if (NamespaceTok && NamespaceTok->isOneOf(tok::kw_inline, tok::kw_export))
NamespaceTok = NamespaceTok->getNextNonComment();
- return NamespaceTok && NamespaceTok->is(tok::kw_namespace) ? NamespaceTok
- : nullptr;
+ return NamespaceTok &&
+ NamespaceTok->isOneOf(tok::kw_namespace, TT_NamespaceMacro)
+ ? NamespaceTok
+ : nullptr;
}
private:
@@ -724,7 +730,36 @@ struct AdditionalKeywords {
kw_slots = &IdentTable.get("slots");
kw_qslots = &IdentTable.get("Q_SLOTS");
- // Keep this at the end of the constructor to make sure everything here is
+ // C# keywords
+ kw_dollar = &IdentTable.get("dollar");
+ kw_base = &IdentTable.get("base");
+ kw_byte = &IdentTable.get("byte");
+ kw_checked = &IdentTable.get("checked");
+ kw_decimal = &IdentTable.get("decimal");
+ kw_delegate = &IdentTable.get("delegate");
+ kw_event = &IdentTable.get("event");
+ kw_fixed = &IdentTable.get("fixed");
+ kw_foreach = &IdentTable.get("foreach");
+ kw_implicit = &IdentTable.get("implicit");
+ kw_internal = &IdentTable.get("internal");
+ kw_lock = &IdentTable.get("lock");
+ kw_null = &IdentTable.get("null");
+ kw_object = &IdentTable.get("object");
+ kw_out = &IdentTable.get("out");
+ kw_params = &IdentTable.get("params");
+ kw_ref = &IdentTable.get("ref");
+ kw_string = &IdentTable.get("string");
+ kw_stackalloc = &IdentTable.get("stackalloc");
+ kw_sbyte = &IdentTable.get("sbyte");
+ kw_sealed = &IdentTable.get("sealed");
+ kw_uint = &IdentTable.get("uint");
+ kw_ulong = &IdentTable.get("ulong");
+ kw_unchecked = &IdentTable.get("unchecked");
+ kw_unsafe = &IdentTable.get("unsafe");
+ kw_ushort = &IdentTable.get("ushort");
+
+    // Keep this at the end of the constructor to make sure everything here is
// already initialized.
JsExtraKeywords = std::unordered_set<IdentifierInfo *>(
{kw_as, kw_async, kw_await, kw_declare, kw_finally, kw_from,
@@ -732,6 +767,19 @@ struct AdditionalKeywords {
kw_set, kw_type, kw_typeof, kw_var, kw_yield,
// Keywords from the Java section.
kw_abstract, kw_extends, kw_implements, kw_instanceof, kw_interface});
+
+ CSharpExtraKeywords = std::unordered_set<IdentifierInfo *>(
+ {kw_base, kw_byte, kw_checked, kw_decimal, kw_delegate, kw_event,
+ kw_fixed, kw_foreach, kw_implicit, kw_in, kw_interface, kw_internal,
+ kw_is, kw_lock, kw_null, kw_object, kw_out, kw_override, kw_params,
+ kw_readonly, kw_ref, kw_string, kw_stackalloc, kw_sbyte, kw_sealed,
+ kw_uint, kw_ulong, kw_unchecked, kw_unsafe, kw_ushort,
+ // Keywords from the JavaScript section.
+ kw_as, kw_async, kw_await, kw_declare, kw_finally, kw_from,
+ kw_function, kw_get, kw_import, kw_is, kw_let, kw_module, kw_readonly,
+ kw_set, kw_type, kw_typeof, kw_var, kw_yield,
+ // Keywords from the Java section.
+ kw_abstract, kw_extends, kw_implements, kw_instanceof, kw_interface});
}
// Context sensitive keywords.
@@ -797,6 +845,37 @@ struct AdditionalKeywords {
IdentifierInfo *kw_slots;
IdentifierInfo *kw_qslots;
+ // C# keywords
+ IdentifierInfo *kw_dollar;
+ IdentifierInfo *kw_base;
+ IdentifierInfo *kw_byte;
+ IdentifierInfo *kw_checked;
+ IdentifierInfo *kw_decimal;
+ IdentifierInfo *kw_delegate;
+ IdentifierInfo *kw_event;
+ IdentifierInfo *kw_fixed;
+ IdentifierInfo *kw_foreach;
+ IdentifierInfo *kw_implicit;
+ IdentifierInfo *kw_internal;
+
+ IdentifierInfo *kw_lock;
+ IdentifierInfo *kw_null;
+ IdentifierInfo *kw_object;
+ IdentifierInfo *kw_out;
+
+ IdentifierInfo *kw_params;
+
+ IdentifierInfo *kw_ref;
+ IdentifierInfo *kw_string;
+ IdentifierInfo *kw_stackalloc;
+ IdentifierInfo *kw_sbyte;
+ IdentifierInfo *kw_sealed;
+ IdentifierInfo *kw_uint;
+ IdentifierInfo *kw_ulong;
+ IdentifierInfo *kw_unchecked;
+ IdentifierInfo *kw_unsafe;
+ IdentifierInfo *kw_ushort;
+
/// Returns \c true if \p Tok is a true JavaScript identifier, returns
/// \c false if it is a keyword or a pseudo keyword.
bool IsJavaScriptIdentifier(const FormatToken &Tok) const {
@@ -805,9 +884,68 @@ struct AdditionalKeywords {
JsExtraKeywords.end();
}
+ /// Returns \c true if \p Tok is a C# keyword, returns
+  /// \c false if it is anything else.
+ bool isCSharpKeyword(const FormatToken &Tok) const {
+ switch (Tok.Tok.getKind()) {
+ case tok::kw_bool:
+ case tok::kw_break:
+ case tok::kw_case:
+ case tok::kw_catch:
+ case tok::kw_char:
+ case tok::kw_class:
+ case tok::kw_const:
+ case tok::kw_continue:
+ case tok::kw_default:
+ case tok::kw_do:
+ case tok::kw_double:
+ case tok::kw_else:
+ case tok::kw_enum:
+ case tok::kw_explicit:
+ case tok::kw_extern:
+ case tok::kw_false:
+ case tok::kw_float:
+ case tok::kw_for:
+ case tok::kw_goto:
+ case tok::kw_if:
+ case tok::kw_int:
+ case tok::kw_long:
+ case tok::kw_namespace:
+ case tok::kw_new:
+ case tok::kw_operator:
+ case tok::kw_private:
+ case tok::kw_protected:
+ case tok::kw_public:
+ case tok::kw_return:
+ case tok::kw_short:
+ case tok::kw_sizeof:
+ case tok::kw_static:
+ case tok::kw_struct:
+ case tok::kw_switch:
+ case tok::kw_this:
+ case tok::kw_throw:
+ case tok::kw_true:
+ case tok::kw_try:
+ case tok::kw_typeof:
+ case tok::kw_using:
+ case tok::kw_virtual:
+ case tok::kw_void:
+ case tok::kw_volatile:
+ case tok::kw_while:
+ return true;
+ default:
+ return Tok.is(tok::identifier) &&
+ CSharpExtraKeywords.find(Tok.Tok.getIdentifierInfo()) ==
+ CSharpExtraKeywords.end();
+ }
+ }
+
private:
/// The JavaScript keywords beyond the C++ keyword set.
std::unordered_set<IdentifierInfo *> JsExtraKeywords;
+
+ /// The C# keywords beyond the C++ keyword set
+ std::unordered_set<IdentifierInfo *> CSharpExtraKeywords;
};
} // namespace format
diff --git a/lib/Format/FormatTokenLexer.cpp b/lib/Format/FormatTokenLexer.cpp
index 146f5d68b559..009b8849753c 100644
--- a/lib/Format/FormatTokenLexer.cpp
+++ b/lib/Format/FormatTokenLexer.cpp
@@ -1,9 +1,8 @@
//===--- FormatTokenLexer.cpp - Lex FormatTokens -------------*- C++ ----*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -40,6 +39,10 @@ FormatTokenLexer::FormatTokenLexer(const SourceManager &SourceMgr, FileID ID,
Macros.insert({&IdentTable.get(ForEachMacro), TT_ForEachMacro});
for (const std::string &StatementMacro : Style.StatementMacros)
Macros.insert({&IdentTable.get(StatementMacro), TT_StatementMacro});
+ for (const std::string &TypenameMacro : Style.TypenameMacros)
+ Macros.insert({&IdentTable.get(TypenameMacro), TT_TypenameMacro});
+ for (const std::string &NamespaceMacro : Style.NamespaceMacros)
+ Macros.insert({&IdentTable.get(NamespaceMacro), TT_NamespaceMacro});
}
ArrayRef<FormatToken *> FormatTokenLexer::lex() {
@@ -67,6 +70,21 @@ void FormatTokenLexer::tryMergePreviousTokens() {
return;
if (tryMergeLessLess())
return;
+
+ if (Style.isCSharp()) {
+ if (tryMergeCSharpKeywordVariables())
+ return;
+ if (tryMergeCSharpVerbatimStringLiteral())
+ return;
+ if (tryMergeCSharpDoubleQuestion())
+ return;
+ if (tryMergeCSharpNullConditionals())
+ return;
+ static const tok::TokenKind JSRightArrow[] = {tok::equal, tok::greater};
+ if (tryMergeTokens(JSRightArrow, TT_JsFatArrow))
+ return;
+ }
+
if (tryMergeNSStringLiteral())
return;
@@ -96,6 +114,8 @@ void FormatTokenLexer::tryMergePreviousTokens() {
Tokens.back()->Tok.setKind(tok::starequal);
return;
}
+ if (tryMergeJSPrivateIdentifier())
+ return;
}
if (Style.Language == FormatStyle::LK_Java) {
@@ -122,6 +142,118 @@ bool FormatTokenLexer::tryMergeNSStringLiteral() {
return true;
}
+bool FormatTokenLexer::tryMergeJSPrivateIdentifier() {
+  // Merges #identifier into a single identifier with the text #identifier
+  // but the token kind tok::identifier.
+ if (Tokens.size() < 2)
+ return false;
+ auto &Hash = *(Tokens.end() - 2);
+ auto &Identifier = *(Tokens.end() - 1);
+ if (!Hash->is(tok::hash) || !Identifier->is(tok::identifier))
+ return false;
+ Hash->Tok.setKind(tok::identifier);
+ Hash->TokenText =
+ StringRef(Hash->TokenText.begin(),
+ Identifier->TokenText.end() - Hash->TokenText.begin());
+ Hash->ColumnWidth += Identifier->ColumnWidth;
+ Hash->Type = TT_JsPrivateIdentifier;
+ Tokens.erase(Tokens.end() - 1);
+ return true;
+}
+
+// Search for verbatim or interpolated string literals @"ABC" or
+// $"aaaaa{abc}aaaaa" and mark the token as TT_CSharpStringLiteral, to
+// prevent splitting of @, $ and ".
+bool FormatTokenLexer::tryMergeCSharpVerbatimStringLiteral() {
+ if (Tokens.size() < 2)
+ return false;
+ auto &At = *(Tokens.end() - 2);
+ auto &String = *(Tokens.end() - 1);
+
+ // Look for $"aaaaaa" @"aaaaaa".
+ if (!(At->is(tok::at) || At->TokenText == "$") ||
+ !String->is(tok::string_literal))
+ return false;
+
+ if (Tokens.size() >= 2 && At->is(tok::at)) {
+ auto &Dollar = *(Tokens.end() - 3);
+ if (Dollar->TokenText == "$") {
+ // This looks like $@"aaaaa" so we need to combine all 3 tokens.
+ Dollar->Tok.setKind(tok::string_literal);
+ Dollar->TokenText =
+ StringRef(Dollar->TokenText.begin(),
+ String->TokenText.end() - Dollar->TokenText.begin());
+ Dollar->ColumnWidth += (At->ColumnWidth + String->ColumnWidth);
+ Dollar->Type = TT_CSharpStringLiteral;
+ Tokens.erase(Tokens.end() - 2);
+ Tokens.erase(Tokens.end() - 1);
+ return true;
+ }
+ }
+
+ // Convert back into just a string_literal.
+ At->Tok.setKind(tok::string_literal);
+ At->TokenText = StringRef(At->TokenText.begin(),
+ String->TokenText.end() - At->TokenText.begin());
+ At->ColumnWidth += String->ColumnWidth;
+ At->Type = TT_CSharpStringLiteral;
+ Tokens.erase(Tokens.end() - 1);
+ return true;
+}
+
+bool FormatTokenLexer::tryMergeCSharpDoubleQuestion() {
+ if (Tokens.size() < 2)
+ return false;
+ auto &FirstQuestion = *(Tokens.end() - 2);
+ auto &SecondQuestion = *(Tokens.end() - 1);
+ if (!FirstQuestion->is(tok::question) || !SecondQuestion->is(tok::question))
+ return false;
+ FirstQuestion->Tok.setKind(tok::question);
+ FirstQuestion->TokenText = StringRef(FirstQuestion->TokenText.begin(),
+ SecondQuestion->TokenText.end() -
+ FirstQuestion->TokenText.begin());
+ FirstQuestion->ColumnWidth += SecondQuestion->ColumnWidth;
+ FirstQuestion->Type = TT_CSharpNullCoalescing;
+ Tokens.erase(Tokens.end() - 1);
+ return true;
+}
+
+bool FormatTokenLexer::tryMergeCSharpKeywordVariables() {
+ if (Tokens.size() < 2)
+ return false;
+ auto &At = *(Tokens.end() - 2);
+ auto &Keyword = *(Tokens.end() - 1);
+ if (!At->is(tok::at))
+ return false;
+ if (!Keywords.isCSharpKeyword(*Keyword))
+ return false;
+
+ At->Tok.setKind(tok::identifier);
+ At->TokenText = StringRef(At->TokenText.begin(),
+ Keyword->TokenText.end() - At->TokenText.begin());
+ At->ColumnWidth += Keyword->ColumnWidth;
+ At->Type = Keyword->Type;
+ Tokens.erase(Tokens.end() - 1);
+ return true;
+}
+
+// In C#, merge an identifier and the '?' that follows it, e.g. arg?.
+bool FormatTokenLexer::tryMergeCSharpNullConditionals() {
+ if (Tokens.size() < 2)
+ return false;
+ auto &Identifier = *(Tokens.end() - 2);
+ auto &Question = *(Tokens.end() - 1);
+ if (!Identifier->isOneOf(tok::r_square, tok::identifier) ||
+ !Question->is(tok::question))
+ return false;
+ Identifier->TokenText =
+ StringRef(Identifier->TokenText.begin(),
+ Question->TokenText.end() - Identifier->TokenText.begin());
+ Identifier->ColumnWidth += Question->ColumnWidth;
+ Tokens.erase(Tokens.end() - 1);
+ return true;
+}
+
bool FormatTokenLexer::tryMergeLessLess() {
// Merge X,less,less,Y into X,lessless,Y unless X or Y is less.
if (Tokens.size() < 3)
diff --git a/lib/Format/FormatTokenLexer.h b/lib/Format/FormatTokenLexer.h
index 0cf357c85f3b..1e096fc50205 100644
--- a/lib/Format/FormatTokenLexer.h
+++ b/lib/Format/FormatTokenLexer.h
@@ -1,9 +1,8 @@
//===--- FormatTokenLexer.h - Format C++ code ----------------*- C++ ----*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -21,8 +20,8 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
-#include "llvm/Support/Regex.h"
#include "llvm/ADT/MapVector.h"
+#include "llvm/Support/Regex.h"
#include <stack>
@@ -49,6 +48,11 @@ private:
bool tryMergeLessLess();
bool tryMergeNSStringLiteral();
+ bool tryMergeJSPrivateIdentifier();
+ bool tryMergeCSharpVerbatimStringLiteral();
+ bool tryMergeCSharpKeywordVariables();
+ bool tryMergeCSharpNullConditionals();
+ bool tryMergeCSharpDoubleQuestion();
bool tryMergeTokens(ArrayRef<tok::TokenKind> Kinds, TokenType NewType);
diff --git a/lib/Format/NamespaceEndCommentsFixer.cpp b/lib/Format/NamespaceEndCommentsFixer.cpp
index dd364866d1ce..d04fc8f115fb 100644
--- a/lib/Format/NamespaceEndCommentsFixer.cpp
+++ b/lib/Format/NamespaceEndCommentsFixer.cpp
@@ -1,9 +1,8 @@
//===--- NamespaceEndCommentsFixer.cpp --------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -30,24 +29,41 @@ static const int kShortNamespaceMaxLines = 1;
// Computes the name of a namespace given the namespace token.
// Returns "" for anonymous namespace.
std::string computeName(const FormatToken *NamespaceTok) {
- assert(NamespaceTok && NamespaceTok->is(tok::kw_namespace) &&
+ assert(NamespaceTok &&
+ NamespaceTok->isOneOf(tok::kw_namespace, TT_NamespaceMacro) &&
"expecting a namespace token");
std::string name = "";
- // Collects all the non-comment tokens between 'namespace' and '{'.
const FormatToken *Tok = NamespaceTok->getNextNonComment();
- while (Tok && !Tok->is(tok::l_brace)) {
- name += Tok->TokenText;
+ if (NamespaceTok->is(TT_NamespaceMacro)) {
+    // Collects all the non-comment tokens between the opening parenthesis
+    // and the closing parenthesis or comma.
+ assert(Tok && Tok->is(tok::l_paren) && "expected an opening parenthesis");
Tok = Tok->getNextNonComment();
+ while (Tok && !Tok->isOneOf(tok::r_paren, tok::comma)) {
+ name += Tok->TokenText;
+ Tok = Tok->getNextNonComment();
+ }
+ } else {
+ // Collects all the non-comment tokens between 'namespace' and '{'.
+ while (Tok && !Tok->is(tok::l_brace)) {
+ name += Tok->TokenText;
+ Tok = Tok->getNextNonComment();
+ }
}
return name;
}
-std::string computeEndCommentText(StringRef NamespaceName, bool AddNewline) {
- std::string text = "// namespace";
- if (!NamespaceName.empty()) {
+std::string computeEndCommentText(StringRef NamespaceName, bool AddNewline,
+ const FormatToken *NamespaceTok) {
+ std::string text = "// ";
+ text += NamespaceTok->TokenText;
+ if (NamespaceTok->is(TT_NamespaceMacro))
+ text += "(";
+ else if (!NamespaceName.empty())
text += ' ';
- text += NamespaceName;
- }
+ text += NamespaceName;
+ if (NamespaceTok->is(TT_NamespaceMacro))
+ text += ")";
if (AddNewline)
text += '\n';
return text;
@@ -57,7 +73,8 @@ bool hasEndComment(const FormatToken *RBraceTok) {
return RBraceTok->Next && RBraceTok->Next->is(tok::comment);
}
-bool validEndComment(const FormatToken *RBraceTok, StringRef NamespaceName) {
+bool validEndComment(const FormatToken *RBraceTok, StringRef NamespaceName,
+ const FormatToken *NamespaceTok) {
assert(hasEndComment(RBraceTok));
const FormatToken *Comment = RBraceTok->Next;
@@ -67,19 +84,32 @@ bool validEndComment(const FormatToken *RBraceTok, StringRef NamespaceName) {
new llvm::Regex("^/[/*] *(end (of )?)? *(anonymous|unnamed)? *"
"namespace( +([a-zA-Z0-9:_]+))?\\.? *(\\*/)?$",
llvm::Regex::IgnoreCase);
- SmallVector<StringRef, 7> Groups;
- if (NamespaceCommentPattern->match(Comment->TokenText, &Groups)) {
- StringRef NamespaceNameInComment = Groups.size() > 5 ? Groups[5] : "";
- // Anonymous namespace comments must not mention a namespace name.
- if (NamespaceName.empty() && !NamespaceNameInComment.empty())
- return false;
- StringRef AnonymousInComment = Groups.size() > 3 ? Groups[3] : "";
- // Named namespace comments must not mention anonymous namespace.
- if (!NamespaceName.empty() && !AnonymousInComment.empty())
+ static llvm::Regex *const NamespaceMacroCommentPattern =
+ new llvm::Regex("^/[/*] *(end (of )?)? *(anonymous|unnamed)? *"
+ "([a-zA-Z0-9_]+)\\(([a-zA-Z0-9:_]*)\\)\\.? *(\\*/)?$",
+ llvm::Regex::IgnoreCase);
+
+ SmallVector<StringRef, 8> Groups;
+ if (NamespaceTok->is(TT_NamespaceMacro) &&
+ NamespaceMacroCommentPattern->match(Comment->TokenText, &Groups)) {
+ StringRef NamespaceTokenText = Groups.size() > 4 ? Groups[4] : "";
+ // The name of the macro must be used.
+ if (NamespaceTokenText != NamespaceTok->TokenText)
return false;
- return NamespaceNameInComment == NamespaceName;
+ } else if (NamespaceTok->isNot(tok::kw_namespace) ||
+ !NamespaceCommentPattern->match(Comment->TokenText, &Groups)) {
+ // Comment does not match regex.
+ return false;
}
- return false;
+ StringRef NamespaceNameInComment = Groups.size() > 5 ? Groups[5] : "";
+ // Anonymous namespace comments must not mention a namespace name.
+ if (NamespaceName.empty() && !NamespaceNameInComment.empty())
+ return false;
+ StringRef AnonymousInComment = Groups.size() > 3 ? Groups[3] : "";
+ // Named namespace comments must not mention anonymous namespace.
+ if (!NamespaceName.empty() && !AnonymousInComment.empty())
+ return false;
+ return NamespaceNameInComment == NamespaceName;
}
void addEndComment(const FormatToken *RBraceTok, StringRef EndCommentText,
@@ -128,6 +158,13 @@ getNamespaceToken(const AnnotatedLine *Line,
return NamespaceTok->getNamespaceToken();
}
+StringRef
+getNamespaceTokenText(const AnnotatedLine *Line,
+ const SmallVectorImpl<AnnotatedLine *> &AnnotatedLines) {
+ const FormatToken *NamespaceTok = getNamespaceToken(Line, AnnotatedLines);
+ return NamespaceTok ? NamespaceTok->TokenText : StringRef();
+}
+
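
With NamespaceMacros configured (say, TESTSUITE), the fixer now emits macro-aware end comments; a sketch assuming a macro that expands to a namespace:

    #define TESTSUITE(name) namespace name {

    TESTSUITE(mylib)
    void helper();
    } // TESTSUITE(mylib)
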
NamespaceEndCommentsFixer::NamespaceEndCommentsFixer(const Environment &Env,
const FormatStyle &Style)
: TokenAnalyzer(Env, Style) {}
@@ -140,6 +177,7 @@ std::pair<tooling::Replacements, unsigned> NamespaceEndCommentsFixer::analyze(
tooling::Replacements Fixes;
std::string AllNamespaceNames = "";
size_t StartLineIndex = SIZE_MAX;
+ StringRef NamespaceTokenText;
unsigned int CompactedNamespacesCount = 0;
for (size_t I = 0, E = AnnotatedLines.size(); I != E; ++I) {
const AnnotatedLine *EndLine = AnnotatedLines[I];
@@ -161,8 +199,11 @@ std::pair<tooling::Replacements, unsigned> NamespaceEndCommentsFixer::analyze(
StartLineIndex = EndLine->MatchingOpeningBlockLineIndex;
std::string NamespaceName = computeName(NamespaceTok);
if (Style.CompactNamespaces) {
+ if (CompactedNamespacesCount == 0)
+ NamespaceTokenText = NamespaceTok->TokenText;
if ((I + 1 < E) &&
- getNamespaceToken(AnnotatedLines[I + 1], AnnotatedLines) &&
+ NamespaceTokenText ==
+ getNamespaceTokenText(AnnotatedLines[I + 1], AnnotatedLines) &&
StartLineIndex - CompactedNamespacesCount - 1 ==
AnnotatedLines[I + 1]->MatchingOpeningBlockLineIndex &&
!AnnotatedLines[I + 1]->First->Finalized) {
@@ -190,12 +231,13 @@ std::pair<tooling::Replacements, unsigned> NamespaceEndCommentsFixer::analyze(
EndCommentNextTok->NewlinesBefore == 0 &&
EndCommentNextTok->isNot(tok::eof);
const std::string EndCommentText =
- computeEndCommentText(NamespaceName, AddNewline);
+ computeEndCommentText(NamespaceName, AddNewline, NamespaceTok);
if (!hasEndComment(EndCommentPrevTok)) {
bool isShort = I - StartLineIndex <= kShortNamespaceMaxLines + 1;
if (!isShort)
addEndComment(EndCommentPrevTok, EndCommentText, SourceMgr, &Fixes);
- } else if (!validEndComment(EndCommentPrevTok, NamespaceName)) {
+ } else if (!validEndComment(EndCommentPrevTok, NamespaceName,
+ NamespaceTok)) {
updateEndComment(EndCommentPrevTok, EndCommentText, SourceMgr, &Fixes);
}
StartLineIndex = SIZE_MAX;
diff --git a/lib/Format/NamespaceEndCommentsFixer.h b/lib/Format/NamespaceEndCommentsFixer.h
index 07a1c7bb0c35..eba24236f268 100644
--- a/lib/Format/NamespaceEndCommentsFixer.h
+++ b/lib/Format/NamespaceEndCommentsFixer.h
@@ -1,9 +1,8 @@
//===--- NamespaceEndCommentsFixer.h ----------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/Format/SortJavaScriptImports.cpp b/lib/Format/SortJavaScriptImports.cpp
index 2ec577382ffb..5be243f4c07a 100644
--- a/lib/Format/SortJavaScriptImports.cpp
+++ b/lib/Format/SortJavaScriptImports.cpp
@@ -1,9 +1,8 @@
//===--- SortJavaScriptImports.cpp - Sort ES6 Imports -----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -142,10 +141,9 @@ public:
SmallVector<unsigned, 16> Indices;
for (unsigned i = 0, e = References.size(); i != e; ++i)
Indices.push_back(i);
- std::stable_sort(Indices.begin(), Indices.end(),
- [&](unsigned LHSI, unsigned RHSI) {
- return References[LHSI] < References[RHSI];
- });
+ llvm::stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
+ return References[LHSI] < References[RHSI];
+ });
bool ReferencesInOrder = std::is_sorted(Indices.begin(), Indices.end());
std::string ReferencesText;
@@ -247,9 +245,8 @@ private:
// Sort the individual symbols within the import.
// E.g. `import {b, a} from 'x';` -> `import {a, b} from 'x';`
SmallVector<JsImportedSymbol, 1> Symbols = Reference.Symbols;
- std::stable_sort(
- Symbols.begin(), Symbols.end(),
- [&](const JsImportedSymbol &LHS, const JsImportedSymbol &RHS) {
+ llvm::stable_sort(
+ Symbols, [&](const JsImportedSymbol &LHS, const JsImportedSymbol &RHS) {
return LHS.Symbol.compare_lower(RHS.Symbol) < 0;
});
if (Symbols == Reference.Symbols) {
diff --git a/lib/Format/SortJavaScriptImports.h b/lib/Format/SortJavaScriptImports.h
index ecab0ae54cb3..7336db9537b0 100644
--- a/lib/Format/SortJavaScriptImports.h
+++ b/lib/Format/SortJavaScriptImports.h
@@ -1,9 +1,8 @@
//===--- SortJavaScriptImports.h - Sort ES6 Imports -------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/Format/TokenAnalyzer.cpp b/lib/Format/TokenAnalyzer.cpp
index 99fc61ef1c32..eb98a205d526 100644
--- a/lib/Format/TokenAnalyzer.cpp
+++ b/lib/Format/TokenAnalyzer.cpp
@@ -1,9 +1,8 @@
//===--- TokenAnalyzer.cpp - Analyze Token Streams --------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/Format/TokenAnalyzer.h b/lib/Format/TokenAnalyzer.h
index e43a860e46cf..5ce44a0f3ea7 100644
--- a/lib/Format/TokenAnalyzer.h
+++ b/lib/Format/TokenAnalyzer.h
@@ -1,9 +1,8 @@
//===--- TokenAnalyzer.h - Analyze Token Streams ----------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -36,10 +35,6 @@ namespace format {
class Environment {
public:
- Environment(SourceManager &SM, FileID ID, ArrayRef<CharSourceRange> Ranges)
- : SM(SM), ID(ID), CharRanges(Ranges.begin(), Ranges.end()),
- FirstStartColumn(0), NextStartColumn(0), LastStartColumn(0) {}
-
// This sets up an virtual file system with file \p FileName containing the
// fragment \p Code. Assumes that \p Code starts at \p FirstStartColumn,
// that the next lines of \p Code should start at \p NextStartColumn, and
diff --git a/lib/Format/TokenAnnotator.cpp b/lib/Format/TokenAnnotator.cpp
index 24c2f998c388..490c4f46135e 100644
--- a/lib/Format/TokenAnnotator.cpp
+++ b/lib/Format/TokenAnnotator.cpp
@@ -1,9 +1,8 @@
//===--- TokenAnnotator.cpp - Format C++ code -----------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -14,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "TokenAnnotator.h"
+#include "FormatToken.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/Debug.h"
@@ -62,7 +62,7 @@ private:
if (NonTemplateLess.count(CurrentToken->Previous))
return false;
- const FormatToken &Previous = *CurrentToken->Previous; // The '<'.
+ const FormatToken &Previous = *CurrentToken->Previous; // The '<'.
if (Previous.Previous) {
if (Previous.Previous->Tok.isLiteral())
return false;
@@ -299,6 +299,8 @@ private:
CurrentToken->Type = TT_JavaAnnotation;
if (Left->Previous && Left->Previous->is(TT_LeadingJavaAnnotation))
CurrentToken->Type = TT_LeadingJavaAnnotation;
+ if (Left->Previous && Left->Previous->is(TT_AttributeSquare))
+ CurrentToken->Type = TT_AttributeSquare;
if (!HasMultipleLines)
Left->PackingKind = PPK_Inconclusive;
@@ -349,9 +351,47 @@ private:
return false;
}
+ bool isCSharpAttributeSpecifier(const FormatToken &Tok) {
+ if (!Style.isCSharp())
+ return false;
+
+ const FormatToken *AttrTok = Tok.Next;
+ if (!AttrTok)
+ return false;
+
+ // Just an empty declaration e.g. string [].
+ if (AttrTok->is(tok::r_square))
+ return false;
+
+  // Move along the tokens between the '[' and ']', e.g. [STAThread].
+ while (AttrTok && AttrTok->isNot(tok::r_square)) {
+ AttrTok = AttrTok->Next;
+ }
+
+ if (!AttrTok)
+ return false;
+
+ // Move past the end of ']'.
+ AttrTok = AttrTok->Next;
+ if (!AttrTok)
+ return false;
+
+ // Limit this to being an access modifier that follows.
+ if (AttrTok->isOneOf(tok::kw_public, tok::kw_private, tok::kw_protected,
+ tok::kw_class, tok::kw_static, tok::l_square,
+ Keywords.kw_internal)) {
+ return true;
+ }
+ return false;
+ }
+
bool isCpp11AttributeSpecifier(const FormatToken &Tok) {
if (!Style.isCpp() || !Tok.startsSequence(tok::l_square, tok::l_square))
return false;
+ // The first square bracket is part of an ObjC array literal
+ if (Tok.Previous && Tok.Previous->is(tok::at)) {
+ return false;
+ }
const FormatToken *AttrTok = Tok.Next->Next;
if (!AttrTok)
return false;
@@ -364,9 +404,9 @@ private:
while (AttrTok && !AttrTok->startsSequence(tok::r_square, tok::r_square)) {
// ObjC message send. We assume nobody will use : in a C++11 attribute
// specifier parameter, although this is technically valid:
- // [[foo(:)]]
+ // [[foo(:)]].
if (AttrTok->is(tok::colon) ||
- AttrTok->startsSequence(tok::identifier, tok::identifier) ||
+ AttrTok->startsSequence(tok::identifier, tok::identifier) ||
AttrTok->startsSequence(tok::r_paren, tok::identifier))
return false;
if (AttrTok->is(tok::ellipsis))
@@ -399,11 +439,17 @@ private:
bool IsCpp11AttributeSpecifier = isCpp11AttributeSpecifier(*Left) ||
Contexts.back().InCpp11AttributeSpecifier;
+ // Treat C# Attributes [STAThread] much like C++ attributes [[...]].
+ bool IsCSharp11AttributeSpecifier =
+ isCSharpAttributeSpecifier(*Left) ||
+ Contexts.back().InCSharpAttributeSpecifier;
+
bool InsideInlineASM = Line.startsWith(tok::kw_asm);
+ bool IsCppStructuredBinding = Left->isCppStructuredBinding(Style);
bool StartsObjCMethodExpr =
- !InsideInlineASM && !CppArrayTemplates && Style.isCpp() &&
- !IsCpp11AttributeSpecifier && Contexts.back().CanBeExpression &&
- Left->isNot(TT_LambdaLSquare) &&
+ !IsCppStructuredBinding && !InsideInlineASM && !CppArrayTemplates &&
+ Style.isCpp() && !IsCpp11AttributeSpecifier &&
+ Contexts.back().CanBeExpression && Left->isNot(TT_LambdaLSquare) &&
!CurrentToken->isOneOf(tok::l_brace, tok::r_square) &&
(!Parent ||
Parent->isOneOf(tok::colon, tok::l_square, tok::l_paren,
@@ -411,11 +457,12 @@ private:
Parent->isUnaryOperator() ||
// FIXME(bug 36976): ObjC return types shouldn't use TT_CastRParen.
Parent->isOneOf(TT_ObjCForIn, TT_CastRParen) ||
- getBinOpPrecedence(Parent->Tok.getKind(), true, true) > prec::Unknown);
+ (getBinOpPrecedence(Parent->Tok.getKind(), true, true) >
+ prec::Unknown));
bool ColonFound = false;
unsigned BindingIncrease = 1;
- if (Left->isCppStructuredBinding(Style)) {
+ if (IsCppStructuredBinding) {
Left->Type = TT_StructuredBindingLSquare;
} else if (Left->is(TT_Unknown)) {
if (StartsObjCMethodExpr) {
@@ -476,6 +523,8 @@ private:
// Should only be relevant to JavaScript:
tok::kw_default)) {
Left->Type = TT_ArrayInitializerLSquare;
+ } else if (IsCSharp11AttributeSpecifier) {
+ Left->Type = TT_AttributeSquare;
} else {
BindingIncrease = 10;
Left->Type = TT_ArraySubscriptLSquare;
@@ -490,14 +539,22 @@ private:
Contexts.back().ColonIsObjCMethodExpr = StartsObjCMethodExpr;
Contexts.back().InCpp11AttributeSpecifier = IsCpp11AttributeSpecifier;
+ Contexts.back().InCSharpAttributeSpecifier = IsCSharp11AttributeSpecifier;
while (CurrentToken) {
if (CurrentToken->is(tok::r_square)) {
if (IsCpp11AttributeSpecifier)
CurrentToken->Type = TT_AttributeSquare;
- else if (CurrentToken->Next && CurrentToken->Next->is(tok::l_paren) &&
+ if (IsCSharp11AttributeSpecifier)
+ CurrentToken->Type = TT_AttributeSquare;
+ else if (((CurrentToken->Next &&
+ CurrentToken->Next->is(tok::l_paren)) ||
+ (CurrentToken->Previous &&
+ CurrentToken->Previous->Previous == Left)) &&
Left->is(TT_ObjCMethodExpr)) {
- // An ObjC method call is rarely followed by an open parenthesis.
+ // An ObjC method call is rarely followed by an open parenthesis. It
+ // also can't be composed of just one token, unless it's a macro that
+ // will be expanded to more tokens.
// FIXME: Do we incorrectly label ":" with this?
StartsObjCMethodExpr = false;
Left->Type = TT_Unknown;
@@ -516,6 +573,10 @@ private:
if (Parent && Parent->is(TT_PointerOrReference))
Parent->Type = TT_BinaryOperator;
}
+ // An arrow after an ObjC method expression is not a lambda arrow.
+ if (CurrentToken->Type == TT_ObjCMethodExpr && CurrentToken->Next &&
+ CurrentToken->Next->is(TT_LambdaArrow))
+ CurrentToken->Next->Type = TT_Unknown;
Left->MatchingParen = CurrentToken;
CurrentToken->MatchingParen = Left;
// FirstObjCSelectorName is set when a colon is found. This does
@@ -523,11 +584,11 @@ private:
// Here, we set FirstObjCSelectorName when the end of the method call is
// reached, in case it was not set already.
if (!Contexts.back().FirstObjCSelectorName) {
- FormatToken* Previous = CurrentToken->getPreviousNonComment();
- if (Previous && Previous->is(TT_SelectorName)) {
- Previous->ObjCSelectorNameParts = 1;
- Contexts.back().FirstObjCSelectorName = Previous;
- }
+ FormatToken *Previous = CurrentToken->getPreviousNonComment();
+ if (Previous && Previous->is(TT_SelectorName)) {
+ Previous->ObjCSelectorNameParts = 1;
+ Contexts.back().FirstObjCSelectorName = Previous;
+ }
} else {
Left->ParameterCount =
Contexts.back().FirstObjCSelectorName->ObjCSelectorNameParts;
@@ -1062,13 +1123,16 @@ public:
return LT_ImportStatement;
}
- // In .proto files, top-level options are very similar to import statements
- // and should not be line-wrapped.
+ // In .proto files, top-level options and package statements are very
+ // similar to import statements and should not be line-wrapped.
if (Style.Language == FormatStyle::LK_Proto && Line.Level == 0 &&
- CurrentToken->is(Keywords.kw_option)) {
+ CurrentToken->isOneOf(Keywords.kw_option, Keywords.kw_package)) {
next();
- if (CurrentToken && CurrentToken->is(tok::identifier))
+ if (CurrentToken && CurrentToken->is(tok::identifier)) {
+ while (CurrentToken)
+ next();
return LT_ImportStatement;
+ }
}
bool KeywordVirtualFound = false;
@@ -1134,11 +1198,12 @@ private:
// Reset token type in case we have already looked at it and then
// recovered from an error (e.g. failure to find the matching >).
- if (!CurrentToken->isOneOf(TT_LambdaLSquare, TT_ForEachMacro,
- TT_FunctionLBrace, TT_ImplicitStringLiteral,
- TT_InlineASMBrace, TT_JsFatArrow, TT_LambdaArrow,
- TT_OverloadedOperator, TT_RegexLiteral,
- TT_TemplateString, TT_ObjCStringLiteral))
+ if (!CurrentToken->isOneOf(
+ TT_LambdaLSquare, TT_LambdaLBrace, TT_ForEachMacro,
+ TT_TypenameMacro, TT_FunctionLBrace, TT_ImplicitStringLiteral,
+ TT_InlineASMBrace, TT_JsFatArrow, TT_LambdaArrow, TT_NamespaceMacro,
+ TT_OverloadedOperator, TT_RegexLiteral, TT_TemplateString,
+ TT_ObjCStringLiteral))
CurrentToken->Type = TT_Unknown;
CurrentToken->Role.reset();
CurrentToken->MatchingParen = nullptr;
@@ -1182,6 +1247,7 @@ private:
bool CaretFound = false;
bool IsForEachMacro = false;
bool InCpp11AttributeSpecifier = false;
+ bool InCSharpAttributeSpecifier = false;
};
/// Puts a new \c Context onto the stack \c Contexts for the lifetime
@@ -1355,6 +1421,7 @@ private:
if (AfterParen->Tok.isNot(tok::caret)) {
if (FormatToken *BeforeParen = Current.MatchingParen->Previous)
if (BeforeParen->is(tok::identifier) &&
+ !BeforeParen->is(TT_TypenameMacro) &&
BeforeParen->TokenText == BeforeParen->TokenText.upper() &&
(!BeforeParen->Previous ||
BeforeParen->Previous->ClosesTemplateDeclaration))
@@ -1389,7 +1456,8 @@ private:
Current.Type = Current.Previous->Type;
}
} else if (canBeObjCSelectorComponent(Current) &&
- // FIXME(bug 36976): ObjC return types shouldn't use TT_CastRParen.
+ // FIXME(bug 36976): ObjC return types shouldn't use
+ // TT_CastRParen.
Current.Previous && Current.Previous->is(TT_CastRParen) &&
Current.Previous->MatchingParen &&
Current.Previous->MatchingParen->Previous &&
@@ -1605,7 +1673,8 @@ private:
FormatToken *TokenBeforeMatchingParen =
PrevToken->MatchingParen->getPreviousNonComment();
if (TokenBeforeMatchingParen &&
- TokenBeforeMatchingParen->isOneOf(tok::kw_typeof, tok::kw_decltype))
+ TokenBeforeMatchingParen->isOneOf(tok::kw_typeof, tok::kw_decltype,
+ TT_TypenameMacro))
return TT_PointerOrReference;
}
@@ -1912,12 +1981,15 @@ void TokenAnnotator::setCommentLineLevels(
NextNonCommentLine->First->NewlinesBefore <= 1 &&
NextNonCommentLine->First->OriginalColumn ==
(*I)->First->OriginalColumn) {
- // Align comments for preprocessor lines with the # in column 0.
- // Otherwise, align with the next line.
- (*I)->Level = (NextNonCommentLine->Type == LT_PreprocessorDirective ||
- NextNonCommentLine->Type == LT_ImportStatement)
- ? 0
- : NextNonCommentLine->Level;
+ // Align comments for preprocessor lines with the # in column 0 if
+ // preprocessor lines are not indented. Otherwise, align with the next
+ // line.
+ (*I)->Level =
+ (Style.IndentPPDirectives != FormatStyle::PPDIS_BeforeHash &&
+ (NextNonCommentLine->Type == LT_PreprocessorDirective ||
+ NextNonCommentLine->Type == LT_ImportStatement))
+ ? 0
+ : NextNonCommentLine->Level;
} else {
NextNonCommentLine = (*I)->First->isNot(tok::r_brace) ? (*I) : nullptr;
}
@@ -2033,7 +2105,7 @@ static bool isFunctionDeclarationName(const FormatToken &Current,
return true;
for (const FormatToken *Tok = Next->Next; Tok && Tok != Next->MatchingParen;
Tok = Tok->Next) {
- if (Tok->is(tok::l_paren) && Tok->MatchingParen) {
+ if (Tok->isOneOf(tok::l_paren, TT_TemplateOpener) && Tok->MatchingParen) {
Tok = Tok->MatchingParen;
continue;
}
@@ -2245,6 +2317,9 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
return 500;
}
+ if (Left.is(tok::coloncolon) ||
+ (Right.is(tok::period) && Style.Language == FormatStyle::LK_Proto))
+ return 500;
if (Right.isOneOf(TT_StartOfName, TT_FunctionDeclarationName) ||
Right.is(tok::kw_operator)) {
if (Line.startsWith(tok::kw_for) && Right.PartOfMultiVariableDeclStmt)
@@ -2263,9 +2338,6 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
return 160;
if (Left.is(TT_CastRParen))
return 100;
- if (Left.is(tok::coloncolon) ||
- (Right.is(tok::period) && Style.Language == FormatStyle::LK_Proto))
- return 500;
if (Left.isOneOf(tok::kw_class, tok::kw_struct))
return 5000;
if (Left.is(tok::comment))
@@ -2391,6 +2463,12 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
return 3;
}
+bool TokenAnnotator::spaceRequiredBeforeParens(const FormatToken &Right) const {
+ return Style.SpaceBeforeParens == FormatStyle::SBPO_Always ||
+ (Style.SpaceBeforeParens == FormatStyle::SBPO_NonEmptyParentheses &&
+ Right.ParameterCount > 0);
+}
+
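
The helper covers the existing SBPO_Always case plus SpaceBeforeParens: NonEmptyParentheses; roughly, with NonEmptyParentheses and the remaining options at their defaults:

    void foo();
    void bar(int);
    void test() {
      foo();     // empty parentheses: no space inserted
      bar (42);  // non-empty parentheses: space before '('
    }
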
bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
const FormatToken &Left,
const FormatToken &Right) {
@@ -2415,9 +2493,9 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Right.isOneOf(tok::semi, tok::comma))
return false;
if (Right.is(tok::less) && Line.Type == LT_ObjCDecl) {
- bool IsLightweightGeneric =
- Right.MatchingParen && Right.MatchingParen->Next &&
- Right.MatchingParen->Next->is(tok::colon);
+ bool IsLightweightGeneric = Right.MatchingParen &&
+ Right.MatchingParen->Next &&
+ Right.MatchingParen->Next->is(tok::colon);
return !IsLightweightGeneric && Style.ObjCSpaceBeforeProtocolList;
}
if (Right.is(tok::less) && Left.is(tok::kw_template))
@@ -2456,7 +2534,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
FormatToken *TokenBeforeMatchingParen =
Left.MatchingParen->getPreviousNonComment();
if (!TokenBeforeMatchingParen ||
- !TokenBeforeMatchingParen->isOneOf(tok::kw_typeof, tok::kw_decltype))
+ !TokenBeforeMatchingParen->isOneOf(tok::kw_typeof, tok::kw_decltype,
+ TT_TypenameMacro))
return true;
}
return (Left.Tok.isLiteral() ||
@@ -2537,9 +2616,11 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
(Left.isOneOf(tok::kw_try, Keywords.kw___except, tok::kw_catch,
tok::kw_new, tok::kw_delete) &&
(!Left.Previous || Left.Previous->isNot(tok::period))))) ||
- (Style.SpaceBeforeParens == FormatStyle::SBPO_Always &&
+ (spaceRequiredBeforeParens(Right) &&
(Left.is(tok::identifier) || Left.isFunctionLikeKeyword() ||
- Left.is(tok::r_paren)) &&
+ Left.is(tok::r_paren) || Left.isSimpleTypeSpecifier() ||
+ (Left.is(tok::r_square) && Left.MatchingParen &&
+ Left.MatchingParen->is(TT_LambdaLSquare))) &&
Line.Type != LT_PreprocessorDirective);
}
if (Left.is(tok::at) && Right.Tok.getObjCKeywordID() != tok::objc_not_keyword)
@@ -2602,7 +2683,8 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
// Slashes occur in text protocol extension syntax: [type/type] { ... }.
if (Left.is(tok::slash) || Right.is(tok::slash))
return false;
- if (Left.MatchingParen && Left.MatchingParen->is(TT_ProtoExtensionLSquare) &&
+ if (Left.MatchingParen &&
+ Left.MatchingParen->is(TT_ProtoExtensionLSquare) &&
Right.isOneOf(tok::l_brace, tok::less))
return !Style.Cpp11BracedListStyle;
// A percent is probably part of a formatting specification, such as %lld.
@@ -2612,7 +2694,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
// and "%d %d"
if (Left.is(tok::numeric_constant) && Right.is(tok::percent))
return Right.WhitespaceRange.getEnd() != Right.WhitespaceRange.getBegin();
- } else if (Style.Language == FormatStyle::LK_JavaScript) {
+ } else if (Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp()) {
if (Left.is(TT_JsFatArrow))
return true;
// for await ( ...
@@ -2730,7 +2812,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
Left.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow))
return true;
if (Right.is(TT_OverloadedOperatorLParen))
- return Style.SpaceBeforeParens == FormatStyle::SBPO_Always;
+ return spaceRequiredBeforeParens(Right);
if (Left.is(tok::comma))
return true;
if (Right.is(tok::comma))
@@ -2761,7 +2843,8 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return true;
}
if (Left.is(TT_UnaryOperator))
- return Right.is(TT_BinaryOperator);
+ return (Style.SpaceAfterLogicalNot && Left.is(tok::exclaim)) ||
+ Right.is(TT_BinaryOperator);
// If the next token is a binary operator or a selector name, we have
// incorrectly classified the parenthesis as a cast. FIXME: Detect correctly.
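
This wires up the new SpaceAfterLogicalNot option; with it enabled (a sketch, defaults otherwise):

    bool isEmpty();
    void f() {
      if (! isEmpty())   // '!' is followed by a space
        return;
    }
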
@@ -2814,7 +2897,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return true;
if (Left.is(TT_TemplateCloser) && Right.is(tok::l_paren) &&
Right.isNot(TT_FunctionTypeLParen))
- return Style.SpaceBeforeParens == FormatStyle::SBPO_Always;
+ return spaceRequiredBeforeParens(Right);
if (Right.is(TT_TemplateOpener) && Left.is(tok::r_paren) &&
Left.MatchingParen && Left.MatchingParen->is(TT_OverloadedOperatorLParen))
return false;
@@ -2831,7 +2914,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
// Returns 'true' if 'Tok' is a brace we'd want to break before in Allman style.
static bool isAllmanBrace(const FormatToken &Tok) {
return Tok.is(tok::l_brace) && Tok.BlockKind == BK_Block &&
- !Tok.isOneOf(TT_ObjCBlockLBrace, TT_DictLiteral);
+ !Tok.isOneOf(TT_ObjCBlockLBrace, TT_LambdaLBrace, TT_DictLiteral);
}
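
Excluding TT_LambdaLBrace here keeps lambda bodies attached even under Allman-style brace wrapping; e.g. (sketch, assuming AllowShortLambdasOnASingleLine permits merging):

    // With BreakBeforeBraces: Allman, the lambda's '{' still stays on the same line:
    auto Less = [](int A, int B) { return A < B; };
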
bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
@@ -2959,6 +3042,27 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
if (Left.is(TT_ObjCBlockLBrace) && !Style.AllowShortBlocksOnASingleLine)
return true;
+ if (Left.is(TT_LambdaLBrace)) {
+ if (Left.MatchingParen && Left.MatchingParen->Next &&
+ Left.MatchingParen->Next->isOneOf(tok::comma, tok::r_paren) &&
+ Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_Inline)
+ return false;
+
+ if (Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_None ||
+ Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_Inline ||
+ (!Left.Children.empty() &&
+ Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_Empty))
+ return true;
+ }
+
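
These checks implement AllowShortLambdasOnASingleLine; roughly (a non-exhaustive sketch):

    // SLS_All (the default) keeps a short lambda on one line:
    auto L = [] { return 42; };
    // With SLS_None the body is always broken out:
    // auto L = [] {
    //   return 42;
    // };
    // With SLS_Inline only lambdas used as call arguments are merged, e.g. in
    // std::sort(V.begin(), V.end(), [](int A, int B) { return A < B; });
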
+ // Put multiple C# attributes on a new line.
+ if (Style.isCSharp() &&
+ ((Left.is(TT_AttributeSquare) && Left.is(tok::r_square)) ||
+ (Left.is(tok::r_square) && Right.is(TT_AttributeSquare) &&
+ Right.is(tok::l_square))))
+ return true;
+
+  // Put multiple Java annotations on a new line.
if ((Style.Language == FormatStyle::LK_Java ||
Style.Language == FormatStyle::LK_JavaScript) &&
Left.is(TT_LeadingJavaAnnotation) &&
@@ -3119,7 +3223,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
// function f(): a is B { ... }
// Do not break before is in these cases.
if (Right.is(Keywords.kw_is)) {
- const FormatToken* Next = Right.getNextNonComment();
+ const FormatToken *Next = Right.getNextNonComment();
// If `is` is followed by a colon, it's likely that it's a dict key, so
// ignore it for this check.
// For example this is common in Polymer:
@@ -3159,6 +3263,11 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return false; // must not break in "module foo { ...}"
if (Right.is(TT_TemplateString) && Right.closesScope())
return false;
+ // Don't split tagged template literal so there is a break between the tag
+ // identifier and template string.
+ if (Left.is(tok::identifier) && Right.is(TT_TemplateString)) {
+ return false;
+ }
if (Left.is(TT_TemplateString) && Left.opensScope())
return true;
}
diff --git a/lib/Format/TokenAnnotator.h b/lib/Format/TokenAnnotator.h
index e2f2c469d267..702ac6c0d76e 100644
--- a/lib/Format/TokenAnnotator.h
+++ b/lib/Format/TokenAnnotator.h
@@ -1,9 +1,8 @@
//===--- TokenAnnotator.h - Format C++ code ---------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -100,14 +99,23 @@ public:
/// function declaration. Asserts MightBeFunctionDecl.
bool mightBeFunctionDefinition() const {
assert(MightBeFunctionDecl);
- // FIXME: Line.Last points to other characters than tok::semi
- // and tok::lbrace.
- return !Last->isOneOf(tok::semi, tok::comment);
+    // Try to determine if the end of a stream of tokens is either the
+    // definition or the declaration of a function: a line ending in ';' (as in
+    // foo();) is a declaration rather than a definition. However, the line
+    // could also end with
+    //    foo(); /* comment */
+    // or
+    //    foo(); // comment
+    // or
+    //    foo() // comment
+    // endsWith() ignores trailing comments, so these cases are handled too.
+ return !endsWith(tok::semi);
}
/// \c true if this line starts a namespace definition.
bool startsWithNamespace() const {
return startsWith(tok::kw_namespace) ||
+ startsWith(TT_NamespaceMacro) ||
startsWith(tok::kw_inline, tok::kw_namespace) ||
startsWith(tok::kw_export, tok::kw_namespace);
}
@@ -165,6 +173,8 @@ private:
unsigned splitPenalty(const AnnotatedLine &Line, const FormatToken &Tok,
bool InFunctionDecl);
+ bool spaceRequiredBeforeParens(const FormatToken &Right) const;
+
bool spaceRequiredBetween(const AnnotatedLine &Line, const FormatToken &Left,
const FormatToken &Right);
diff --git a/lib/Format/UnwrappedLineFormatter.cpp b/lib/Format/UnwrappedLineFormatter.cpp
index 6b6a9aff461a..3f3c80bc1ccf 100644
--- a/lib/Format/UnwrappedLineFormatter.cpp
+++ b/lib/Format/UnwrappedLineFormatter.cpp
@@ -1,14 +1,13 @@
//===--- UnwrappedLineFormatter.cpp - Format C++ code ---------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-#include "NamespaceEndCommentsFixer.h"
#include "UnwrappedLineFormatter.h"
+#include "NamespaceEndCommentsFixer.h"
#include "WhitespaceManager.h"
#include "llvm/Support/Debug.h"
#include <queue>
@@ -95,7 +94,7 @@ private:
/// characters to the left from their level.
int getIndentOffset(const FormatToken &RootToken) {
if (Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript)
+ Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp())
return 0;
if (RootToken.isAccessSpecifier(false) ||
RootToken.isObjCAccessSpecifier() ||
@@ -135,20 +134,29 @@ private:
unsigned Indent = 0;
};
-bool isNamespaceDeclaration(const AnnotatedLine *Line) {
- const FormatToken *NamespaceTok = Line->First;
- return NamespaceTok && NamespaceTok->getNamespaceToken();
-}
-
-bool isEndOfNamespace(const AnnotatedLine *Line,
- const SmallVectorImpl<AnnotatedLine *> &AnnotatedLines) {
+const FormatToken *getMatchingNamespaceToken(
+ const AnnotatedLine *Line,
+ const SmallVectorImpl<AnnotatedLine *> &AnnotatedLines) {
if (!Line->startsWith(tok::r_brace))
- return false;
+ return nullptr;
size_t StartLineIndex = Line->MatchingOpeningBlockLineIndex;
if (StartLineIndex == UnwrappedLine::kInvalidIndex)
- return false;
+ return nullptr;
assert(StartLineIndex < AnnotatedLines.size());
- return isNamespaceDeclaration(AnnotatedLines[StartLineIndex]);
+ return AnnotatedLines[StartLineIndex]->First->getNamespaceToken();
+}
+
+StringRef getNamespaceTokenText(const AnnotatedLine *Line) {
+ const FormatToken *NamespaceToken = Line->First->getNamespaceToken();
+ return NamespaceToken ? NamespaceToken->TokenText : StringRef();
+}
+
+StringRef getMatchingNamespaceTokenText(
+ const AnnotatedLine *Line,
+ const SmallVectorImpl<AnnotatedLine *> &AnnotatedLines) {
+ const FormatToken *NamespaceToken =
+ getMatchingNamespaceToken(Line, AnnotatedLines);
+ return NamespaceToken ? NamespaceToken->TokenText : StringRef();
}
class LineJoiner {
@@ -250,10 +258,11 @@ private:
TheLine->Level != 0);
if (Style.CompactNamespaces) {
- if (isNamespaceDeclaration(TheLine)) {
+ if (auto nsToken = TheLine->First->getNamespaceToken()) {
int i = 0;
unsigned closingLine = TheLine->MatchingClosingBlockLineIndex - 1;
- for (; I + 1 + i != E && isNamespaceDeclaration(I[i + 1]) &&
+ for (; I + 1 + i != E &&
+ nsToken->TokenText == getNamespaceTokenText(I[i + 1]) &&
closingLine == I[i + 1]->MatchingClosingBlockLineIndex &&
I[i + 1]->Last->TotalLength < Limit;
i++, closingLine--) {
@@ -265,10 +274,12 @@ private:
return i;
}
- if (isEndOfNamespace(TheLine, AnnotatedLines)) {
+ if (auto nsToken = getMatchingNamespaceToken(TheLine, AnnotatedLines)) {
int i = 0;
unsigned openingLine = TheLine->MatchingOpeningBlockLineIndex - 1;
- for (; I + 1 + i != E && isEndOfNamespace(I[i + 1], AnnotatedLines) &&
+ for (; I + 1 + i != E &&
+ nsToken->TokenText ==
+ getMatchingNamespaceTokenText(I[i + 1], AnnotatedLines) &&
openingLine == I[i + 1]->MatchingOpeningBlockLineIndex;
i++, openingLine--) {
// No space between consecutive braces
@@ -414,10 +425,12 @@ private:
if (I[1]->First->isOneOf(tok::semi, tok::kw_if, tok::kw_for, tok::kw_while,
TT_LineComment))
return 0;
- // Only inline simple if's (no nested if or else).
- if (I + 2 != E && Line.startsWith(tok::kw_if) &&
- I[2]->First->is(tok::kw_else))
- return 0;
+ // Only inline simple if's (no nested if or else), unless
+ // AllowShortIfStatementsOnASingleLine is set to Always.
+ if (Style.AllowShortIfStatementsOnASingleLine != FormatStyle::SIS_Always) {
+ if (I + 2 != E && Line.startsWith(tok::kw_if) &&
+ I[2]->First->is(tok::kw_else))
+ return 0;
+ }
return 1;
}
@@ -691,10 +704,8 @@ public:
/// Formats an \c AnnotatedLine and returns the penalty.
///
/// If \p DryRun is \c false, directly applies the changes.
- virtual unsigned formatLine(const AnnotatedLine &Line,
- unsigned FirstIndent,
- unsigned FirstStartColumn,
- bool DryRun) = 0;
+ virtual unsigned formatLine(const AnnotatedLine &Line, unsigned FirstIndent,
+ unsigned FirstStartColumn, bool DryRun) = 0;
protected:
/// If the \p State's next token is an r_brace closing a nested block,
@@ -822,7 +833,7 @@ public:
LineState State =
Indenter->getInitialState(FirstIndent, FirstStartColumn, &Line, DryRun);
while (State.NextToken) {
- formatChildren(State, /*Newline=*/false, DryRun, Penalty);
+ formatChildren(State, /*NewLine=*/false, DryRun, Penalty);
Indenter->addTokenToState(
State, /*Newline=*/State.NextToken->MustBreakBefore, DryRun);
}
@@ -1009,13 +1020,10 @@ private:
} // anonymous namespace
-unsigned
-UnwrappedLineFormatter::format(const SmallVectorImpl<AnnotatedLine *> &Lines,
- bool DryRun, int AdditionalIndent,
- bool FixBadIndentation,
- unsigned FirstStartColumn,
- unsigned NextStartColumn,
- unsigned LastStartColumn) {
+unsigned UnwrappedLineFormatter::format(
+ const SmallVectorImpl<AnnotatedLine *> &Lines, bool DryRun,
+ int AdditionalIndent, bool FixBadIndentation, unsigned FirstStartColumn,
+ unsigned NextStartColumn, unsigned LastStartColumn) {
LineJoiner Joiner(Style, Keywords, Lines);
// Try to look up already computed penalty in DryRun-mode.
@@ -1077,7 +1085,9 @@ UnwrappedLineFormatter::format(const SmallVectorImpl<AnnotatedLine *> &Lines,
TheLine.Last->TotalLength + Indent <= ColumnLimit ||
(TheLine.Type == LT_ImportStatement &&
(Style.Language != FormatStyle::LK_JavaScript ||
- !Style.JavaScriptWrapImports));
+ !Style.JavaScriptWrapImports)) ||
+ (Style.isCSharp() &&
+ TheLine.InPPDirective); // don't split #regions in C#
if (Style.ColumnLimit == 0)
NoColumnLimitLineFormatter(Indenter, Whitespaces, Style, this)
.formatLine(TheLine, NextStartColumn + Indent,
@@ -1182,8 +1192,10 @@ void UnwrappedLineFormatter::formatFirstToken(
if (Newlines)
Indent = NewlineIndent;
- // Preprocessor directives get indented after the hash, if indented.
- if (Line.Type == LT_PreprocessorDirective || Line.Type == LT_ImportStatement)
+ // Preprocessor directives get indented before the hash only if specified
+ if (Style.IndentPPDirectives != FormatStyle::PPDIS_BeforeHash &&
+ (Line.Type == LT_PreprocessorDirective ||
+ Line.Type == LT_ImportStatement))
Indent = 0;
Whitespaces->replaceWhitespace(RootToken, Newlines, Indent, Indent,
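
The formatFirstToken change above, together with the parser changes further down, wires up IndentPPDirectives: BeforeHash. A hedged sketch of the intended shape, based on my reading of the option rather than on output produced by this diff:

  // With IndentPPDirectives: BeforeHash, nested directives are indented as a whole:
  #if defined(FOO)
    #if defined(BAR)
      #define BAZ 1
    #endif
  #endif
  // With AfterHash, the '#' stays in column 0 and the indent goes after it:
  #if defined(FOO)
  #  if defined(BAR)
  #    define BAZ 1
  #  endif
  #endif
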
diff --git a/lib/Format/UnwrappedLineFormatter.h b/lib/Format/UnwrappedLineFormatter.h
index dac210ea62b1..a1ff16999589 100644
--- a/lib/Format/UnwrappedLineFormatter.h
+++ b/lib/Format/UnwrappedLineFormatter.h
@@ -1,9 +1,8 @@
//===--- UnwrappedLineFormatter.h - Format C++ code -------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -40,10 +39,8 @@ public:
/// Format the current block and return the penalty.
unsigned format(const SmallVectorImpl<AnnotatedLine *> &Lines,
bool DryRun = false, int AdditionalIndent = 0,
- bool FixBadIndentation = false,
- unsigned FirstStartColumn = 0,
- unsigned NextStartColumn = 0,
- unsigned LastStartColumn = 0);
+ bool FixBadIndentation = false, unsigned FirstStartColumn = 0,
+ unsigned NextStartColumn = 0, unsigned LastStartColumn = 0);
private:
/// Add a new line and the required indent before the first Token
diff --git a/lib/Format/UnwrappedLineParser.cpp b/lib/Format/UnwrappedLineParser.cpp
index 3cd3c8f9cdf6..a35e98ae5503 100644
--- a/lib/Format/UnwrappedLineParser.cpp
+++ b/lib/Format/UnwrappedLineParser.cpp
@@ -1,9 +1,8 @@
//===--- UnwrappedLineParser.cpp - Format C++ code ------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -173,10 +172,16 @@ class CompoundStatementIndenter {
public:
CompoundStatementIndenter(UnwrappedLineParser *Parser,
const FormatStyle &Style, unsigned &LineLevel)
+ : CompoundStatementIndenter(Parser, LineLevel,
+ Style.BraceWrapping.AfterControlStatement,
+ Style.BraceWrapping.IndentBraces) {
+ }
+ CompoundStatementIndenter(UnwrappedLineParser *Parser, unsigned &LineLevel,
+ bool WrapBrace, bool IndentBrace)
: LineLevel(LineLevel), OldLineLevel(LineLevel) {
- if (Style.BraceWrapping.AfterControlStatement)
+ if (WrapBrace)
Parser->addUnwrappedLine();
- if (Style.BraceWrapping.IndentBraces)
+ if (IndentBrace)
++LineLevel;
}
~CompoundStatementIndenter() { LineLevel = OldLineLevel; }
@@ -482,7 +487,7 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
break;
case tok::identifier:
if (!Tok->is(TT_StatementMacro))
- break;
+ break;
LLVM_FALLTHROUGH;
case tok::at:
case tok::semi:
@@ -625,7 +630,7 @@ static bool isIIFE(const UnwrappedLine &Line,
static bool ShouldBreakBeforeBrace(const FormatStyle &Style,
const FormatToken &InitialToken) {
- if (InitialToken.is(tok::kw_namespace))
+ if (InitialToken.isOneOf(tok::kw_namespace, TT_NamespaceMacro))
return Style.BraceWrapping.AfterNamespace;
if (InitialToken.is(tok::kw_class))
return Style.BraceWrapping.AfterClass;
@@ -656,6 +661,7 @@ void UnwrappedLineParser::parseChildBlock() {
void UnwrappedLineParser::parsePPDirective() {
assert(FormatTok->Tok.is(tok::hash) && "'#' expected");
ScopedMacroState MacroState(*Line, Tokens, FormatTok);
+
nextToken();
if (!FormatTok->Tok.getIdentifierInfo()) {
@@ -799,7 +805,7 @@ void UnwrappedLineParser::parsePPEndIf() {
void UnwrappedLineParser::parsePPDefine() {
nextToken();
- if (FormatTok->Tok.getKind() != tok::identifier) {
+ if (!FormatTok->Tok.getIdentifierInfo()) {
IncludeGuard = IG_Rejected;
IncludeGuardToken = nullptr;
parsePPUnknown();
@@ -824,7 +830,7 @@ void UnwrappedLineParser::parsePPDefine() {
FormatTok->WhitespaceRange.getEnd()) {
parseParens();
}
- if (Style.IndentPPDirectives == FormatStyle::PPDIS_AfterHash)
+ if (Style.IndentPPDirectives != FormatStyle::PPDIS_None)
Line->Level += PPBranchLevel + 1;
addUnwrappedLine();
++Line->Level;
@@ -841,7 +847,7 @@ void UnwrappedLineParser::parsePPUnknown() {
do {
nextToken();
} while (!eof());
- if (Style.IndentPPDirectives == FormatStyle::PPDIS_AfterHash)
+ if (Style.IndentPPDirectives != FormatStyle::PPDIS_None)
Line->Level += PPBranchLevel + 1;
addUnwrappedLine();
}
@@ -1000,7 +1006,7 @@ void UnwrappedLineParser::parseStructuralElement() {
case tok::kw_protected:
case tok::kw_private:
if (Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript)
+ Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp())
nextToken();
else
parseAccessSpecifier();
@@ -1116,6 +1122,10 @@ void UnwrappedLineParser::parseStructuralElement() {
parseStatementMacro();
return;
}
+ if (Style.isCpp() && FormatTok->is(TT_NamespaceMacro)) {
+ parseNamespace();
+ return;
+ }
// In all other cases, parse the declaration.
break;
default:
@@ -1167,8 +1177,8 @@ void UnwrappedLineParser::parseStructuralElement() {
case tok::objc_synchronized:
nextToken();
if (FormatTok->Tok.is(tok::l_paren))
- // Skip synchronization object
- parseParens();
+ // Skip synchronization object
+ parseParens();
if (FormatTok->Tok.is(tok::l_brace)) {
if (Style.BraceWrapping.AfterControlStatement)
addUnwrappedLine();
@@ -1214,9 +1224,9 @@ void UnwrappedLineParser::parseStructuralElement() {
// parseRecord falls through and does not yet add an unwrapped line as a
// record declaration or definition can start a structural element.
parseRecord();
- // This does not apply for Java and JavaScript.
+ // This does not apply for Java, JavaScript and C#.
if (Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript) {
+ Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp()) {
if (FormatTok->is(tok::semi))
nextToken();
addUnwrappedLine();
@@ -1329,10 +1339,15 @@ void UnwrappedLineParser::parseStructuralElement() {
// See if the following token should start a new unwrapped line.
StringRef Text = FormatTok->TokenText;
nextToken();
- if (Line->Tokens.size() == 1 &&
- // JS doesn't have macros, and within classes colons indicate fields,
- // not labels.
- Style.Language != FormatStyle::LK_JavaScript) {
+
+ // JS doesn't have macros, and within classes colons indicate fields, not
+ // labels.
+ if (Style.Language == FormatStyle::LK_JavaScript)
+ break;
+
+ TokenCount = Line->Tokens.size();
+ if (TokenCount == 1 ||
+ (TokenCount == 2 && Line->Tokens.front().Tok->is(tok::comment))) {
if (FormatTok->Tok.is(tok::colon) && !Line->MustBeDeclaration) {
Line->Tokens.begin()->Tok->MustBreakBefore = true;
parseLabel();
@@ -1402,6 +1417,8 @@ bool UnwrappedLineParser::tryToParseLambda() {
if (!tryToParseLambdaIntroducer())
return false;
+ bool SeenArrow = false;
+
while (FormatTok->isNot(tok::l_brace)) {
if (FormatTok->isSimpleTypeSpecifier()) {
nextToken();
@@ -1423,16 +1440,57 @@ bool UnwrappedLineParser::tryToParseLambda() {
case tok::numeric_constant:
case tok::coloncolon:
case tok::kw_mutable:
+ case tok::kw_noexcept:
nextToken();
break;
+ // Specialization of a template with an integer parameter can contain
+ // arithmetic, logical, comparison and ternary operators.
+ //
+ // FIXME: This also accepts sequences of operators that are not in the scope
+ // of a template argument list.
+ //
+ // In a C++ lambda a template type can only occur after an arrow. We use
+ // this as a heuristic to distinguish Objective-C expressions followed by
+ // an `a->b` expression, such as:
+ // ([obj func:arg] + a->b)
+ // from lambdas; otherwise the code below would parse them as lambdas.
+ case tok::plus:
+ case tok::minus:
+ case tok::exclaim:
+ case tok::tilde:
+ case tok::slash:
+ case tok::percent:
+ case tok::lessless:
+ case tok::pipe:
+ case tok::pipepipe:
+ case tok::ampamp:
+ case tok::caret:
+ case tok::equalequal:
+ case tok::exclaimequal:
+ case tok::greaterequal:
+ case tok::lessequal:
+ case tok::question:
+ case tok::colon:
+ case tok::kw_true:
+ case tok::kw_false:
+ if (SeenArrow) {
+ nextToken();
+ break;
+ }
+ return true;
case tok::arrow:
+ // This might or might not actually be a lambda arrow (this could be an
+ // ObjC method invocation followed by a dereferencing arrow). We might
+ // reset this back to TT_Unknown in TokenAnnotator.
FormatTok->Type = TT_LambdaArrow;
+ SeenArrow = true;
nextToken();
break;
default:
return true;
}
}
+ FormatTok->Type = TT_LambdaLBrace;
LSquare.Type = TT_LambdaLSquare;
parseChildBlock();
return true;
@@ -1806,12 +1864,17 @@ void UnwrappedLineParser::parseTryCatch() {
}
void UnwrappedLineParser::parseNamespace() {
- assert(FormatTok->Tok.is(tok::kw_namespace) && "'namespace' expected");
+ assert(FormatTok->isOneOf(tok::kw_namespace, TT_NamespaceMacro) &&
+ "'namespace' expected");
const FormatToken &InitialToken = *FormatTok;
nextToken();
- while (FormatTok->isOneOf(tok::identifier, tok::coloncolon))
- nextToken();
+ if (InitialToken.is(TT_NamespaceMacro)) {
+ parseParens();
+ } else {
+ while (FormatTok->isOneOf(tok::identifier, tok::coloncolon))
+ nextToken();
+ }
if (FormatTok->Tok.is(tok::l_brace)) {
if (ShouldBreakBeforeBrace(Style, InitialToken))
addUnwrappedLine();
@@ -1907,7 +1970,9 @@ void UnwrappedLineParser::parseLabel() {
if (Line->Level > 1 || (!Line->InPPDirective && Line->Level > 0))
--Line->Level;
if (CommentsBeforeNextToken.empty() && FormatTok->Tok.is(tok::l_brace)) {
- CompoundStatementIndenter Indenter(this, Style, Line->Level);
+ CompoundStatementIndenter Indenter(this, Line->Level,
+ Style.BraceWrapping.AfterCaseLabel,
+ Style.BraceWrapping.IndentBraces);
parseBlock(/*MustBeDeclaration=*/false);
if (FormatTok->Tok.is(tok::kw_break)) {
if (Style.BraceWrapping.AfterControlStatement)
@@ -1976,6 +2041,10 @@ bool UnwrappedLineParser::parseEnum() {
FormatTok->isOneOf(tok::colon, tok::question))
return false;
+ // In protobuf, "enum" can be used as a field name.
+ if (Style.Language == FormatStyle::LK_Proto && FormatTok->is(tok::equal))
+ return false;
+
// Eat up enum class ...
if (FormatTok->Tok.is(tok::kw_class) || FormatTok->Tok.is(tok::kw_struct))
nextToken();
@@ -2347,8 +2416,7 @@ void UnwrappedLineParser::parseJavaScriptEs6ImportExport() {
}
}
-void UnwrappedLineParser::parseStatementMacro()
-{
+void UnwrappedLineParser::parseStatementMacro() {
nextToken();
if (FormatTok->is(tok::l_paren))
parseParens();
@@ -2631,6 +2699,9 @@ void UnwrappedLineParser::readToken(int LevelDifference) {
// Comments stored before the preprocessor directive need to be output
// before the preprocessor directive, at the same level as the
// preprocessor directive, as we consider them to apply to the directive.
+ if (Style.IndentPPDirectives == FormatStyle::PPDIS_BeforeHash &&
+ PPBranchLevel > 0)
+ Line->Level += PPBranchLevel;
flushComments(isOnNewLine(*FormatTok));
parsePPDirective();
}
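
parseNamespace and ShouldBreakBeforeBrace now also accept tokens annotated as TT_NamespaceMacro, so macros listed under the NamespaceMacros style option are laid out like the namespace keyword. A small sketch, assuming a hypothetical macro MYLIB_NAMESPACE declared in the style's NamespaceMacros list (the end comment reflects how the namespace-comment fixer would label such a block, as I understand it):

  #define MYLIB_NAMESPACE(name) namespace name

  MYLIB_NAMESPACE(detail) {
  void helper();
  } // MYLIB_NAMESPACE(detail)
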
diff --git a/lib/Format/UnwrappedLineParser.h b/lib/Format/UnwrappedLineParser.h
index 55d60dff9152..e1b35317c7c0 100644
--- a/lib/Format/UnwrappedLineParser.h
+++ b/lib/Format/UnwrappedLineParser.h
@@ -1,9 +1,8 @@
//===--- UnwrappedLineParser.h - Format C++ code ----------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -77,8 +76,7 @@ class UnwrappedLineParser {
public:
UnwrappedLineParser(const FormatStyle &Style,
const AdditionalKeywords &Keywords,
- unsigned FirstStartColumn,
- ArrayRef<FormatToken *> Tokens,
+ unsigned FirstStartColumn, ArrayRef<FormatToken *> Tokens,
UnwrappedLineConsumer &Callback);
void parse();
diff --git a/lib/Format/UsingDeclarationsSorter.cpp b/lib/Format/UsingDeclarationsSorter.cpp
index 9e49e7913033..b6559db61d0c 100644
--- a/lib/Format/UsingDeclarationsSorter.cpp
+++ b/lib/Format/UsingDeclarationsSorter.cpp
@@ -1,9 +1,8 @@
//===--- UsingDeclarationsSorter.cpp ----------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -128,8 +127,7 @@ void endUsingDeclarationBlock(
}
SmallVector<UsingDeclaration, 4> SortedUsingDeclarations(
UsingDeclarations->begin(), UsingDeclarations->end());
- std::stable_sort(SortedUsingDeclarations.begin(),
- SortedUsingDeclarations.end());
+ llvm::stable_sort(SortedUsingDeclarations);
SortedUsingDeclarations.erase(
std::unique(SortedUsingDeclarations.begin(),
SortedUsingDeclarations.end(),
diff --git a/lib/Format/UsingDeclarationsSorter.h b/lib/Format/UsingDeclarationsSorter.h
index 7e5cf7610d67..4285a1ca03cf 100644
--- a/lib/Format/UsingDeclarationsSorter.h
+++ b/lib/Format/UsingDeclarationsSorter.h
@@ -1,9 +1,8 @@
//===--- UsingDeclarationsSorter.h ------------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/Format/WhitespaceManager.cpp b/lib/Format/WhitespaceManager.cpp
index 032b1333322d..23fbf94c7588 100644
--- a/lib/Format/WhitespaceManager.cpp
+++ b/lib/Format/WhitespaceManager.cpp
@@ -1,9 +1,8 @@
//===--- WhitespaceManager.cpp - Format C++ code --------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -92,6 +91,7 @@ const tooling::Replacements &WhitespaceManager::generateReplacements() {
llvm::sort(Changes, Change::IsBeforeInFile(SourceMgr));
calculateLineBreakInformation();
+ alignConsecutiveMacros();
alignConsecutiveDeclarations();
alignConsecutiveAssignments();
alignTrailingComments();
@@ -429,23 +429,148 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
return i;
}
+// Aligns a sequence of matching tokens on the MinColumn column.
+//
+// Sequences start from the first matching token to align, and end at the
+// first token of the first line that doesn't need to be aligned.
+//
+// We need to adjust the StartOfTokenColumn of each Change that is on a line
+// containing any matching token to be aligned and located after such token.
+static void AlignMacroSequence(
+ unsigned &StartOfSequence, unsigned &EndOfSequence, unsigned &MinColumn,
+ unsigned &MaxColumn, bool &FoundMatchOnLine,
+ std::function<bool(const WhitespaceManager::Change &C)> AlignMacrosMatches,
+ SmallVector<WhitespaceManager::Change, 16> &Changes) {
+ if (StartOfSequence > 0 && StartOfSequence < EndOfSequence) {
+
+ FoundMatchOnLine = false;
+ int Shift = 0;
+
+ for (unsigned I = StartOfSequence; I != EndOfSequence; ++I) {
+ if (Changes[I].NewlinesBefore > 0) {
+ Shift = 0;
+ FoundMatchOnLine = false;
+ }
+
+ // If this is the first matching token to be aligned, remember by how many
+ // spaces it has to be shifted, so the rest of the changes on the line are
+ // shifted by the same amount
+ if (!FoundMatchOnLine && AlignMacrosMatches(Changes[I])) {
+ FoundMatchOnLine = true;
+ Shift = MinColumn - Changes[I].StartOfTokenColumn;
+ Changes[I].Spaces += Shift;
+ }
+
+ assert(Shift >= 0);
+ Changes[I].StartOfTokenColumn += Shift;
+ if (I + 1 != Changes.size())
+ Changes[I + 1].PreviousEndOfTokenColumn += Shift;
+ }
+ }
+
+ MinColumn = 0;
+ MaxColumn = UINT_MAX;
+ StartOfSequence = 0;
+ EndOfSequence = 0;
+}
+
+void WhitespaceManager::alignConsecutiveMacros() {
+ if (!Style.AlignConsecutiveMacros)
+ return;
+
+ auto AlignMacrosMatches = [](const Change &C) {
+ const FormatToken *Current = C.Tok;
+ unsigned SpacesRequiredBefore = 1;
+
+ if (Current->SpacesRequiredBefore == 0 || !Current->Previous)
+ return false;
+
+ Current = Current->Previous;
+
+ // If the token is a ")", skip over the parameter list to the
+ // token that precedes the "(".
+ if (Current->is(tok::r_paren) && Current->MatchingParen) {
+ Current = Current->MatchingParen->Previous;
+ SpacesRequiredBefore = 0;
+ }
+
+ if (!Current || !Current->is(tok::identifier))
+ return false;
+
+ if (!Current->Previous || !Current->Previous->is(tok::pp_define))
+ return false;
+
+ // For a macro function, 0 spaces are required between the
+ // identifier and the lparen that opens the parameter list.
+ // For a simple macro, 1 space is required between the
+ // identifier and the first token of the defined value.
+ return Current->Next->SpacesRequiredBefore == SpacesRequiredBefore;
+ };
+
+ unsigned MinColumn = 0;
+ unsigned MaxColumn = UINT_MAX;
+
+ // Start and end of the token sequence we're processing.
+ unsigned StartOfSequence = 0;
+ unsigned EndOfSequence = 0;
+
+ // Whether a matching token has been found on the current line.
+ bool FoundMatchOnLine = false;
+
+ unsigned I = 0;
+ for (unsigned E = Changes.size(); I != E; ++I) {
+ if (Changes[I].NewlinesBefore != 0) {
+ EndOfSequence = I;
+ // If there is a blank line, or if the last line didn't contain any
+ // matching token, the sequence ends here.
+ if (Changes[I].NewlinesBefore > 1 || !FoundMatchOnLine)
+ AlignMacroSequence(StartOfSequence, EndOfSequence, MinColumn, MaxColumn,
+ FoundMatchOnLine, AlignMacrosMatches, Changes);
+
+ FoundMatchOnLine = false;
+ }
+
+ if (!AlignMacrosMatches(Changes[I]))
+ continue;
+
+ FoundMatchOnLine = true;
+
+ if (StartOfSequence == 0)
+ StartOfSequence = I;
+
+ unsigned ChangeMinColumn = Changes[I].StartOfTokenColumn;
+ int LineLengthAfter = -Changes[I].Spaces;
+ for (unsigned j = I; j != E && Changes[j].NewlinesBefore == 0; ++j)
+ LineLengthAfter += Changes[j].Spaces + Changes[j].TokenLength;
+ unsigned ChangeMaxColumn = Style.ColumnLimit - LineLengthAfter;
+
+ MinColumn = std::max(MinColumn, ChangeMinColumn);
+ MaxColumn = std::min(MaxColumn, ChangeMaxColumn);
+ }
+
+ EndOfSequence = I;
+ AlignMacroSequence(StartOfSequence, EndOfSequence, MinColumn, MaxColumn,
+ FoundMatchOnLine, AlignMacrosMatches, Changes);
+}
+
void WhitespaceManager::alignConsecutiveAssignments() {
if (!Style.AlignConsecutiveAssignments)
return;
- AlignTokens(Style,
- [&](const Change &C) {
- // Do not align on equal signs that are first on a line.
- if (C.NewlinesBefore > 0)
- return false;
+ AlignTokens(
+ Style,
+ [&](const Change &C) {
+ // Do not align on equal signs that are first on a line.
+ if (C.NewlinesBefore > 0)
+ return false;
- // Do not align on equal signs that are last on a line.
- if (&C != &Changes.back() && (&C + 1)->NewlinesBefore > 0)
- return false;
+ // Do not align on equal signs that are last on a line.
+ if (&C != &Changes.back() && (&C + 1)->NewlinesBefore > 0)
+ return false;
- return C.Tok->is(tok::equal);
- },
- Changes, /*StartAt=*/0);
+ return C.Tok->is(tok::equal);
+ },
+ Changes, /*StartAt=*/0);
}
void WhitespaceManager::alignConsecutiveDeclarations() {
@@ -458,15 +583,28 @@ void WhitespaceManager::alignConsecutiveDeclarations() {
// const char* const* v1;
// float const* v2;
// SomeVeryLongType const& v3;
- AlignTokens(Style,
- [](Change const &C) {
- // tok::kw_operator is necessary for aligning operator overload
- // definitions.
- return C.Tok->is(TT_StartOfName) ||
- C.Tok->is(TT_FunctionDeclarationName) ||
- C.Tok->is(tok::kw_operator);
- },
- Changes, /*StartAt=*/0);
+ AlignTokens(
+ Style,
+ [](Change const &C) {
+ // tok::kw_operator is necessary for aligning operator overload
+ // definitions.
+ if (C.Tok->isOneOf(TT_FunctionDeclarationName, tok::kw_operator))
+ return true;
+ if (C.Tok->isNot(TT_StartOfName))
+ return false;
+ // Check if there is a subsequent name that starts the same declaration.
+ for (FormatToken *Next = C.Tok->Next; Next; Next = Next->Next) {
+ if (Next->is(tok::comment))
+ continue;
+ if (!Next->Tok.getIdentifierInfo())
+ break;
+ if (Next->isOneOf(TT_StartOfName, TT_FunctionDeclarationName,
+ tok::kw_operator))
+ return false;
+ }
+ return true;
+ },
+ Changes, /*StartAt=*/0);
}
void WhitespaceManager::alignTrailingComments() {
@@ -542,11 +680,10 @@ void WhitespaceManager::alignTrailingComments() {
MinColumn = std::max(MinColumn, ChangeMinColumn);
MaxColumn = std::min(MaxColumn, ChangeMaxColumn);
}
- BreakBeforeNext =
- (i == 0) || (Changes[i].NewlinesBefore > 1) ||
- // Never start a sequence with a comment at the beginning of
- // the line.
- (Changes[i].NewlinesBefore == 1 && StartOfSequence == i);
+ BreakBeforeNext = (i == 0) || (Changes[i].NewlinesBefore > 1) ||
+ // Never start a sequence with a comment at the beginning
+ // of the line.
+ (Changes[i].NewlinesBefore == 1 && StartOfSequence == i);
Newlines = 0;
}
alignTrailingComments(StartOfSequence, Changes.size(), MinColumn);
@@ -680,11 +817,15 @@ void WhitespaceManager::appendIndentText(std::string &Text,
case FormatStyle::UT_Always: {
unsigned FirstTabWidth =
Style.TabWidth - WhitespaceStartColumn % Style.TabWidth;
- // Indent with tabs only when there's at least one full tab.
- if (FirstTabWidth + Style.TabWidth <= Spaces) {
- Spaces -= FirstTabWidth;
- Text.append("\t");
+ // Insert only spaces when we want to end up before the next tab.
+ if (Spaces < FirstTabWidth || Spaces == 1) {
+ Text.append(Spaces, ' ');
+ break;
}
+ // Align to the next tab.
+ Spaces -= FirstTabWidth;
+ Text.append("\t");
+
Text.append(Spaces / Style.TabWidth, '\t');
Text.append(Spaces % Style.TabWidth, ' ');
break;
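
alignConsecutiveMacros, added above, lines up the replacement text of consecutive #define lines when the new AlignConsecutiveMacros option is on. Roughly (illustrative only, not taken from this diff):

  #define SHORT_NAME  42
  #define LONGER_NAME 0x007f
  #define foo(x)      ((x) * (x))
  #define bar(y, z)   ((y) + (z))
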
diff --git a/lib/Format/WhitespaceManager.h b/lib/Format/WhitespaceManager.h
index db90343f7294..f47bf40204b3 100644
--- a/lib/Format/WhitespaceManager.h
+++ b/lib/Format/WhitespaceManager.h
@@ -1,9 +1,8 @@
//===--- WhitespaceManager.h - Format C++ code ------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -41,6 +40,8 @@ public:
bool UseCRLF)
: SourceMgr(SourceMgr), Style(Style), UseCRLF(UseCRLF) {}
+ bool useCRLF() const { return UseCRLF; }
+
/// Replaces the whitespace in front of \p Tok. Only call once for
/// each \c AnnotatedToken.
///
@@ -170,6 +171,9 @@ private:
/// \c EscapedNewlineColumn for the first tokens or token parts in a line.
void calculateLineBreakInformation();
+ /// Align consecutive C/C++ preprocessor macros over all \c Changes.
+ void alignConsecutiveMacros();
+
/// Align consecutive assignments over all \c Changes.
void alignConsecutiveAssignments();
diff --git a/lib/Frontend/ASTConsumers.cpp b/lib/Frontend/ASTConsumers.cpp
index 28834a2de8a2..26154ee2e856 100644
--- a/lib/Frontend/ASTConsumers.cpp
+++ b/lib/Frontend/ASTConsumers.cpp
@@ -1,9 +1,8 @@
//===--- ASTConsumers.cpp - ASTConsumer implementations -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -35,10 +34,12 @@ namespace {
public:
enum Kind { DumpFull, Dump, Print, None };
- ASTPrinter(std::unique_ptr<raw_ostream> Out, Kind K, StringRef FilterString,
+ ASTPrinter(std::unique_ptr<raw_ostream> Out, Kind K,
+ ASTDumpOutputFormat Format, StringRef FilterString,
bool DumpLookups = false)
: Out(Out ? *Out : llvm::outs()), OwnedOut(std::move(Out)),
- OutputKind(K), FilterString(FilterString), DumpLookups(DumpLookups) {}
+ OutputKind(K), OutputFormat(Format), FilterString(FilterString),
+ DumpLookups(DumpLookups) {}
void HandleTranslationUnit(ASTContext &Context) override {
TranslationUnitDecl *D = Context.getTranslationUnitDecl();
@@ -91,7 +92,7 @@ namespace {
PrintingPolicy Policy(D->getASTContext().getLangOpts());
D->print(Out, Policy, /*Indentation=*/0, /*PrintInstantiation=*/true);
} else if (OutputKind != None)
- D->dump(Out, OutputKind == DumpFull);
+ D->dump(Out, OutputKind == DumpFull, OutputFormat);
}
raw_ostream &Out;
@@ -100,6 +101,9 @@ namespace {
/// How to output individual declarations.
Kind OutputKind;
+ /// What format should the output take?
+ ASTDumpOutputFormat OutputFormat;
+
/// Which declarations or DeclContexts to display.
std::string FilterString;
@@ -136,20 +140,18 @@ std::unique_ptr<ASTConsumer>
clang::CreateASTPrinter(std::unique_ptr<raw_ostream> Out,
StringRef FilterString) {
return llvm::make_unique<ASTPrinter>(std::move(Out), ASTPrinter::Print,
- FilterString);
+ ADOF_Default, FilterString);
}
std::unique_ptr<ASTConsumer>
-clang::CreateASTDumper(std::unique_ptr<raw_ostream> Out,
- StringRef FilterString,
- bool DumpDecls,
- bool Deserialize,
- bool DumpLookups) {
+clang::CreateASTDumper(std::unique_ptr<raw_ostream> Out, StringRef FilterString,
+ bool DumpDecls, bool Deserialize, bool DumpLookups,
+ ASTDumpOutputFormat Format) {
assert((DumpDecls || Deserialize || DumpLookups) && "nothing to dump");
return llvm::make_unique<ASTPrinter>(std::move(Out),
Deserialize ? ASTPrinter::DumpFull :
DumpDecls ? ASTPrinter::Dump :
- ASTPrinter::None,
+ ASTPrinter::None, Format,
FilterString, DumpLookups);
}
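
CreateASTDumper now threads an ASTDumpOutputFormat through to the printer, which is what lets callers request JSON AST dumps. A minimal sketch of a caller; ADOF_Default appears in this diff, while ADOF_JSON is assumed here to be the JSON enumerator:

  #include "clang/Frontend/ASTConsumers.h"
  #include <memory>

  std::unique_ptr<clang::ASTConsumer> makeJsonDumper() {
    // Passing a null stream falls back to llvm::outs(), per the ASTPrinter ctor above.
    return clang::CreateASTDumper(/*Out=*/nullptr, /*FilterString=*/"",
                                  /*DumpDecls=*/true, /*Deserialize=*/false,
                                  /*DumpLookups=*/false,
                                  /*Format=*/clang::ADOF_JSON);
  }
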
diff --git a/lib/Frontend/ASTMerge.cpp b/lib/Frontend/ASTMerge.cpp
index 4f622da118c5..14d781ccdf93 100644
--- a/lib/Frontend/ASTMerge.cpp
+++ b/lib/Frontend/ASTMerge.cpp
@@ -1,16 +1,15 @@
//===-- ASTMerge.cpp - AST Merging Frontend Action --------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "clang/Frontend/ASTUnit.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/ASTImporter.h"
-#include "clang/AST/ASTImporterLookupTable.h"
+#include "clang/AST/ASTImporterSharedState.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendActions.h"
@@ -39,7 +38,7 @@ void ASTMergeAction::ExecuteAction() {
&CI.getASTContext());
IntrusiveRefCntPtr<DiagnosticIDs>
DiagIDs(CI.getDiagnostics().getDiagnosticIDs());
- ASTImporterLookupTable LookupTable(
+ auto SharedState = std::make_shared<ASTImporterSharedState>(
*CI.getASTContext().getTranslationUnitDecl());
for (unsigned I = 0, N = ASTFiles.size(); I != N; ++I) {
IntrusiveRefCntPtr<DiagnosticsEngine>
@@ -56,7 +55,7 @@ void ASTMergeAction::ExecuteAction() {
ASTImporter Importer(CI.getASTContext(), CI.getFileManager(),
Unit->getASTContext(), Unit->getFileManager(),
- /*MinimalImport=*/false, &LookupTable);
+ /*MinimalImport=*/false, SharedState);
TranslationUnitDecl *TU = Unit->getASTContext().getTranslationUnitDecl();
for (auto *D : TU->decls()) {
@@ -66,11 +65,13 @@ void ASTMergeAction::ExecuteAction() {
if (II->isStr("__va_list_tag") || II->isStr("__builtin_va_list"))
continue;
- Decl *ToD = Importer.Import(D);
+ llvm::Expected<Decl *> ToDOrError = Importer.Import(D);
- if (ToD) {
- DeclGroupRef DGR(ToD);
+ if (ToDOrError) {
+ DeclGroupRef DGR(*ToDOrError);
CI.getASTConsumer().HandleTopLevelDecl(DGR);
+ } else {
+ llvm::consumeError(ToDOrError.takeError());
}
}
}
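
ASTImporter::Import now returns llvm::Expected<Decl *> instead of a possibly-null pointer, so every failure has to be consumed or reported. A caller that wants to log failures rather than drop them might look like this sketch (the loop over TU->decls() mirrors the action above; the error message wording is mine):

  for (auto *D : TU->decls()) {
    llvm::Expected<clang::Decl *> ToDOrErr = Importer.Import(D);
    if (!ToDOrErr) {
      // Expected errors must be handled; here they are turned into a message.
      llvm::errs() << "import failed: "
                   << llvm::toString(ToDOrErr.takeError()) << "\n";
      continue;
    }
    CI.getASTConsumer().HandleTopLevelDecl(clang::DeclGroupRef(*ToDOrErr));
  }
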
diff --git a/lib/Frontend/ASTUnit.cpp b/lib/Frontend/ASTUnit.cpp
index c7b2551cb8d7..7445a94cfe59 100644
--- a/lib/Frontend/ASTUnit.cpp
+++ b/lib/Frontend/ASTUnit.cpp
@@ -1,9 +1,8 @@
//===- ASTUnit.cpp - ASTUnit utility --------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -31,7 +30,6 @@
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
-#include "clang/Basic/MemoryBufferCache.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
@@ -61,6 +59,7 @@
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/ASTWriter.h"
#include "clang/Serialization/ContinuousRangeMap.h"
+#include "clang/Serialization/InMemoryModuleCache.h"
#include "clang/Serialization/Module.h"
#include "clang/Serialization/PCHContainerOperations.h"
#include "llvm/ADT/ArrayRef.h"
@@ -76,7 +75,7 @@
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
-#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Bitstream/BitstreamWriter.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CrashRecoveryContext.h"
@@ -218,8 +217,8 @@ struct ASTUnit::ASTWriterData {
llvm::BitstreamWriter Stream;
ASTWriter Writer;
- ASTWriterData(MemoryBufferCache &PCMCache)
- : Stream(Buffer), Writer(Stream, Buffer, PCMCache, {}) {}
+ ASTWriterData(InMemoryModuleCache &ModuleCache)
+ : Stream(Buffer), Writer(Stream, Buffer, ModuleCache, {}) {}
};
void ASTUnit::clearFileLevelDecls() {
@@ -609,17 +608,20 @@ private:
};
/// Diagnostic consumer that saves each diagnostic it is given.
-class StoredDiagnosticConsumer : public DiagnosticConsumer {
+class FilterAndStoreDiagnosticConsumer : public DiagnosticConsumer {
SmallVectorImpl<StoredDiagnostic> *StoredDiags;
SmallVectorImpl<ASTUnit::StandaloneDiagnostic> *StandaloneDiags;
+ bool CaptureNonErrorsFromIncludes = true;
const LangOptions *LangOpts = nullptr;
SourceManager *SourceMgr = nullptr;
public:
- StoredDiagnosticConsumer(
+ FilterAndStoreDiagnosticConsumer(
SmallVectorImpl<StoredDiagnostic> *StoredDiags,
- SmallVectorImpl<ASTUnit::StandaloneDiagnostic> *StandaloneDiags)
- : StoredDiags(StoredDiags), StandaloneDiags(StandaloneDiags) {
+ SmallVectorImpl<ASTUnit::StandaloneDiagnostic> *StandaloneDiags,
+ bool CaptureNonErrorsFromIncludes)
+ : StoredDiags(StoredDiags), StandaloneDiags(StandaloneDiags),
+ CaptureNonErrorsFromIncludes(CaptureNonErrorsFromIncludes) {
assert((StoredDiags || StandaloneDiags) &&
"No output collections were passed to StoredDiagnosticConsumer.");
}
@@ -635,21 +637,25 @@ public:
const Diagnostic &Info) override;
};
-/// RAII object that optionally captures diagnostics, if
+/// RAII object that optionally captures and filters diagnostics, if
/// there is no diagnostic client to capture them already.
class CaptureDroppedDiagnostics {
DiagnosticsEngine &Diags;
- StoredDiagnosticConsumer Client;
+ FilterAndStoreDiagnosticConsumer Client;
DiagnosticConsumer *PreviousClient = nullptr;
std::unique_ptr<DiagnosticConsumer> OwningPreviousClient;
public:
CaptureDroppedDiagnostics(
- bool RequestCapture, DiagnosticsEngine &Diags,
+ CaptureDiagsKind CaptureDiagnostics, DiagnosticsEngine &Diags,
SmallVectorImpl<StoredDiagnostic> *StoredDiags,
SmallVectorImpl<ASTUnit::StandaloneDiagnostic> *StandaloneDiags)
- : Diags(Diags), Client(StoredDiags, StandaloneDiags) {
- if (RequestCapture || Diags.getClient() == nullptr) {
+ : Diags(Diags),
+ Client(StoredDiags, StandaloneDiags,
+ CaptureDiagnostics !=
+ CaptureDiagsKind::AllWithoutNonErrorsFromIncludes) {
+ if (CaptureDiagnostics != CaptureDiagsKind::None ||
+ Diags.getClient() == nullptr) {
OwningPreviousClient = Diags.takeClient();
PreviousClient = Diags.getClient();
Diags.setClient(&Client, false);
@@ -668,8 +674,16 @@ static ASTUnit::StandaloneDiagnostic
makeStandaloneDiagnostic(const LangOptions &LangOpts,
const StoredDiagnostic &InDiag);
-void StoredDiagnosticConsumer::HandleDiagnostic(DiagnosticsEngine::Level Level,
- const Diagnostic &Info) {
+static bool isInMainFile(const clang::Diagnostic &D) {
+ if (!D.hasSourceManager() || !D.getLocation().isValid())
+ return false;
+
+ auto &M = D.getSourceManager();
+ return M.isWrittenInMainFile(M.getExpansionLoc(D.getLocation()));
+}
+
+void FilterAndStoreDiagnosticConsumer::HandleDiagnostic(
+ DiagnosticsEngine::Level Level, const Diagnostic &Info) {
// Default implementation (Warnings/errors count).
DiagnosticConsumer::HandleDiagnostic(Level, Info);
@@ -677,6 +691,11 @@ void StoredDiagnosticConsumer::HandleDiagnostic(DiagnosticsEngine::Level Level,
// about. This effectively drops diagnostics from modules we're building.
// FIXME: In the long run, we don't want to drop source managers from modules.
if (!Info.hasSourceManager() || &Info.getSourceManager() == SourceMgr) {
+ if (!CaptureNonErrorsFromIncludes && Level <= DiagnosticsEngine::Warning &&
+ !isInMainFile(Info)) {
+ return;
+ }
+
StoredDiagnostic *ResultDiag = nullptr;
if (StoredDiags) {
StoredDiags->emplace_back(Level, Info);
@@ -724,10 +743,13 @@ ASTUnit::getBufferForFile(StringRef Filename, std::string *ErrorStr) {
/// Configure the diagnostics object for use with ASTUnit.
void ASTUnit::ConfigureDiags(IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
- ASTUnit &AST, bool CaptureDiagnostics) {
+ ASTUnit &AST,
+ CaptureDiagsKind CaptureDiagnostics) {
assert(Diags.get() && "no DiagnosticsEngine was provided");
- if (CaptureDiagnostics)
- Diags->setClient(new StoredDiagnosticConsumer(&AST.StoredDiagnostics, nullptr));
+ if (CaptureDiagnostics != CaptureDiagsKind::None)
+ Diags->setClient(new FilterAndStoreDiagnosticConsumer(
+ &AST.StoredDiagnostics, nullptr,
+ CaptureDiagnostics != CaptureDiagsKind::AllWithoutNonErrorsFromIncludes));
}
std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
@@ -735,7 +757,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
WhatToLoad ToLoad, IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
const FileSystemOptions &FileSystemOpts, bool UseDebugInfo,
bool OnlyLocalDecls, ArrayRef<RemappedFile> RemappedFiles,
- bool CaptureDiagnostics, bool AllowPCHWithCompilerErrors,
+ CaptureDiagsKind CaptureDiagnostics, bool AllowPCHWithCompilerErrors,
bool UserFilesAreVolatile) {
std::unique_ptr<ASTUnit> AST(new ASTUnit(true));
@@ -759,7 +781,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
AST->SourceMgr = new SourceManager(AST->getDiagnostics(),
AST->getFileManager(),
UserFilesAreVolatile);
- AST->PCMCache = new MemoryBufferCache;
+ AST->ModuleCache = new InMemoryModuleCache;
AST->HSOpts = std::make_shared<HeaderSearchOptions>();
AST->HSOpts->ModuleFormat = PCHContainerRdr.getFormat();
AST->HeaderInfo.reset(new HeaderSearch(AST->HSOpts,
@@ -779,7 +801,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
AST->PP = std::make_shared<Preprocessor>(
AST->PPOpts, AST->getDiagnostics(), *AST->LangOpts,
- AST->getSourceManager(), *AST->PCMCache, HeaderInfo, AST->ModuleLoader,
+ AST->getSourceManager(), HeaderInfo, AST->ModuleLoader,
/*IILookup=*/nullptr,
/*OwnsHeaderSearch=*/false);
Preprocessor &PP = *AST->PP;
@@ -792,10 +814,10 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
bool disableValid = false;
if (::getenv("LIBCLANG_DISABLE_PCH_VALIDATION"))
disableValid = true;
- AST->Reader = new ASTReader(PP, AST->Ctx.get(), PCHContainerRdr, {},
- /*isysroot=*/"",
- /*DisableValidation=*/disableValid,
- AllowPCHWithCompilerErrors);
+ AST->Reader = new ASTReader(
+ PP, *AST->ModuleCache, AST->Ctx.get(), PCHContainerRdr, {},
+ /*isysroot=*/"",
+ /*DisableValidation=*/disableValid, AllowPCHWithCompilerErrors);
AST->Reader->setListener(llvm::make_unique<ASTInfoCollector>(
*AST->PP, AST->Ctx.get(), *AST->HSOpts, *AST->PPOpts, *AST->LangOpts,
@@ -1079,28 +1101,29 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
if (!Invocation)
return true;
+ if (VFS && FileMgr)
+ assert(VFS == &FileMgr->getVirtualFileSystem() &&
+ "VFS passed to Parse and VFS in FileMgr are different");
+
auto CCInvocation = std::make_shared<CompilerInvocation>(*Invocation);
if (OverrideMainBuffer) {
assert(Preamble &&
"No preamble was built, but OverrideMainBuffer is not null");
- IntrusiveRefCntPtr<llvm::vfs::FileSystem> OldVFS = VFS;
Preamble->AddImplicitPreamble(*CCInvocation, VFS, OverrideMainBuffer.get());
- if (OldVFS != VFS && FileMgr) {
- assert(OldVFS == FileMgr->getVirtualFileSystem() &&
- "VFS passed to Parse and VFS in FileMgr are different");
- FileMgr = new FileManager(FileMgr->getFileSystemOpts(), VFS);
- }
+ // VFS may have changed...
}
// Create the compiler instance to use for building the AST.
std::unique_ptr<CompilerInstance> Clang(
new CompilerInstance(std::move(PCHContainerOps)));
- if (FileMgr && VFS) {
- assert(VFS == FileMgr->getVirtualFileSystem() &&
- "VFS passed to Parse and VFS in FileMgr are different");
- } else if (VFS) {
- Clang->setVirtualFileSystem(VFS);
- }
+
+ // Ensure that Clang has a FileManager with the right VFS, which may have
+ // changed above in AddImplicitPreamble. If VFS is nullptr, rely on
+ // createFileManager to create one.
+ if (VFS && FileMgr && &FileMgr->getVirtualFileSystem() == VFS)
+ Clang->setFileManager(&*FileMgr);
+ else
+ FileMgr = Clang->createFileManager(std::move(VFS));
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
@@ -1137,10 +1160,6 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
// Configure the various subsystems.
LangOpts = Clang->getInvocation().LangOpts;
FileSystemOpts = Clang->getFileSystemOpts();
- if (!FileMgr) {
- Clang->createFileManager();
- FileMgr = &Clang->getFileManager();
- }
ResetForParse();
@@ -1187,8 +1206,10 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
else
PreambleSrcLocCache.clear();
- if (!Act->Execute())
+ if (llvm::Error Err = Act->Execute()) {
+ consumeError(std::move(Err)); // FIXME this drops errors on the floor.
goto error;
+ }
transferASTDataFromCompilerInstance(*Clang);
@@ -1308,22 +1329,22 @@ ASTUnit::getMainBufferWithPrecompiledPreamble(
PreambleInvocationIn.getDiagnosticOpts());
getDiagnostics().setNumWarnings(NumWarningsInPreamble);
- PreambleRebuildCounter = 1;
+ PreambleRebuildCountdown = 1;
return MainFileBuffer;
} else {
Preamble.reset();
PreambleDiagnostics.clear();
TopLevelDeclsInPreamble.clear();
PreambleSrcLocCache.clear();
- PreambleRebuildCounter = 1;
+ PreambleRebuildCountdown = 1;
}
}
// If the preamble rebuild counter > 1, it's because we previously
// failed to build a preamble and we're not yet ready to try
// again. Decrement the counter and return a failure.
- if (PreambleRebuildCounter > 1) {
- --PreambleRebuildCounter;
+ if (PreambleRebuildCountdown > 1) {
+ --PreambleRebuildCountdown;
return nullptr;
}
@@ -1333,13 +1354,15 @@ ASTUnit::getMainBufferWithPrecompiledPreamble(
if (!AllowRebuild)
return nullptr;
+ ++PreambleCounter;
+
SmallVector<StandaloneDiagnostic, 4> NewPreambleDiagsStandalone;
SmallVector<StoredDiagnostic, 4> NewPreambleDiags;
ASTUnitPreambleCallbacks Callbacks;
{
llvm::Optional<CaptureDroppedDiagnostics> Capture;
- if (CaptureDiagnostics)
- Capture.emplace(/*RequestCapture=*/true, *Diagnostics, &NewPreambleDiags,
+ if (CaptureDiagnostics != CaptureDiagsKind::None)
+ Capture.emplace(CaptureDiagnostics, *Diagnostics, &NewPreambleDiags,
&NewPreambleDiagsStandalone);
// We did not previously compute a preamble, or it can't be reused anyway.
@@ -1360,18 +1383,19 @@ ASTUnit::getMainBufferWithPrecompiledPreamble(
if (NewPreamble) {
Preamble = std::move(*NewPreamble);
- PreambleRebuildCounter = 1;
+ PreambleRebuildCountdown = 1;
} else {
switch (static_cast<BuildPreambleError>(NewPreamble.getError().value())) {
case BuildPreambleError::CouldntCreateTempFile:
// Try again next time.
- PreambleRebuildCounter = 1;
+ PreambleRebuildCountdown = 1;
return nullptr;
case BuildPreambleError::CouldntCreateTargetInfo:
case BuildPreambleError::BeginSourceFileFailed:
case BuildPreambleError::CouldntEmitPCH:
+ case BuildPreambleError::BadInputs:
// These errors are more likely to repeat, retry after some period.
- PreambleRebuildCounter = DefaultPreambleRebuildInterval;
+ PreambleRebuildCountdown = DefaultPreambleRebuildInterval;
return nullptr;
}
llvm_unreachable("unexpected BuildPreambleError");
@@ -1466,7 +1490,8 @@ StringRef ASTUnit::getASTFileName() const {
std::unique_ptr<ASTUnit>
ASTUnit::create(std::shared_ptr<CompilerInvocation> CI,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
- bool CaptureDiagnostics, bool UserFilesAreVolatile) {
+ CaptureDiagsKind CaptureDiagnostics,
+ bool UserFilesAreVolatile) {
std::unique_ptr<ASTUnit> AST(new ASTUnit(false));
ConfigureDiags(Diags, *AST, CaptureDiagnostics);
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS =
@@ -1478,7 +1503,7 @@ ASTUnit::create(std::shared_ptr<CompilerInvocation> CI,
AST->UserFilesAreVolatile = UserFilesAreVolatile;
AST->SourceMgr = new SourceManager(AST->getDiagnostics(), *AST->FileMgr,
UserFilesAreVolatile);
- AST->PCMCache = new MemoryBufferCache;
+ AST->ModuleCache = new InMemoryModuleCache;
return AST;
}
@@ -1488,7 +1513,7 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags, FrontendAction *Action,
ASTUnit *Unit, bool Persistent, StringRef ResourceFilesPath,
- bool OnlyLocalDecls, bool CaptureDiagnostics,
+ bool OnlyLocalDecls, CaptureDiagsKind CaptureDiagnostics,
unsigned PrecompilePreambleAfterNParses, bool CacheCodeCompletionResults,
bool IncludeBriefCommentsInCodeCompletion, bool UserFilesAreVolatile,
std::unique_ptr<ASTUnit> *ErrAST) {
@@ -1511,7 +1536,7 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
AST->OnlyLocalDecls = OnlyLocalDecls;
AST->CaptureDiagnostics = CaptureDiagnostics;
if (PrecompilePreambleAfterNParses > 0)
- AST->PreambleRebuildCounter = PrecompilePreambleAfterNParses;
+ AST->PreambleRebuildCountdown = PrecompilePreambleAfterNParses;
AST->TUKind = Action ? Action->getTranslationUnitKind() : TU_Complete;
AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults;
AST->IncludeBriefCommentsInCodeCompletion
@@ -1609,7 +1634,8 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
Clang->setASTConsumer(
llvm::make_unique<MultiplexConsumer>(std::move(Consumers)));
}
- if (!Act->Execute()) {
+ if (llvm::Error Err = Act->Execute()) {
+ consumeError(std::move(Err)); // FIXME this drops errors on the floor.
AST->transferASTDataFromCompilerInstance(*Clang);
if (OwnAST && ErrAST)
ErrAST->swap(OwnAST);
@@ -1645,7 +1671,7 @@ bool ASTUnit::LoadFromCompilerInvocation(
std::unique_ptr<llvm::MemoryBuffer> OverrideMainBuffer;
if (PrecompilePreambleAfterNParses > 0) {
- PreambleRebuildCounter = PrecompilePreambleAfterNParses;
+ PreambleRebuildCountdown = PrecompilePreambleAfterNParses;
OverrideMainBuffer =
getMainBufferWithPrecompiledPreamble(PCHContainerOps, *Invocation, VFS);
getDiagnostics().Reset();
@@ -1666,7 +1692,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromCompilerInvocation(
std::shared_ptr<CompilerInvocation> CI,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags, FileManager *FileMgr,
- bool OnlyLocalDecls, bool CaptureDiagnostics,
+ bool OnlyLocalDecls, CaptureDiagsKind CaptureDiagnostics,
unsigned PrecompilePreambleAfterNParses, TranslationUnitKind TUKind,
bool CacheCodeCompletionResults, bool IncludeBriefCommentsInCodeCompletion,
bool UserFilesAreVolatile) {
@@ -1694,7 +1720,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromCompilerInvocation(
if (AST->LoadFromCompilerInvocation(std::move(PCHContainerOps),
PrecompilePreambleAfterNParses,
- AST->FileMgr->getVirtualFileSystem()))
+ &AST->FileMgr->getVirtualFileSystem()))
return nullptr;
return AST;
}
@@ -1703,7 +1729,7 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
const char **ArgBegin, const char **ArgEnd,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags, StringRef ResourceFilesPath,
- bool OnlyLocalDecls, bool CaptureDiagnostics,
+ bool OnlyLocalDecls, CaptureDiagsKind CaptureDiagnostics,
ArrayRef<RemappedFile> RemappedFiles, bool RemappedFilesKeepOriginalName,
unsigned PrecompilePreambleAfterNParses, TranslationUnitKind TUKind,
bool CacheCodeCompletionResults, bool IncludeBriefCommentsInCodeCompletion,
@@ -1758,7 +1784,7 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
VFS = llvm::vfs::getRealFileSystem();
VFS = createVFSFromCompilerInvocation(*CI, *Diags, VFS);
AST->FileMgr = new FileManager(AST->FileSystemOpts, VFS);
- AST->PCMCache = new MemoryBufferCache;
+ AST->ModuleCache = new InMemoryModuleCache;
AST->OnlyLocalDecls = OnlyLocalDecls;
AST->CaptureDiagnostics = CaptureDiagnostics;
AST->TUKind = TUKind;
@@ -1769,7 +1795,7 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
AST->Invocation = CI;
AST->SkipFunctionBodies = SkipFunctionBodies;
if (ForSerialization)
- AST->WriterData.reset(new ASTWriterData(*AST->PCMCache));
+ AST->WriterData.reset(new ASTWriterData(*AST->ModuleCache));
// Zero out now to ease cleanup during crash recovery.
CI = nullptr;
Diags = nullptr;
@@ -1801,7 +1827,7 @@ bool ASTUnit::Reparse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
if (!VFS) {
assert(FileMgr && "FileMgr is null on Reparse call");
- VFS = FileMgr->getVirtualFileSystem();
+ VFS = &FileMgr->getVirtualFileSystem();
}
clearFileLevelDecls();
@@ -1823,7 +1849,7 @@ bool ASTUnit::Reparse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
// If we have a preamble file lying around, or if we might try to
// build a precompiled preamble, do so now.
std::unique_ptr<llvm::MemoryBuffer> OverrideMainBuffer;
- if (Preamble || PreambleRebuildCounter > 0)
+ if (Preamble || PreambleRebuildCountdown > 0)
OverrideMainBuffer =
getMainBufferWithPrecompiledPreamble(PCHContainerOps, *Invocation, VFS);
@@ -1881,8 +1907,7 @@ namespace {
public:
AugmentedCodeCompleteConsumer(ASTUnit &AST, CodeCompleteConsumer &Next,
const CodeCompleteOptions &CodeCompleteOpts)
- : CodeCompleteConsumer(CodeCompleteOpts, Next.isOutputBinary()),
- AST(AST), Next(Next) {
+ : CodeCompleteConsumer(CodeCompleteOpts), AST(AST), Next(Next) {
// Compute the set of contexts in which we will look when we don't have
// any information about the specific context.
NormalContexts
@@ -2161,7 +2186,7 @@ void ASTUnit::CodeComplete(
// Set up diagnostics, capturing any diagnostics produced.
Clang->setDiagnostics(&Diag);
- CaptureDroppedDiagnostics Capture(true,
+ CaptureDroppedDiagnostics Capture(CaptureDiagsKind::All,
Clang->getDiagnostics(),
&StoredDiagnostics, nullptr);
ProcessWarningOptions(Diag, Inv.getDiagnosticOpts());
@@ -2215,18 +2240,18 @@ void ASTUnit::CodeComplete(
if (Preamble) {
std::string CompleteFilePath(File);
- auto VFS = FileMgr.getVirtualFileSystem();
- auto CompleteFileStatus = VFS->status(CompleteFilePath);
+ auto &VFS = FileMgr.getVirtualFileSystem();
+ auto CompleteFileStatus = VFS.status(CompleteFilePath);
if (CompleteFileStatus) {
llvm::sys::fs::UniqueID CompleteFileID = CompleteFileStatus->getUniqueID();
std::string MainPath(OriginalSourceFile);
- auto MainStatus = VFS->status(MainPath);
+ auto MainStatus = VFS.status(MainPath);
if (MainStatus) {
llvm::sys::fs::UniqueID MainID = MainStatus->getUniqueID();
if (CompleteFileID == MainID && Line > 1)
OverrideMainBuffer = getMainBufferWithPrecompiledPreamble(
- PCHContainerOps, Inv, VFS, false, Line - 1);
+ PCHContainerOps, Inv, &VFS, false, Line - 1);
}
}
}
@@ -2237,7 +2262,8 @@ void ASTUnit::CodeComplete(
assert(Preamble &&
"No preamble was built, but OverrideMainBuffer is not null");
- auto VFS = FileMgr.getVirtualFileSystem();
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS =
+ &FileMgr.getVirtualFileSystem();
Preamble->AddImplicitPreamble(Clang->getInvocation(), VFS,
OverrideMainBuffer.get());
// FIXME: there is no way to update VFS if it was changed by
@@ -2257,7 +2283,9 @@ void ASTUnit::CodeComplete(
std::unique_ptr<SyntaxOnlyAction> Act;
Act.reset(new SyntaxOnlyAction);
if (Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0])) {
- Act->Execute();
+ if (llvm::Error Err = Act->Execute()) {
+ consumeError(std::move(Err)); // FIXME this drops errors on the floor.
+ }
Act->EndSourceFile();
}
}
@@ -2318,8 +2346,8 @@ bool ASTUnit::serialize(raw_ostream &OS) {
SmallString<128> Buffer;
llvm::BitstreamWriter Stream(Buffer);
- MemoryBufferCache PCMCache;
- ASTWriter Writer(Stream, Buffer, PCMCache, {});
+ InMemoryModuleCache ModuleCache;
+ ASTWriter Writer(Stream, Buffer, ModuleCache, {});
return serializeUnit(Writer, Buffer, getSema(), hasErrors, OS);
}
@@ -2419,8 +2447,8 @@ void ASTUnit::addFileLevelDecl(Decl *D) {
return;
}
- LocDeclsTy::iterator I = std::upper_bound(Decls->begin(), Decls->end(),
- LocDecl, llvm::less_first());
+ LocDeclsTy::iterator I =
+ llvm::upper_bound(*Decls, LocDecl, llvm::less_first());
Decls->insert(I, LocDecl);
}
@@ -2445,9 +2473,9 @@ void ASTUnit::findFileRegionDecls(FileID File, unsigned Offset, unsigned Length,
return;
LocDeclsTy::iterator BeginIt =
- std::lower_bound(LocDecls.begin(), LocDecls.end(),
- std::make_pair(Offset, (Decl *)nullptr),
- llvm::less_first());
+ llvm::partition_point(LocDecls, [=](std::pair<unsigned, Decl *> LD) {
+ return LD.first < Offset;
+ });
if (BeginIt != LocDecls.begin())
--BeginIt;
@@ -2458,9 +2486,9 @@ void ASTUnit::findFileRegionDecls(FileID File, unsigned Offset, unsigned Length,
BeginIt->second->isTopLevelDeclInObjCContainer())
--BeginIt;
- LocDeclsTy::iterator EndIt = std::upper_bound(
- LocDecls.begin(), LocDecls.end(),
- std::make_pair(Offset + Length, (Decl *)nullptr), llvm::less_first());
+ LocDeclsTy::iterator EndIt = llvm::upper_bound(
+ LocDecls, std::make_pair(Offset + Length, (Decl *)nullptr),
+ llvm::less_first());
if (EndIt != LocDecls.end())
++EndIt;
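
The ASTUnit.cpp hunks above swap explicit std::upper_bound/std::lower_bound calls over iterator pairs for LLVM's range-based helpers. A minimal sketch of that pattern, assuming only llvm/ADT/STLExtras.h and standard headers (illustrative, not part of the vendor diff):

// Sketch: the range-based llvm::upper_bound / llvm::partition_point helpers
// that replace the begin()/end() forms in the ASTUnit.cpp hunks.
#include "llvm/ADT/STLExtras.h"
#include <utility>
#include <vector>

using LocDeclsTy = std::vector<std::pair<unsigned, void *>>;

// First element at or after Offset; the predicate names the "still below
// Offset" prefix, exactly as the findFileRegionDecls hunk does.
LocDeclsTy::iterator firstAtOrAfter(LocDeclsTy &LocDecls, unsigned Offset) {
  return llvm::partition_point(LocDecls, [=](std::pair<unsigned, void *> LD) {
    return LD.first < Offset;
  });
}

// Keep the vector sorted by offset on insertion, as addFileLevelDecl does.
void insertSorted(LocDeclsTy &Decls, std::pair<unsigned, void *> LocDecl) {
  Decls.insert(llvm::upper_bound(Decls, LocDecl, llvm::less_first()), LocDecl);
}
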
diff --git a/lib/Frontend/ChainedDiagnosticConsumer.cpp b/lib/Frontend/ChainedDiagnosticConsumer.cpp
index d77fd180ea0d..793c5ff8a2b5 100644
--- a/lib/Frontend/ChainedDiagnosticConsumer.cpp
+++ b/lib/Frontend/ChainedDiagnosticConsumer.cpp
@@ -1,9 +1,8 @@
//===- ChainedDiagnosticConsumer.cpp - Chain Diagnostic Clients -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Frontend/ChainedIncludesSource.cpp b/lib/Frontend/ChainedIncludesSource.cpp
index 1bfc25c4c778..48154ecf4742 100644
--- a/lib/Frontend/ChainedIncludesSource.cpp
+++ b/lib/Frontend/ChainedIncludesSource.cpp
@@ -1,9 +1,8 @@
//===- ChainedIncludesSource.cpp - Chained PCHs in Memory -------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -83,9 +82,9 @@ createASTReader(CompilerInstance &CI, StringRef pchFile,
ASTDeserializationListener *deserialListener = nullptr) {
Preprocessor &PP = CI.getPreprocessor();
std::unique_ptr<ASTReader> Reader;
- Reader.reset(new ASTReader(PP, &CI.getASTContext(),
+ Reader.reset(new ASTReader(PP, CI.getModuleCache(), &CI.getASTContext(),
CI.getPCHContainerReader(),
- /*Extensions=*/{ },
+ /*Extensions=*/{},
/*isysroot=*/"", /*DisableValidation=*/true));
for (unsigned ti = 0; ti < bufNames.size(); ++ti) {
StringRef sr(bufNames[ti]);
@@ -160,8 +159,8 @@ IntrusiveRefCntPtr<ExternalSemaSource> clang::createChainedIncludesSource(
auto Buffer = std::make_shared<PCHBuffer>();
ArrayRef<std::shared_ptr<ModuleFileExtension>> Extensions;
auto consumer = llvm::make_unique<PCHGenerator>(
- Clang->getPreprocessor(), "-", /*isysroot=*/"", Buffer,
- Extensions, /*AllowASTWithErrors=*/true);
+ Clang->getPreprocessor(), Clang->getModuleCache(), "-", /*isysroot=*/"",
+ Buffer, Extensions, /*AllowASTWithErrors=*/true);
Clang->getASTContext().setASTMutationListener(
consumer->GetASTMutationListener());
Clang->setASTConsumer(std::move(consumer));
diff --git a/lib/Frontend/CompilerInstance.cpp b/lib/Frontend/CompilerInstance.cpp
index f66674535423..cf0267549e75 100644
--- a/lib/Frontend/CompilerInstance.cpp
+++ b/lib/Frontend/CompilerInstance.cpp
@@ -1,9 +1,8 @@
//===--- CompilerInstance.cpp ---------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -14,7 +13,6 @@
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/FileManager.h"
-#include "clang/Basic/MemoryBufferCache.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Stack.h"
#include "clang/Basic/TargetInfo.h"
@@ -36,6 +34,7 @@
#include "clang/Sema/Sema.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/GlobalModuleIndex.h"
+#include "clang/Serialization/InMemoryModuleCache.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/BuryPointer.h"
#include "llvm/Support/CrashRecoveryContext.h"
@@ -47,6 +46,7 @@
#include "llvm/Support/Path.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/Signals.h"
+#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include <sys/stat.h>
@@ -58,15 +58,12 @@ using namespace clang;
CompilerInstance::CompilerInstance(
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
- MemoryBufferCache *SharedPCMCache)
- : ModuleLoader(/* BuildingModule = */ SharedPCMCache),
+ InMemoryModuleCache *SharedModuleCache)
+ : ModuleLoader(/* BuildingModule = */ SharedModuleCache),
Invocation(new CompilerInvocation()),
- PCMCache(SharedPCMCache ? SharedPCMCache : new MemoryBufferCache),
- ThePCHContainerOperations(std::move(PCHContainerOps)) {
- // Don't allow this to invalidate buffers in use by others.
- if (SharedPCMCache)
- getPCMCache().finalizeCurrentBuffers();
-}
+ ModuleCache(SharedModuleCache ? SharedModuleCache
+ : new InMemoryModuleCache),
+ ThePCHContainerOperations(std::move(PCHContainerOps)) {}
CompilerInstance::~CompilerInstance() {
assert(OutputFiles.empty() && "Still output files in flight?");
@@ -93,10 +90,6 @@ void CompilerInstance::setAuxTarget(TargetInfo *Value) { AuxTarget = Value; }
void CompilerInstance::setFileManager(FileManager *Value) {
FileMgr = Value;
- if (Value)
- VirtualFileSystem = Value->getVirtualFileSystem();
- else
- VirtualFileSystem.reset();
}
void CompilerInstance::setSourceManager(SourceManager *Value) {
@@ -137,7 +130,7 @@ IntrusiveRefCntPtr<ASTReader> CompilerInstance::getModuleManager() const {
return ModuleManager;
}
void CompilerInstance::setModuleManager(IntrusiveRefCntPtr<ASTReader> Reader) {
- assert(PCMCache.get() == &Reader->getModuleManager().getPCMCache() &&
+ assert(ModuleCache.get() == &Reader->getModuleManager().getModuleCache() &&
"Expected ASTReader to use the same PCM cache");
ModuleManager = std::move(Reader);
}
@@ -177,7 +170,7 @@ static void collectIncludePCH(CompilerInstance &CI,
std::error_code EC;
SmallString<128> DirNative;
llvm::sys::path::native(PCHDir->getName(), DirNative);
- llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
SimpleASTReaderListener Validator(CI.getPreprocessor());
for (llvm::vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
@@ -239,9 +232,13 @@ static void SetUpDiagnosticLog(DiagnosticOptions *DiagOpts,
std::move(StreamOwner));
if (CodeGenOpts)
Logger->setDwarfDebugFlags(CodeGenOpts->DwarfDebugFlags);
- assert(Diags.ownsClient());
- Diags.setClient(
- new ChainedDiagnosticConsumer(Diags.takeClient(), std::move(Logger)));
+ if (Diags.ownsClient()) {
+ Diags.setClient(
+ new ChainedDiagnosticConsumer(Diags.takeClient(), std::move(Logger)));
+ } else {
+ Diags.setClient(
+ new ChainedDiagnosticConsumer(Diags.getClient(), std::move(Logger)));
+ }
}
static void SetupSerializedDiagnostics(DiagnosticOptions *DiagOpts,
@@ -301,13 +298,14 @@ CompilerInstance::createDiagnostics(DiagnosticOptions *Opts,
// File Manager
-FileManager *CompilerInstance::createFileManager() {
- if (!hasVirtualFileSystem()) {
- IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS =
- createVFSFromCompilerInvocation(getInvocation(), getDiagnostics());
- setVirtualFileSystem(VFS);
- }
- FileMgr = new FileManager(getFileSystemOpts(), VirtualFileSystem);
+FileManager *CompilerInstance::createFileManager(
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
+ if (!VFS)
+ VFS = FileMgr ? &FileMgr->getVirtualFileSystem()
+ : createVFSFromCompilerInvocation(getInvocation(),
+ getDiagnostics());
+ assert(VFS && "FileManager has no VFS?");
+ FileMgr = new FileManager(getFileSystemOpts(), std::move(VFS));
return FileMgr.get();
}
@@ -379,11 +377,11 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
HeaderSearch *HeaderInfo =
new HeaderSearch(getHeaderSearchOptsPtr(), getSourceManager(),
getDiagnostics(), getLangOpts(), &getTarget());
- PP = std::make_shared<Preprocessor>(
- Invocation->getPreprocessorOptsPtr(), getDiagnostics(), getLangOpts(),
- getSourceManager(), getPCMCache(), *HeaderInfo, *this,
- /*IdentifierInfoLookup=*/nullptr,
- /*OwnsHeaderSearch=*/true, TUKind);
+ PP = std::make_shared<Preprocessor>(Invocation->getPreprocessorOptsPtr(),
+ getDiagnostics(), getLangOpts(),
+ getSourceManager(), *HeaderInfo, *this,
+ /*IdentifierInfoLookup=*/nullptr,
+ /*OwnsHeaderSearch=*/true, TUKind);
getTarget().adjust(getLangOpts());
PP->Initialize(getTarget(), getAuxTarget());
@@ -417,8 +415,7 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
// Handle generating dependencies, if requested.
const DependencyOutputOptions &DepOpts = getDependencyOutputOpts();
if (!DepOpts.OutputFile.empty())
- TheDependencyFileGenerator.reset(
- DependencyFileGenerator::CreateAndAttachToPreprocessor(*PP, DepOpts));
+ addDependencyCollector(std::make_shared<DependencyFileGenerator>(DepOpts));
if (!DepOpts.DOTOutputFile.empty())
AttachDependencyGraphGen(*PP, DepOpts.DOTOutputFile,
getHeaderSearchOpts().Sysroot);
@@ -490,29 +487,26 @@ void CompilerInstance::createPCHExternalASTSource(
bool Preamble = getPreprocessorOpts().PrecompiledPreambleBytes.first != 0;
ModuleManager = createPCHExternalASTSource(
Path, getHeaderSearchOpts().Sysroot, DisablePCHValidation,
- AllowPCHWithCompilerErrors, getPreprocessor(), getASTContext(),
- getPCHContainerReader(),
- getFrontendOpts().ModuleFileExtensions,
- TheDependencyFileGenerator.get(),
- DependencyCollectors,
- DeserializationListener,
- OwnDeserializationListener, Preamble,
+ AllowPCHWithCompilerErrors, getPreprocessor(), getModuleCache(),
+ getASTContext(), getPCHContainerReader(),
+ getFrontendOpts().ModuleFileExtensions, DependencyCollectors,
+ DeserializationListener, OwnDeserializationListener, Preamble,
getFrontendOpts().UseGlobalModuleIndex);
}
IntrusiveRefCntPtr<ASTReader> CompilerInstance::createPCHExternalASTSource(
StringRef Path, StringRef Sysroot, bool DisablePCHValidation,
- bool AllowPCHWithCompilerErrors, Preprocessor &PP, ASTContext &Context,
+ bool AllowPCHWithCompilerErrors, Preprocessor &PP,
+ InMemoryModuleCache &ModuleCache, ASTContext &Context,
const PCHContainerReader &PCHContainerRdr,
ArrayRef<std::shared_ptr<ModuleFileExtension>> Extensions,
- DependencyFileGenerator *DependencyFile,
ArrayRef<std::shared_ptr<DependencyCollector>> DependencyCollectors,
void *DeserializationListener, bool OwnDeserializationListener,
bool Preamble, bool UseGlobalModuleIndex) {
HeaderSearchOptions &HSOpts = PP.getHeaderSearchInfo().getHeaderSearchOpts();
IntrusiveRefCntPtr<ASTReader> Reader(new ASTReader(
- PP, &Context, PCHContainerRdr, Extensions,
+ PP, ModuleCache, &Context, PCHContainerRdr, Extensions,
Sysroot.empty() ? "" : Sysroot.data(), DisablePCHValidation,
AllowPCHWithCompilerErrors, /*AllowConfigurationMismatch*/ false,
HSOpts.ModulesValidateSystemHeaders, UseGlobalModuleIndex));
@@ -525,8 +519,6 @@ IntrusiveRefCntPtr<ASTReader> CompilerInstance::createPCHExternalASTSource(
static_cast<ASTDeserializationListener *>(DeserializationListener),
/*TakeOwnership=*/OwnDeserializationListener);
- if (DependencyFile)
- DependencyFile->AttachToASTReader(*Reader);
for (auto &Listener : DependencyCollectors)
Listener->attachToASTReader(*Reader);
@@ -593,12 +585,6 @@ void CompilerInstance::createCodeCompletionConsumer() {
setCodeCompletionConsumer(nullptr);
return;
}
-
- if (CompletionConsumer->isOutputBinary() &&
- llvm::sys::ChangeStdoutToBinary()) {
- getPreprocessor().getDiagnostics().Report(diag::err_fe_stdout_binary);
- setCodeCompletionConsumer(nullptr);
- }
}
void CompilerInstance::createFrontendTimer() {
@@ -929,6 +915,9 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
// Adjust target options based on codegen options.
getTarget().adjustTargetOptions(getCodeGenOpts(), getTargetOpts());
+ if (auto *Aux = getAuxTarget())
+ getTarget().setAuxTarget(Aux);
+
// rewriter project will change target built-in bool type from its default.
if (getFrontendOpts().ProgramAction == frontend::RewriteObjC)
getTarget().noSignedCharForObjCBool();
@@ -952,7 +941,9 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
getSourceManager().clearIDTables();
if (Act.BeginSourceFile(*this, FIF)) {
- Act.Execute();
+ if (llvm::Error Err = Act.Execute()) {
+ consumeError(std::move(Err)); // FIXME this drops errors on the floor.
+ }
Act.EndSourceFile();
}
}
@@ -1031,6 +1022,8 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
[](CompilerInstance &) {},
llvm::function_ref<void(CompilerInstance &)> PostBuildStep =
[](CompilerInstance &) {}) {
+ llvm::TimeTraceScope TimeScope("Module Compile", ModuleName);
+
// Construct a compiler invocation for creating this module.
auto Invocation =
std::make_shared<CompilerInvocation>(ImportingInstance.getInvocation());
@@ -1092,11 +1085,11 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
Invocation->getModuleHash() && "Module hash mismatch!");
// Construct a compiler instance that will be used to actually create the
- // module. Since we're sharing a PCMCache,
+ // module. Since we're sharing an in-memory module cache,
// CompilerInstance::CompilerInstance is responsible for finalizing the
// buffers to prevent use-after-frees.
CompilerInstance Instance(ImportingInstance.getPCHContainerOperations(),
- &ImportingInstance.getPreprocessor().getPCMCache());
+ &ImportingInstance.getModuleCache());
auto &Inv = *Invocation;
Instance.setInvocation(std::move(Invocation));
@@ -1104,8 +1097,6 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
ImportingInstance.getDiagnosticClient()),
/*ShouldOwnClient=*/true);
- Instance.setVirtualFileSystem(&ImportingInstance.getVirtualFileSystem());
-
// Note that this module is part of the module build stack, so that we
// can detect cycles in the module graph.
Instance.setFileManager(&ImportingInstance.getFileManager());
@@ -1253,7 +1244,7 @@ static bool compileAndLoadModule(CompilerInstance &ImportingInstance,
llvm::LockFileManager Locked(ModuleFileName);
switch (Locked) {
case llvm::LockFileManager::LFS_Error:
- // PCMCache takes care of correctness and locks are only necessary for
+ // ModuleCache takes care of correctness and locks are only necessary for
// performance. Fallback to building the module in case of any lock
// related errors.
Diags.Report(ModuleNameLoc, diag::remark_module_lock_failure)
@@ -1280,9 +1271,9 @@ static bool compileAndLoadModule(CompilerInstance &ImportingInstance,
case llvm::LockFileManager::Res_OwnerDied:
continue; // try again to get the lock.
case llvm::LockFileManager::Res_Timeout:
- // Since PCMCache takes care of correctness, we try waiting for another
- // process to complete the build so clang does not do it done twice. If
- // case of timeout, build it ourselves.
+ // Since ModuleCache takes care of correctness, we try waiting for
+ // another process to complete the build so clang does not do it
+ // twice. In case of timeout, build it ourselves.
Diags.Report(ModuleNameLoc, diag::remark_module_lock_timeout)
<< Module->Name;
// Clear the lock file so that future invocations can make progress.
@@ -1480,14 +1471,13 @@ void CompilerInstance::createModuleManager() {
"Reading modules",
*FrontendTimerGroup);
ModuleManager = new ASTReader(
- getPreprocessor(), &getASTContext(), getPCHContainerReader(),
- getFrontendOpts().ModuleFileExtensions,
+ getPreprocessor(), getModuleCache(), &getASTContext(),
+ getPCHContainerReader(), getFrontendOpts().ModuleFileExtensions,
Sysroot.empty() ? "" : Sysroot.c_str(), PPOpts.DisablePCHValidation,
/*AllowASTWithCompilerErrors=*/false,
/*AllowConfigurationMismatch=*/false,
HSOpts.ModulesValidateSystemHeaders,
- getFrontendOpts().UseGlobalModuleIndex,
- std::move(ReadTimer));
+ getFrontendOpts().UseGlobalModuleIndex, std::move(ReadTimer));
if (hasASTConsumer()) {
ModuleManager->setDeserializationListener(
getASTConsumer().GetASTDeserializationListener());
@@ -1500,8 +1490,6 @@ void CompilerInstance::createModuleManager() {
if (hasASTConsumer())
ModuleManager->StartTranslationUnit(&getASTConsumer());
- if (TheDependencyFileGenerator)
- TheDependencyFileGenerator->AttachToASTReader(*ModuleManager);
for (auto &Listener : DependencyCollectors)
Listener->attachToASTReader(*ModuleManager);
}
@@ -1710,6 +1698,7 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
Timer.init("loading." + ModuleFileName, "Loading " + ModuleFileName,
*FrontendTimerGroup);
llvm::TimeRegion TimeLoading(FrontendTimerGroup ? &Timer : nullptr);
+ llvm::TimeTraceScope TimeScope("Module Load", ModuleName);
// Try to load the module file. If we are not trying to load from the
// module cache, we don't know how to rebuild modules.
@@ -2056,9 +2045,16 @@ GlobalModuleIndex *CompilerInstance::loadGlobalModuleIndex(
hasPreprocessor()) {
llvm::sys::fs::create_directories(
getPreprocessor().getHeaderSearchInfo().getModuleCachePath());
- GlobalModuleIndex::writeIndex(
- getFileManager(), getPCHContainerReader(),
- getPreprocessor().getHeaderSearchInfo().getModuleCachePath());
+ if (llvm::Error Err = GlobalModuleIndex::writeIndex(
+ getFileManager(), getPCHContainerReader(),
+ getPreprocessor().getHeaderSearchInfo().getModuleCachePath())) {
+ // FIXME this drops the error on the floor. This code is only used for
+ // typo correction and drops more than just this one source of errors
+ // (such as the directory creation failure above). It should handle the
+ // error.
+ consumeError(std::move(Err));
+ return nullptr;
+ }
ModuleManager->resetForReload();
ModuleManager->loadGlobalIndex();
GlobalIndex = ModuleManager->getGlobalIndex();
@@ -2083,9 +2079,13 @@ GlobalModuleIndex *CompilerInstance::loadGlobalModuleIndex(
}
}
if (RecreateIndex) {
- GlobalModuleIndex::writeIndex(
- getFileManager(), getPCHContainerReader(),
- getPreprocessor().getHeaderSearchInfo().getModuleCachePath());
+ if (llvm::Error Err = GlobalModuleIndex::writeIndex(
+ getFileManager(), getPCHContainerReader(),
+ getPreprocessor().getHeaderSearchInfo().getModuleCachePath())) {
+ // FIXME As above, this drops the error on the floor.
+ consumeError(std::move(Err));
+ return nullptr;
+ }
ModuleManager->resetForReload();
ModuleManager->loadGlobalIndex();
GlobalIndex = ModuleManager->getGlobalIndex();
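
The CompilerInstance.cpp hunks above move fallible calls (FrontendAction::Execute, GlobalModuleIndex::writeIndex) from bool/void returns to llvm::Error, with every caller consuming the error behind a FIXME for now. A minimal sketch of the llvm::Error discipline those signatures imply, assuming only llvm/Support/Error.h (illustrative, not part of the vendor diff):

// Sketch: an llvm::Error must be checked before destruction; consumeError()
// is the explicit "drop it on the floor" that the FIXMEs point at.
#include "llvm/Support/Error.h"

static llvm::Error mightFail(bool Fail) {
  if (Fail)
    return llvm::createStringError(llvm::inconvertibleErrorCode(),
                                   "something went wrong");
  return llvm::Error::success();
}

void caller() {
  if (llvm::Error Err = mightFail(true))
    llvm::consumeError(std::move(Err)); // same pattern as the hunks above
}
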
diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp
index 3e6528c25982..8a9844096f08 100644
--- a/lib/Frontend/CompilerInvocation.cpp
+++ b/lib/Frontend/CompilerInvocation.cpp
@@ -1,9 +1,8 @@
//===- CompilerInvocation.cpp ---------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -27,6 +26,7 @@
#include "clang/Basic/Visibility.h"
#include "clang/Basic/XRayInstr.h"
#include "clang/Config/config.h"
+#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "clang/Frontend/CommandLineSourceLoc.h"
@@ -285,6 +285,16 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
}
Opts.ShowCheckerHelp = Args.hasArg(OPT_analyzer_checker_help);
+ Opts.ShowCheckerHelpAlpha = Args.hasArg(OPT_analyzer_checker_help_alpha);
+ Opts.ShowCheckerHelpDeveloper =
+ Args.hasArg(OPT_analyzer_checker_help_developer);
+
+ Opts.ShowCheckerOptionList = Args.hasArg(OPT_analyzer_checker_option_help);
+ Opts.ShowCheckerOptionAlphaList =
+ Args.hasArg(OPT_analyzer_checker_option_help_alpha);
+ Opts.ShowCheckerOptionDeveloperList =
+ Args.hasArg(OPT_analyzer_checker_option_help_developer);
+
Opts.ShowConfigOptionsList = Args.hasArg(OPT_analyzer_config_help);
Opts.ShowEnabledCheckerList = Args.hasArg(OPT_analyzer_list_enabled_checkers);
Opts.ShouldEmitErrorsOnInvalidConfigValue =
@@ -299,6 +309,7 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
Args.hasArg(OPT_analyzer_viz_egraph_graphviz);
Opts.DumpExplodedGraphTo = Args.getLastArgValue(OPT_analyzer_dump_egraph);
Opts.NoRetryExhausted = Args.hasArg(OPT_analyzer_disable_retry_exhausted);
+ Opts.AnalyzerWerror = Args.hasArg(OPT_analyzer_werror);
Opts.AnalyzeAll = Args.hasArg(OPT_analyzer_opt_analyze_headers);
Opts.AnalyzerDisplayProgress = Args.hasArg(OPT_analyzer_display_progress);
Opts.AnalyzeNestedBlocks =
@@ -422,7 +433,7 @@ static void initOption(AnalyzerOptions::ConfigTable &Config,
OptionField = DefaultVal;
bool HasFailed = getStringOption(Config, Name, std::to_string(DefaultVal))
- .getAsInteger(10, OptionField);
+ .getAsInteger(0, OptionField);
if (Diags && HasFailed)
Diags->Report(diag::err_analyzer_config_invalid_input)
<< Name << "an unsigned";
@@ -456,6 +467,10 @@ static void parseAnalyzerConfigs(AnalyzerOptions &AnOpts,
if (!Diags)
return;
+ if (AnOpts.ShouldTrackConditionsDebug && !AnOpts.ShouldTrackConditions)
+ Diags->Report(diag::err_analyzer_config_invalid_input)
+ << "track-conditions-debug" << "'track-conditions' to also be enabled";
+
if (!AnOpts.CTUDir.empty() && !llvm::sys::fs::is_directory(AnOpts.CTUDir))
Diags->Report(diag::err_analyzer_config_invalid_input) << "ctu-dir"
<< "a filename";
@@ -551,7 +566,7 @@ static void parseSanitizerKinds(StringRef FlagName,
DiagnosticsEngine &Diags, SanitizerSet &S) {
for (const auto &Sanitizer : Sanitizers) {
SanitizerMask K = parseSanitizerValue(Sanitizer, /*AllowGroups=*/false);
- if (K == 0)
+ if (K == SanitizerMask())
Diags.Report(diag::err_drv_invalid_value) << FlagName << Sanitizer;
else
S.set(K, true);
@@ -588,6 +603,7 @@ static void setPGOInstrumentor(CodeGenOptions &Opts, ArgList &Args,
.Case("none", CodeGenOptions::ProfileNone)
.Case("clang", CodeGenOptions::ProfileClangInstr)
.Case("llvm", CodeGenOptions::ProfileIRInstr)
+ .Case("csllvm", CodeGenOptions::ProfileCSIRInstr)
.Default(~0U);
if (I == ~0U) {
Diags.Report(diag::err_drv_invalid_pgo_instrumentor) << A->getAsString(Args)
@@ -610,9 +626,12 @@ static void setPGOUseInstrumentor(CodeGenOptions &Opts,
}
std::unique_ptr<llvm::IndexedInstrProfReader> PGOReader =
std::move(ReaderOrErr.get());
- if (PGOReader->isIRLevelProfile())
- Opts.setProfileUse(CodeGenOptions::ProfileIRInstr);
- else
+ if (PGOReader->isIRLevelProfile()) {
+ if (PGOReader->hasCSIRLevelProfile())
+ Opts.setProfileUse(CodeGenOptions::ProfileCSIRInstr);
+ else
+ Opts.setProfileUse(CodeGenOptions::ProfileIRInstr);
+ } else
Opts.setProfileUse(CodeGenOptions::ProfileClangInstr);
}
@@ -668,6 +687,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
StringRef Name = A->getValue();
if (Name == "Accelerate")
Opts.setVecLib(CodeGenOptions::Accelerate);
+ else if (Name == "MASSV")
+ Opts.setVecLib(CodeGenOptions::MASSV);
else if (Name == "SVML")
Opts.setVecLib(CodeGenOptions::SVML);
else if (Name == "none")
@@ -710,24 +731,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.WholeProgramVTables = Args.hasArg(OPT_fwhole_program_vtables);
Opts.LTOVisibilityPublicStd = Args.hasArg(OPT_flto_visibility_public_std);
Opts.SplitDwarfFile = Args.getLastArgValue(OPT_split_dwarf_file);
+ Opts.SplitDwarfOutput = Args.getLastArgValue(OPT_split_dwarf_output);
Opts.SplitDwarfInlining = !Args.hasArg(OPT_fno_split_dwarf_inlining);
-
- if (Arg *A =
- Args.getLastArg(OPT_enable_split_dwarf, OPT_enable_split_dwarf_EQ)) {
- if (A->getOption().matches(options::OPT_enable_split_dwarf)) {
- Opts.setSplitDwarfMode(CodeGenOptions::SplitFileFission);
- } else {
- StringRef Name = A->getValue();
- if (Name == "single")
- Opts.setSplitDwarfMode(CodeGenOptions::SingleFileFission);
- else if (Name == "split")
- Opts.setSplitDwarfMode(CodeGenOptions::SplitFileFission);
- else
- Diags.Report(diag::err_drv_invalid_value)
- << A->getAsString(Args) << Name;
- }
- }
-
Opts.DebugTypeExtRefs = Args.hasArg(OPT_dwarf_ext_refs);
Opts.DebugExplicitImport = Args.hasArg(OPT_dwarf_explicit_import);
Opts.DebugFwdTemplateParams = Args.hasArg(OPT_debug_forward_template_params);
@@ -742,6 +747,13 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.DisableLLVMPasses = Args.hasArg(OPT_disable_llvm_passes);
Opts.DisableLifetimeMarkers = Args.hasArg(OPT_disable_lifetimemarkers);
+
+ llvm::Triple T(TargetOpts.Triple);
+ llvm::Triple::ArchType Arch = T.getArch();
+ if (Opts.OptimizationLevel > 0 &&
+ (Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64))
+ Opts.EnableDebugEntryValues = Args.hasArg(OPT_femit_debug_entry_values);
+
Opts.DisableO0ImplyOptNone = Args.hasArg(OPT_disable_O0_optnone);
Opts.DisableRedZone = Args.hasArg(OPT_disable_red_zone);
Opts.IndirectTlsSegRefs = Args.hasArg(OPT_mno_tls_direct_seg_refs);
@@ -1214,6 +1226,16 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
if (!Opts.OptRecordFile.empty())
NeedLocTracking = true;
+ if (Arg *A = Args.getLastArg(OPT_opt_record_passes)) {
+ Opts.OptRecordPasses = A->getValue();
+ NeedLocTracking = true;
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_opt_record_format)) {
+ Opts.OptRecordFormat = A->getValue();
+ NeedLocTracking = true;
+ }
+
if (Arg *A = Args.getLastArg(OPT_Rpass_EQ)) {
Opts.OptimizationRemarkPattern =
GenerateOptimizationRemarkRegex(Diags, Args, A);
@@ -1322,6 +1344,10 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.DefaultFunctionAttrs = Args.getAllArgValues(OPT_default_function_attr);
+ Opts.PassPlugins = Args.getAllArgValues(OPT_fpass_plugin_EQ);
+
+ Opts.SymbolPartition = Args.getLastArgValue(OPT_fsymbol_partition_EQ);
+
return Success;
}
@@ -1402,9 +1428,9 @@ static bool checkVerifyPrefixes(const std::vector<std::string> &VerifyPrefixes,
for (const auto &Prefix : VerifyPrefixes) {
// Every prefix must start with a letter and contain only alphanumeric
// characters, hyphens, and underscores.
- auto BadChar = std::find_if(Prefix.begin(), Prefix.end(),
- [](char C){return !isAlphanumeric(C)
- && C != '-' && C != '_';});
+ auto BadChar = llvm::find_if(Prefix, [](char C) {
+ return !isAlphanumeric(C) && C != '-' && C != '_';
+ });
if (BadChar != Prefix.end() || !isLetter(Prefix[0])) {
Success = false;
if (Diags) {
@@ -1592,6 +1618,22 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
llvm_unreachable("Invalid option in group!");
case OPT_ast_list:
Opts.ProgramAction = frontend::ASTDeclList; break;
+ case OPT_ast_dump_all_EQ:
+ case OPT_ast_dump_EQ: {
+ unsigned Val = llvm::StringSwitch<unsigned>(A->getValue())
+ .CaseLower("default", ADOF_Default)
+ .CaseLower("json", ADOF_JSON)
+ .Default(std::numeric_limits<unsigned>::max());
+
+ if (Val != std::numeric_limits<unsigned>::max())
+ Opts.ASTDumpFormat = static_cast<ASTDumpOutputFormat>(Val);
+ else {
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ Opts.ASTDumpFormat = ADOF_Default;
+ }
+ LLVM_FALLTHROUGH;
+ }
case OPT_ast_dump:
case OPT_ast_dump_all:
case OPT_ast_dump_lookups:
@@ -1633,6 +1675,25 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.ProgramAction = frontend::GenerateHeaderModule; break;
case OPT_emit_pch:
Opts.ProgramAction = frontend::GeneratePCH; break;
+ case OPT_emit_iterface_stubs: {
+ llvm::Optional<frontend::ActionKind> ProgramAction =
+ llvm::StringSwitch<llvm::Optional<frontend::ActionKind>>(
+ Args.hasArg(OPT_iterface_stub_version_EQ)
+ ? Args.getLastArgValue(OPT_iterface_stub_version_EQ)
+ : "")
+ .Case("experimental-yaml-elf-v1",
+ frontend::GenerateInterfaceYAMLExpV1)
+ .Case("experimental-tapi-elf-v1",
+ frontend::GenerateInterfaceTBEExpV1)
+ .Default(llvm::None);
+ if (!ProgramAction)
+ Diags.Report(diag::err_drv_invalid_value)
+ << "Must specify a valid interface stub format type using "
+ << "-interface-stub-version=<experimental-tapi-elf-v1 | "
+ "experimental-yaml-elf-v1>";
+ Opts.ProgramAction = *ProgramAction;
+ break;
+ }
case OPT_init_only:
Opts.ProgramAction = frontend::InitOnly; break;
case OPT_fsyntax_only:
@@ -1659,6 +1720,10 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.ProgramAction = frontend::MigrateSource; break;
case OPT_Eonly:
Opts.ProgramAction = frontend::RunPreprocessorOnly; break;
+ case OPT_print_dependency_directives_minimized_source:
+ Opts.ProgramAction =
+ frontend::PrintDependencyDirectivesSourceMinimizerOutput;
+ break;
}
}
@@ -1706,6 +1771,8 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.ShowHelp = Args.hasArg(OPT_help);
Opts.ShowStats = Args.hasArg(OPT_print_stats);
Opts.ShowTimers = Args.hasArg(OPT_ftime_report);
+ Opts.PrintSupportedCPUs = Args.hasArg(OPT_print_supported_cpus);
+ Opts.TimeTrace = Args.hasArg(OPT_ftime_trace);
Opts.ShowVersion = Args.hasArg(OPT_version);
Opts.ASTMergeFiles = Args.getAllArgValues(OPT_ast_merge);
Opts.LLVMArgs = Args.getAllArgValues(OPT_mllvm);
@@ -1713,8 +1780,8 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.FixOnlyWarnings = Args.hasArg(OPT_fix_only_warnings);
Opts.FixAndRecompile = Args.hasArg(OPT_fixit_recompile);
Opts.FixToTemporaries = Args.hasArg(OPT_fixit_to_temp);
- Opts.ASTDumpDecls = Args.hasArg(OPT_ast_dump);
- Opts.ASTDumpAll = Args.hasArg(OPT_ast_dump_all);
+ Opts.ASTDumpDecls = Args.hasArg(OPT_ast_dump, OPT_ast_dump_EQ);
+ Opts.ASTDumpAll = Args.hasArg(OPT_ast_dump_all, OPT_ast_dump_all_EQ);
Opts.ASTDumpFilter = Args.getLastArgValue(OPT_ast_dump_filter);
Opts.ASTDumpLookups = Args.hasArg(OPT_ast_dump_lookups);
Opts.UseGlobalModuleIndex = !Args.hasArg(OPT_fno_modules_global_index);
@@ -1894,18 +1961,7 @@ std::string CompilerInvocation::GetResourcesPath(const char *Argv0,
void *MainAddr) {
std::string ClangExecutable =
llvm::sys::fs::getMainExecutable(Argv0, MainAddr);
- StringRef Dir = llvm::sys::path::parent_path(ClangExecutable);
-
- // Compute the path to the resource directory.
- StringRef ClangResourceDir(CLANG_RESOURCE_DIR);
- SmallString<128> P(Dir);
- if (ClangResourceDir != "")
- llvm::sys::path::append(P, ClangResourceDir);
- else
- llvm::sys::path::append(P, "..", Twine("lib") + CLANG_LIBDIR_SUFFIX,
- "clang", CLANG_VERSION_STRING);
-
- return P.str();
+ return Driver::GetResourcesPath(ClangExecutable, CLANG_RESOURCE_DIR);
}
static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
@@ -2117,6 +2173,7 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
Opts.C99 = Std.isC99();
Opts.C11 = Std.isC11();
Opts.C17 = Std.isC17();
+ Opts.C2x = Std.isC2x();
Opts.CPlusPlus = Std.isCPlusPlus();
Opts.CPlusPlus11 = Std.isCPlusPlus11();
Opts.CPlusPlus14 = Std.isCPlusPlus14();
@@ -2150,9 +2207,15 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
Opts.NativeHalfType = 1;
Opts.NativeHalfArgsAndReturns = 1;
Opts.OpenCLCPlusPlus = Opts.CPlusPlus;
+
// Include default header file for OpenCL.
if (Opts.IncludeDefaultHeader) {
- PPOpts.Includes.push_back("opencl-c.h");
+ if (Opts.DeclareOpenCLBuiltins) {
+ // Only include base header file for builtin types and constants.
+ PPOpts.Includes.push_back("opencl-c-base.h");
+ } else {
+ PPOpts.Includes.push_back("opencl-c.h");
+ }
}
}
@@ -2183,6 +2246,9 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
Opts.AlignedAllocation = Opts.CPlusPlus17;
Opts.DollarIdents = !Opts.AsmPreprocessor;
+
+ // Enable [[]] attributes in C++11 and C2x by default.
+ Opts.DoubleSquareBracketAttributes = Opts.CPlusPlus11 || Opts.C2x;
}
/// Attempt to parse a visibility value out of the given argument.
@@ -2354,6 +2420,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
}
Opts.IncludeDefaultHeader = Args.hasArg(OPT_finclude_default_header);
+ Opts.DeclareOpenCLBuiltins = Args.hasArg(OPT_fdeclare_opencl_builtins);
llvm::Triple T(TargetOpts.Triple);
CompilerInvocation::setLangDefaults(Opts, IK, T, PPOpts, LangStd);
@@ -2502,6 +2569,9 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (Args.hasArg(OPT_fvisibility_global_new_delete_hidden))
Opts.GlobalAllocationFunctionVisibilityHidden = 1;
+ if (Args.hasArg(OPT_fapply_global_visibility_to_externs))
+ Opts.SetVisibilityForExternDecls = 1;
+
if (Args.hasArg(OPT_ftrapv)) {
Opts.setSignedOverflowBehavior(LangOptions::SOB_Trapping);
// Set the handler, if one is specified.
@@ -2583,20 +2653,25 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.Blocks = Args.hasArg(OPT_fblocks) || (Opts.OpenCL
&& Opts.OpenCLVersion == 200);
Opts.BlocksRuntimeOptional = Args.hasArg(OPT_fblocks_runtime_optional);
- Opts.CoroutinesTS = Args.hasArg(OPT_fcoroutines_ts);
+ Opts.Coroutines = Opts.CPlusPlus2a || Args.hasArg(OPT_fcoroutines_ts);
- // Enable [[]] attributes in C++11 by default.
Opts.DoubleSquareBracketAttributes =
Args.hasFlag(OPT_fdouble_square_bracket_attributes,
- OPT_fno_double_square_bracket_attributes, Opts.CPlusPlus11);
+ OPT_fno_double_square_bracket_attributes,
+ Opts.DoubleSquareBracketAttributes);
+ Opts.CPlusPlusModules = Opts.CPlusPlus2a;
Opts.ModulesTS = Args.hasArg(OPT_fmodules_ts);
- Opts.Modules = Args.hasArg(OPT_fmodules) || Opts.ModulesTS;
+ Opts.Modules =
+ Args.hasArg(OPT_fmodules) || Opts.ModulesTS || Opts.CPlusPlusModules;
Opts.ModulesStrictDeclUse = Args.hasArg(OPT_fmodules_strict_decluse);
Opts.ModulesDeclUse =
Args.hasArg(OPT_fmodules_decluse) || Opts.ModulesStrictDeclUse;
+ // FIXME: We only need this in C++ modules / Modules TS if we might textually
+ // enter a different module (eg, when building a header unit).
Opts.ModulesLocalVisibility =
- Args.hasArg(OPT_fmodules_local_submodule_visibility) || Opts.ModulesTS;
+ Args.hasArg(OPT_fmodules_local_submodule_visibility) || Opts.ModulesTS ||
+ Opts.CPlusPlusModules;
Opts.ModulesCodegen = Args.hasArg(OPT_fmodules_codegen);
Opts.ModulesDebugInfo = Args.hasArg(OPT_fmodules_debuginfo);
Opts.ModulesSearchAll = Opts.Modules &&
@@ -2667,7 +2742,13 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.PackStruct = getLastArgIntValue(Args, OPT_fpack_struct_EQ, 0, Diags);
Opts.MaxTypeAlign = getLastArgIntValue(Args, OPT_fmax_type_align_EQ, 0, Diags);
Opts.AlignDouble = Args.hasArg(OPT_malign_double);
+ Opts.LongDoubleSize = Args.hasArg(OPT_mlong_double_128)
+ ? 128
+ : Args.hasArg(OPT_mlong_double_64) ? 64 : 0;
+ Opts.PPCIEEELongDouble = Args.hasArg(OPT_mabi_EQ_ieeelongdouble);
Opts.PICLevel = getLastArgIntValue(Args, OPT_pic_level, 0, Diags);
+ Opts.ROPI = Args.hasArg(OPT_fropi);
+ Opts.RWPI = Args.hasArg(OPT_frwpi);
Opts.PIE = Args.hasArg(OPT_pic_is_pie);
Opts.Static = Args.hasArg(OPT_static_define);
Opts.DumpRecordLayoutsSimple = Args.hasArg(OPT_fdump_record_layouts_simple);
@@ -2697,6 +2778,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.HalfArgsAndReturns = Args.hasArg(OPT_fallow_half_arguments_and_returns)
| Opts.NativeHalfArgsAndReturns;
Opts.GNUAsm = !Args.hasArg(OPT_fno_gnu_inline_asm);
+ Opts.Cmse = Args.hasArg(OPT_mcmse); // Armv8-M Security Extensions
// __declspec is enabled by default for the PS4 by the driver, and also
// enabled for Microsoft Extensions or Borland Extensions, here.
@@ -2833,7 +2915,6 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
// Set the flag to prevent the implementation from emitting device exception
// handling code for those requiring so.
- Opts.OpenMPHostCXXExceptions = Opts.Exceptions && Opts.CXXExceptions;
if ((Opts.OpenMPIsDevice && T.isNVPTX()) || Opts.OpenCLCPlusPlus) {
Opts.Exceptions = 0;
Opts.CXXExceptions = 0;
@@ -2845,6 +2926,9 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.OpenMPCUDABlocksPerSM =
getLastArgIntValue(Args, options::OPT_fopenmp_cuda_blocks_per_sm_EQ,
Opts.OpenMPCUDABlocksPerSM, Diags);
+ Opts.OpenMPCUDAReductionBufNum = getLastArgIntValue(
+ Args, options::OPT_fopenmp_cuda_teams_reduction_recs_num_EQ,
+ Opts.OpenMPCUDAReductionBufNum, Diags);
}
// Prevent auto-widening the representation of loop counters during an
@@ -2881,6 +2965,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
<< Opts.OMPHostIRFile;
}
+ Opts.SYCLIsDevice = Args.hasArg(options::OPT_fsycl_is_device);
+
// Set CUDA mode for OpenMP target NVPTX if specified in options
Opts.OpenMPCUDAMode = Opts.OpenMPIsDevice && T.isNVPTX() &&
Args.hasArg(options::OPT_fopenmp_cuda_mode);
@@ -3050,6 +3136,8 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
case frontend::GenerateModuleInterface:
case frontend::GenerateHeaderModule:
case frontend::GeneratePCH:
+ case frontend::GenerateInterfaceYAMLExpV1:
+ case frontend::GenerateInterfaceTBEExpV1:
case frontend::ParseSyntaxOnly:
case frontend::ModuleFileInfo:
case frontend::VerifyPCH:
@@ -3069,6 +3157,7 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
case frontend::PrintPreprocessedInput:
case frontend::RewriteMacros:
case frontend::RunPreprocessorOnly:
+ case frontend::PrintDependencyDirectivesSourceMinimizerOutput:
return true;
}
llvm_unreachable("invalid frontend action");
@@ -3333,10 +3422,8 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
Res.getFrontendOpts().ProgramAction);
// Turn on -Wspir-compat for SPIR target.
- auto Arch = T.getArch();
- if (Arch == llvm::Triple::spir || Arch == llvm::Triple::spir64) {
+ if (T.isSPIR())
Res.getDiagnosticOpts().Warnings.push_back("spir-compat");
- }
// If sanitizer is enabled, disable OPT_ffine_grained_bitfield_accesses.
if (Res.getCodeGenOpts().FineGrainedBitfieldAccesses &&
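
The -ast-dump=<format> handling added above relies on llvm::StringSwitch to map the option value onto an enum, falling back to a sentinel for invalid input. A minimal sketch of that lookup, assuming only the LLVM ADT headers; the enum names here are placeholders (illustrative, not part of the vendor diff):

// Sketch: case-insensitive StringSwitch lookup with an "invalid" sentinel,
// mirroring the OPT_ast_dump_EQ handling above.
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include <limits>

enum DumpFormat : unsigned { DF_Default, DF_JSON };

unsigned parseDumpFormat(llvm::StringRef Value) {
  return llvm::StringSwitch<unsigned>(Value)
      .CaseLower("default", DF_Default)
      .CaseLower("json", DF_JSON)
      .Default(std::numeric_limits<unsigned>::max()); // reported as invalid
}
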
diff --git a/lib/Frontend/CreateInvocationFromCommandLine.cpp b/lib/Frontend/CreateInvocationFromCommandLine.cpp
index 2d4c40f8b9f1..b62416ffd9e7 100644
--- a/lib/Frontend/CreateInvocationFromCommandLine.cpp
+++ b/lib/Frontend/CreateInvocationFromCommandLine.cpp
@@ -1,9 +1,8 @@
//===--- CreateInvocationFromCommandLine.cpp - CompilerInvocation from Args ==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Frontend/DependencyFile.cpp b/lib/Frontend/DependencyFile.cpp
index a03d4b79c8b9..375eb91ae366 100644
--- a/lib/Frontend/DependencyFile.cpp
+++ b/lib/Frontend/DependencyFile.cpp
@@ -1,9 +1,8 @@
//===--- DependencyFile.cpp - Generate dependency file --------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -33,8 +32,10 @@ namespace {
struct DepCollectorPPCallbacks : public PPCallbacks {
DependencyCollector &DepCollector;
SourceManager &SM;
- DepCollectorPPCallbacks(DependencyCollector &L, SourceManager &SM)
- : DepCollector(L), SM(SM) { }
+ DiagnosticsEngine &Diags;
+ DepCollectorPPCallbacks(DependencyCollector &L, SourceManager &SM,
+ DiagnosticsEngine &Diags)
+ : DepCollector(L), SM(SM), Diags(Diags) {}
void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
@@ -58,6 +59,16 @@ struct DepCollectorPPCallbacks : public PPCallbacks {
/*IsModuleFile*/false, /*IsMissing*/false);
}
+ void FileSkipped(const FileEntry &SkippedFile, const Token &FilenameTok,
+ SrcMgr::CharacteristicKind FileType) override {
+ StringRef Filename =
+ llvm::sys::path::remove_leading_dotslash(SkippedFile.getName());
+ DepCollector.maybeAddDependency(Filename, /*FromModule=*/false,
+ /*IsSystem=*/isSystem(FileType),
+ /*IsModuleFile=*/false,
+ /*IsMissing=*/false);
+ }
+
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
CharSourceRange FilenameRange, const FileEntry *File,
@@ -71,9 +82,20 @@ struct DepCollectorPPCallbacks : public PPCallbacks {
// Files that actually exist are handled by FileChanged.
}
- void EndOfMainFile() override {
- DepCollector.finishedMainFile();
+ void HasInclude(SourceLocation Loc, StringRef SpelledFilename, bool IsAngled,
+ const FileEntry *File,
+ SrcMgr::CharacteristicKind FileType) override {
+ if (!File)
+ return;
+ StringRef Filename =
+ llvm::sys::path::remove_leading_dotslash(File->getName());
+ DepCollector.maybeAddDependency(Filename, /*FromModule=*/false,
+ /*IsSystem=*/isSystem(FileType),
+ /*IsModuleFile=*/false,
+ /*IsMissing=*/false);
}
+
+ void EndOfMainFile() override { DepCollector.finishedMainFile(Diags); }
};
struct DepCollectorMMCallbacks : public ModuleMapCallbacks {
@@ -118,9 +140,16 @@ struct DepCollectorASTListener : public ASTReaderListener {
void DependencyCollector::maybeAddDependency(StringRef Filename, bool FromModule,
bool IsSystem, bool IsModuleFile,
bool IsMissing) {
- if (Seen.insert(Filename).second &&
- sawDependency(Filename, FromModule, IsSystem, IsModuleFile, IsMissing))
+ if (sawDependency(Filename, FromModule, IsSystem, IsModuleFile, IsMissing))
+ addDependency(Filename);
+}
+
+bool DependencyCollector::addDependency(StringRef Filename) {
+ if (Seen.insert(Filename).second) {
Dependencies.push_back(Filename);
+ return true;
+ }
+ return false;
}
static bool isSpecialFilename(StringRef Filename) {
@@ -139,8 +168,8 @@ bool DependencyCollector::sawDependency(StringRef Filename, bool FromModule,
DependencyCollector::~DependencyCollector() { }
void DependencyCollector::attachToPreprocessor(Preprocessor &PP) {
- PP.addPPCallbacks(
- llvm::make_unique<DepCollectorPPCallbacks>(*this, PP.getSourceManager()));
+ PP.addPPCallbacks(llvm::make_unique<DepCollectorPPCallbacks>(
+ *this, PP.getSourceManager(), PP.getDiagnostics()));
PP.getHeaderSearchInfo().getModuleMap().addModuleMapCallbacks(
llvm::make_unique<DepCollectorMMCallbacks>(*this));
}
@@ -148,206 +177,57 @@ void DependencyCollector::attachToASTReader(ASTReader &R) {
R.addListener(llvm::make_unique<DepCollectorASTListener>(*this));
}
-namespace {
-/// Private implementation for DependencyFileGenerator
-class DFGImpl : public PPCallbacks {
- std::vector<std::string> Files;
- llvm::StringSet<> FilesSet;
- const Preprocessor *PP;
- std::string OutputFile;
- std::vector<std::string> Targets;
- bool IncludeSystemHeaders;
- bool PhonyTarget;
- bool AddMissingHeaderDeps;
- bool SeenMissingHeader;
- bool IncludeModuleFiles;
- DependencyOutputFormat OutputFormat;
- unsigned InputFileIndex;
-
-private:
- bool FileMatchesDepCriteria(const char *Filename,
- SrcMgr::CharacteristicKind FileType);
- void OutputDependencyFile();
-
-public:
- DFGImpl(const Preprocessor *_PP, const DependencyOutputOptions &Opts)
- : PP(_PP), OutputFile(Opts.OutputFile), Targets(Opts.Targets),
+DependencyFileGenerator::DependencyFileGenerator(
+ const DependencyOutputOptions &Opts)
+ : OutputFile(Opts.OutputFile), Targets(Opts.Targets),
IncludeSystemHeaders(Opts.IncludeSystemHeaders),
PhonyTarget(Opts.UsePhonyTargets),
- AddMissingHeaderDeps(Opts.AddMissingHeaderDeps),
- SeenMissingHeader(false),
+ AddMissingHeaderDeps(Opts.AddMissingHeaderDeps), SeenMissingHeader(false),
IncludeModuleFiles(Opts.IncludeModuleFiles),
- OutputFormat(Opts.OutputFormat),
- InputFileIndex(0) {
- for (const auto &ExtraDep : Opts.ExtraDeps) {
- if (AddFilename(ExtraDep))
- ++InputFileIndex;
- }
- }
-
- void FileChanged(SourceLocation Loc, FileChangeReason Reason,
- SrcMgr::CharacteristicKind FileType,
- FileID PrevFID) override;
-
- void FileSkipped(const FileEntry &SkippedFile, const Token &FilenameTok,
- SrcMgr::CharacteristicKind FileType) override;
-
- void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
- StringRef FileName, bool IsAngled,
- CharSourceRange FilenameRange, const FileEntry *File,
- StringRef SearchPath, StringRef RelativePath,
- const Module *Imported,
- SrcMgr::CharacteristicKind FileType) override;
-
- void HasInclude(SourceLocation Loc, StringRef SpelledFilename, bool IsAngled,
- const FileEntry *File,
- SrcMgr::CharacteristicKind FileType) override;
-
- void EndOfMainFile() override {
- OutputDependencyFile();
+ OutputFormat(Opts.OutputFormat), InputFileIndex(0) {
+ for (const auto &ExtraDep : Opts.ExtraDeps) {
+ if (addDependency(ExtraDep))
+ ++InputFileIndex;
}
-
- bool AddFilename(StringRef Filename);
- bool includeSystemHeaders() const { return IncludeSystemHeaders; }
- bool includeModuleFiles() const { return IncludeModuleFiles; }
-};
-
-class DFGMMCallback : public ModuleMapCallbacks {
- DFGImpl &Parent;
-public:
- DFGMMCallback(DFGImpl &Parent) : Parent(Parent) {}
- void moduleMapFileRead(SourceLocation Loc, const FileEntry &Entry,
- bool IsSystem) override {
- if (!IsSystem || Parent.includeSystemHeaders())
- Parent.AddFilename(Entry.getName());
- }
-};
-
-class DFGASTReaderListener : public ASTReaderListener {
- DFGImpl &Parent;
-public:
- DFGASTReaderListener(DFGImpl &Parent)
- : Parent(Parent) { }
- bool needsInputFileVisitation() override { return true; }
- bool needsSystemInputFileVisitation() override {
- return Parent.includeSystemHeaders();
- }
- void visitModuleFile(StringRef Filename,
- serialization::ModuleKind Kind) override;
- bool visitInputFile(StringRef Filename, bool isSystem,
- bool isOverridden, bool isExplicitModule) override;
-};
}
-DependencyFileGenerator::DependencyFileGenerator(void *Impl)
-: Impl(Impl) { }
-
-DependencyFileGenerator *DependencyFileGenerator::CreateAndAttachToPreprocessor(
- clang::Preprocessor &PP, const clang::DependencyOutputOptions &Opts) {
-
- if (Opts.Targets.empty()) {
+void DependencyFileGenerator::attachToPreprocessor(Preprocessor &PP) {
+ if (Targets.empty()) {
PP.getDiagnostics().Report(diag::err_fe_dependency_file_requires_MT);
- return nullptr;
+ return;
}
// Disable the "file not found" diagnostic if the -MG option was given.
- if (Opts.AddMissingHeaderDeps)
+ if (AddMissingHeaderDeps)
PP.SetSuppressIncludeNotFoundError(true);
- DFGImpl *Callback = new DFGImpl(&PP, Opts);
- PP.addPPCallbacks(std::unique_ptr<PPCallbacks>(Callback));
- PP.getHeaderSearchInfo().getModuleMap().addModuleMapCallbacks(
- llvm::make_unique<DFGMMCallback>(*Callback));
- return new DependencyFileGenerator(Callback);
+ DependencyCollector::attachToPreprocessor(PP);
}
-void DependencyFileGenerator::AttachToASTReader(ASTReader &R) {
- DFGImpl *I = reinterpret_cast<DFGImpl *>(Impl);
- assert(I && "missing implementation");
- R.addListener(llvm::make_unique<DFGASTReaderListener>(*I));
-}
+bool DependencyFileGenerator::sawDependency(StringRef Filename, bool FromModule,
+ bool IsSystem, bool IsModuleFile,
+ bool IsMissing) {
+ if (IsMissing) {
+ // Handle the case of a missing file from an inclusion directive.
+ if (AddMissingHeaderDeps)
+ return true;
+ SeenMissingHeader = true;
+ return false;
+ }
+ if (IsModuleFile && !IncludeModuleFiles)
+ return false;
-/// FileMatchesDepCriteria - Determine whether the given Filename should be
-/// considered as a dependency.
-bool DFGImpl::FileMatchesDepCriteria(const char *Filename,
- SrcMgr::CharacteristicKind FileType) {
if (isSpecialFilename(Filename))
return false;
if (IncludeSystemHeaders)
return true;
- return !isSystem(FileType);
+ return !IsSystem;
}
-void DFGImpl::FileChanged(SourceLocation Loc,
- FileChangeReason Reason,
- SrcMgr::CharacteristicKind FileType,
- FileID PrevFID) {
- if (Reason != PPCallbacks::EnterFile)
- return;
-
- // Dependency generation really does want to go all the way to the
- // file entry for a source location to find out what is depended on.
- // We do not want #line markers to affect dependency generation!
- SourceManager &SM = PP->getSourceManager();
-
- const FileEntry *FE =
- SM.getFileEntryForID(SM.getFileID(SM.getExpansionLoc(Loc)));
- if (!FE) return;
-
- StringRef Filename = FE->getName();
- if (!FileMatchesDepCriteria(Filename.data(), FileType))
- return;
-
- AddFilename(llvm::sys::path::remove_leading_dotslash(Filename));
-}
-
-void DFGImpl::FileSkipped(const FileEntry &SkippedFile,
- const Token &FilenameTok,
- SrcMgr::CharacteristicKind FileType) {
- StringRef Filename = SkippedFile.getName();
- if (!FileMatchesDepCriteria(Filename.data(), FileType))
- return;
-
- AddFilename(llvm::sys::path::remove_leading_dotslash(Filename));
-}
-
-void DFGImpl::InclusionDirective(SourceLocation HashLoc,
- const Token &IncludeTok,
- StringRef FileName,
- bool IsAngled,
- CharSourceRange FilenameRange,
- const FileEntry *File,
- StringRef SearchPath,
- StringRef RelativePath,
- const Module *Imported,
- SrcMgr::CharacteristicKind FileType) {
- if (!File) {
- if (AddMissingHeaderDeps)
- AddFilename(FileName);
- else
- SeenMissingHeader = true;
- }
-}
-
-void DFGImpl::HasInclude(SourceLocation Loc, StringRef SpelledFilename,
- bool IsAngled, const FileEntry *File,
- SrcMgr::CharacteristicKind FileType) {
- if (!File)
- return;
- StringRef Filename = File->getName();
- if (!FileMatchesDepCriteria(Filename.data(), FileType))
- return;
- AddFilename(llvm::sys::path::remove_leading_dotslash(Filename));
-}
-
-bool DFGImpl::AddFilename(StringRef Filename) {
- if (FilesSet.insert(Filename).second) {
- Files.push_back(Filename);
- return true;
- }
- return false;
+void DependencyFileGenerator::finishedMainFile(DiagnosticsEngine &Diags) {
+ outputDependencyFile(Diags);
}
/// Print the filename, with escaping or quoting that accommodates the three
@@ -429,7 +309,7 @@ static void PrintFilename(raw_ostream &OS, StringRef Filename,
}
}
-void DFGImpl::OutputDependencyFile() {
+void DependencyFileGenerator::outputDependencyFile(DiagnosticsEngine &Diags) {
if (SeenMissingHeader) {
llvm::sys::fs::remove(OutputFile);
return;
@@ -438,11 +318,14 @@ void DFGImpl::OutputDependencyFile() {
std::error_code EC;
llvm::raw_fd_ostream OS(OutputFile, EC, llvm::sys::fs::F_Text);
if (EC) {
- PP->getDiagnostics().Report(diag::err_fe_error_opening) << OutputFile
- << EC.message();
+ Diags.Report(diag::err_fe_error_opening) << OutputFile << EC.message();
return;
}
+ outputDependencyFile(OS);
+}
+
+void DependencyFileGenerator::outputDependencyFile(llvm::raw_ostream &OS) {
// Write out the dependency targets, trying to avoid overly long
// lines when possible. We try our best to emit exactly the same
// dependency file as GCC (4.2), assuming the included files are the
@@ -470,6 +353,7 @@ void DFGImpl::OutputDependencyFile() {
// Now add each dependency in the order it was seen, but avoiding
// duplicates.
+ ArrayRef<std::string> Files = getDependencies();
for (StringRef File : Files) {
// Start a new line if this would exceed the column limit. Make
// sure to leave space for a trailing " \" in case we need to
@@ -497,20 +381,3 @@ void DFGImpl::OutputDependencyFile() {
}
}
}
-
-bool DFGASTReaderListener::visitInputFile(llvm::StringRef Filename,
- bool IsSystem, bool IsOverridden,
- bool IsExplicitModule) {
- assert(!IsSystem || needsSystemInputFileVisitation());
- if (IsOverridden || IsExplicitModule)
- return true;
-
- Parent.AddFilename(Filename);
- return true;
-}
-
-void DFGASTReaderListener::visitModuleFile(llvm::StringRef Filename,
- serialization::ModuleKind Kind) {
- if (Parent.includeModuleFiles())
- Parent.AddFilename(Filename);
-}
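
The DependencyFile.cpp rewrite above folds the old DFGImpl PPCallbacks into DependencyCollector, so a generator now only overrides the filtering hook while the base class handles bookkeeping and de-duplication. A minimal sketch of a custom collector using the hooks visible in the hunks, assuming DependencyCollector is reachable via clang/Frontend/Utils.h (illustrative, not part of the vendor diff):

// Sketch: a collector that keeps only user headers; sawDependency() is the
// filter, addDependency()/getDependencies() live in the base class.
#include "clang/Frontend/Utils.h" // assumed location of DependencyCollector

class HeaderOnlyCollector : public clang::DependencyCollector {
  bool sawDependency(llvm::StringRef Filename, bool FromModule, bool IsSystem,
                     bool IsModuleFile, bool IsMissing) override {
    return !IsSystem && !IsModuleFile && !IsMissing &&
           Filename.endswith(".h");
  }
};
// Usage: HeaderOnlyCollector C; C.attachToPreprocessor(PP);
// after the main file finishes, C.getDependencies() returns the filtered list.
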
diff --git a/lib/Frontend/DependencyGraph.cpp b/lib/Frontend/DependencyGraph.cpp
index c6c9ac2ea2fa..90624323a000 100644
--- a/lib/Frontend/DependencyGraph.cpp
+++ b/lib/Frontend/DependencyGraph.cpp
@@ -1,9 +1,8 @@
//===--- DependencyGraph.cpp - Generate dependency file -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Frontend/DiagnosticRenderer.cpp b/lib/Frontend/DiagnosticRenderer.cpp
index 3bd86dc5beaa..22b957988f46 100644
--- a/lib/Frontend/DiagnosticRenderer.cpp
+++ b/lib/Frontend/DiagnosticRenderer.cpp
@@ -1,9 +1,8 @@
//===- DiagnosticRenderer.cpp - Diagnostic Pretty-Printing ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Frontend/FrontendAction.cpp b/lib/Frontend/FrontendAction.cpp
index f5226380b4dd..d724bbce3749 100644
--- a/lib/Frontend/FrontendAction.cpp
+++ b/lib/Frontend/FrontendAction.cpp
@@ -1,9 +1,8 @@
//===--- FrontendAction.cpp -----------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -361,7 +360,7 @@ static std::error_code collectModuleHeaderIncludes(
SmallString<128> DirNative;
llvm::sys::path::native(UmbrellaDir.Entry->getName(), DirNative);
- llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
for (llvm::vfs::recursive_directory_iterator Dir(FS, DirNative, EC), End;
Dir != End && !EC; Dir.increment(EC)) {
// Check whether this entry has an extension typically associated with
@@ -715,7 +714,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
SmallString<128> DirNative;
llvm::sys::path::native(PCHDir->getName(), DirNative);
bool Found = false;
- llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
for (llvm::vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC),
DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
@@ -925,7 +924,7 @@ failure:
return false;
}
-bool FrontendAction::Execute() {
+llvm::Error FrontendAction::Execute() {
CompilerInstance &CI = getCompilerInstance();
if (CI.hasFrontendTimer()) {
@@ -940,12 +939,18 @@ bool FrontendAction::Execute() {
CI.hasPreprocessor()) {
StringRef Cache =
CI.getPreprocessor().getHeaderSearchInfo().getModuleCachePath();
- if (!Cache.empty())
- GlobalModuleIndex::writeIndex(CI.getFileManager(),
- CI.getPCHContainerReader(), Cache);
+ if (!Cache.empty()) {
+ if (llvm::Error Err = GlobalModuleIndex::writeIndex(
+ CI.getFileManager(), CI.getPCHContainerReader(), Cache)) {
+ // FIXME this drops the error on the floor, but
+ // Index/pch-from-libclang.c seems to rely on dropping at least some of
+ // the error conditions!
+ consumeError(std::move(Err));
+ }
+ }
}
- return true;
+ return llvm::Error::success();
}
void FrontendAction::EndSourceFile() {
@@ -1045,6 +1050,9 @@ PreprocessorFrontendAction::CreateASTConsumer(CompilerInstance &CI,
llvm_unreachable("Invalid CreateASTConsumer on preprocessor action!");
}
+bool WrapperFrontendAction::PrepareToExecuteAction(CompilerInstance &CI) {
+ return WrappedAction->PrepareToExecuteAction(CI);
+}
std::unique_ptr<ASTConsumer>
WrapperFrontendAction::CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) {
diff --git a/lib/Frontend/FrontendActions.cpp b/lib/Frontend/FrontendActions.cpp
index a407dfc162bb..e37afae5332a 100644
--- a/lib/Frontend/FrontendActions.cpp
+++ b/lib/Frontend/FrontendActions.cpp
@@ -1,9 +1,8 @@
//===--- FrontendActions.cpp ----------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -15,6 +14,7 @@
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/MultiplexConsumer.h"
#include "clang/Frontend/Utils.h"
+#include "clang/Lex/DependencyDirectivesSourceMinimizer.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
@@ -24,8 +24,8 @@
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
-#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/YAMLTraits.h"
+#include "llvm/Support/raw_ostream.h"
#include <memory>
#include <system_error>
@@ -74,11 +74,10 @@ ASTPrintAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
std::unique_ptr<ASTConsumer>
ASTDumpAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
- return CreateASTDumper(nullptr /*Dump to stdout.*/,
- CI.getFrontendOpts().ASTDumpFilter,
- CI.getFrontendOpts().ASTDumpDecls,
- CI.getFrontendOpts().ASTDumpAll,
- CI.getFrontendOpts().ASTDumpLookups);
+ const FrontendOptions &Opts = CI.getFrontendOpts();
+ return CreateASTDumper(nullptr /*Dump to stdout.*/, Opts.ASTDumpFilter,
+ Opts.ASTDumpDecls, Opts.ASTDumpAll,
+ Opts.ASTDumpLookups, Opts.ASTDumpFormat);
}
std::unique_ptr<ASTConsumer>
@@ -110,10 +109,10 @@ GeneratePCHAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
auto Buffer = std::make_shared<PCHBuffer>();
std::vector<std::unique_ptr<ASTConsumer>> Consumers;
Consumers.push_back(llvm::make_unique<PCHGenerator>(
- CI.getPreprocessor(), OutputFile, Sysroot,
- Buffer, FrontendOpts.ModuleFileExtensions,
- CI.getPreprocessorOpts().AllowPCHWithCompilerErrors,
- FrontendOpts.IncludeTimestamps));
+ CI.getPreprocessor(), CI.getModuleCache(), OutputFile, Sysroot, Buffer,
+ FrontendOpts.ModuleFileExtensions,
+ CI.getPreprocessorOpts().AllowPCHWithCompilerErrors,
+ FrontendOpts.IncludeTimestamps, +CI.getLangOpts().CacheGeneratedPCH));
Consumers.push_back(CI.getPCHContainerWriter().CreatePCHContainerGenerator(
CI, InFile, OutputFile, std::move(OS), Buffer));
@@ -140,7 +139,7 @@ GeneratePCHAction::CreateOutputFile(CompilerInstance &CI, StringRef InFile,
std::unique_ptr<raw_pwrite_stream> OS =
CI.createOutputFile(CI.getFrontendOpts().OutputFile, /*Binary=*/true,
/*RemoveFileOnSignal=*/false, InFile,
- /*Extension=*/"", /*useTemporary=*/true);
+ /*Extension=*/"", /*UseTemporary=*/true);
if (!OS)
return nullptr;
@@ -173,11 +172,13 @@ GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI,
std::vector<std::unique_ptr<ASTConsumer>> Consumers;
Consumers.push_back(llvm::make_unique<PCHGenerator>(
- CI.getPreprocessor(), OutputFile, Sysroot,
- Buffer, CI.getFrontendOpts().ModuleFileExtensions,
- /*AllowASTWithErrors=*/false,
- /*IncludeTimestamps=*/
- +CI.getFrontendOpts().BuildingImplicitModule));
+ CI.getPreprocessor(), CI.getModuleCache(), OutputFile, Sysroot, Buffer,
+ CI.getFrontendOpts().ModuleFileExtensions,
+ /*AllowASTWithErrors=*/false,
+ /*IncludeTimestamps=*/
+ +CI.getFrontendOpts().BuildingImplicitModule,
+ /*ShouldCacheASTInMemory=*/
+ +CI.getFrontendOpts().BuildingImplicitModule));
Consumers.push_back(CI.getPCHContainerWriter().CreatePCHContainerGenerator(
CI, InFile, OutputFile, std::move(OS), Buffer));
return llvm::make_unique<MultiplexConsumer>(std::move(Consumers));
@@ -214,14 +215,14 @@ GenerateModuleFromModuleMapAction::CreateOutputFile(CompilerInstance &CI,
// We use a temporary to avoid race conditions.
return CI.createOutputFile(CI.getFrontendOpts().OutputFile, /*Binary=*/true,
/*RemoveFileOnSignal=*/false, InFile,
- /*Extension=*/"", /*useTemporary=*/true,
+ /*Extension=*/"", /*UseTemporary=*/true,
/*CreateMissingDirectories=*/true);
}
bool GenerateModuleInterfaceAction::BeginSourceFileAction(
CompilerInstance &CI) {
- if (!CI.getLangOpts().ModulesTS) {
- CI.getDiagnostics().Report(diag::err_module_interface_requires_modules_ts);
+ if (!CI.getLangOpts().ModulesTS && !CI.getLangOpts().CPlusPlusModules) {
+ CI.getDiagnostics().Report(diag::err_module_interface_requires_cpp_modules);
return false;
}
@@ -238,7 +239,7 @@ GenerateModuleInterfaceAction::CreateOutputFile(CompilerInstance &CI,
bool GenerateHeaderModuleAction::PrepareToExecuteAction(
CompilerInstance &CI) {
- if (!CI.getLangOpts().Modules && !CI.getLangOpts().ModulesTS) {
+ if (!CI.getLangOpts().Modules) {
CI.getDiagnostics().Report(diag::err_header_module_requires_modules);
return false;
}
@@ -287,7 +288,7 @@ bool GenerateHeaderModuleAction::BeginSourceFileAction(
const DirectoryLookup *CurDir = nullptr;
const FileEntry *FE = HS.LookupFile(
Name, SourceLocation(), /*Angled*/ false, nullptr, CurDir,
- None, nullptr, nullptr, nullptr, nullptr, nullptr);
+ None, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr);
if (!FE) {
CI.getDiagnostics().Report(diag::err_module_header_file_not_found)
<< Name;
@@ -330,8 +331,8 @@ void VerifyPCHAction::ExecuteAction() {
bool Preamble = CI.getPreprocessorOpts().PrecompiledPreambleBytes.first != 0;
const std::string &Sysroot = CI.getHeaderSearchOpts().Sysroot;
std::unique_ptr<ASTReader> Reader(new ASTReader(
- CI.getPreprocessor(), &CI.getASTContext(), CI.getPCHContainerReader(),
- CI.getFrontendOpts().ModuleFileExtensions,
+ CI.getPreprocessor(), CI.getModuleCache(), &CI.getASTContext(),
+ CI.getPCHContainerReader(), CI.getFrontendOpts().ModuleFileExtensions,
Sysroot.empty() ? "" : Sysroot.c_str(),
/*DisableValidation*/ false,
/*AllowPCHWithCompilerErrors*/ false,
@@ -908,3 +909,33 @@ void DumpCompilerOptionsAction::ExecuteAction() {
OS << "}";
}
+
+void PrintDependencyDirectivesSourceMinimizerAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+ SourceManager &SM = CI.getPreprocessor().getSourceManager();
+ const llvm::MemoryBuffer *FromFile = SM.getBuffer(SM.getMainFileID());
+
+ llvm::SmallString<1024> Output;
+ llvm::SmallVector<minimize_source_to_dependency_directives::Token, 32> Toks;
+ if (minimizeSourceToDependencyDirectives(
+ FromFile->getBuffer(), Output, Toks, &CI.getDiagnostics(),
+ SM.getLocForStartOfFile(SM.getMainFileID()))) {
+ assert(CI.getDiagnostics().hasErrorOccurred() &&
+ "no errors reported for failure");
+
+ // Preprocess the source when verifying the diagnostics to capture the
+ // 'expected' comments.
+ if (CI.getDiagnosticOpts().VerifyDiagnostics) {
+ // Make sure we don't emit new diagnostics!
+ CI.getDiagnostics().setSuppressAllDiagnostics();
+ Preprocessor &PP = getCompilerInstance().getPreprocessor();
+ PP.EnterMainSourceFile();
+ Token Tok;
+ do {
+ PP.Lex(Tok);
+ } while (Tok.isNot(tok::eof));
+ }
+ return;
+ }
+ llvm::outs() << Output;
+}
diff --git a/lib/Frontend/FrontendOptions.cpp b/lib/Frontend/FrontendOptions.cpp
index 0744d447e816..6ccb2c395604 100644
--- a/lib/Frontend/FrontendOptions.cpp
+++ b/lib/Frontend/FrontendOptions.cpp
@@ -1,9 +1,8 @@
//===- FrontendOptions.cpp ------------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Frontend/FrontendTiming.cpp b/lib/Frontend/FrontendTiming.cpp
index 9ea7347e7797..e3f44c9999f6 100644
--- a/lib/Frontend/FrontendTiming.cpp
+++ b/lib/Frontend/FrontendTiming.cpp
@@ -1,9 +1,8 @@
-//===- FronendTiming.cpp - Implements Frontend timing utils --------------===//
+//===- FrontendTiming.cpp - Implements Frontend timing utils -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Frontend/HeaderIncludeGen.cpp b/lib/Frontend/HeaderIncludeGen.cpp
index 9dc107c9d546..d60f5333bf5b 100644
--- a/lib/Frontend/HeaderIncludeGen.cpp
+++ b/lib/Frontend/HeaderIncludeGen.cpp
@@ -1,9 +1,8 @@
-//===--- HeaderIncludes.cpp - Generate Header Includes --------------------===//
+//===-- HeaderIncludeGen.cpp - Generate Header Includes -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Frontend/InitHeaderSearch.cpp b/lib/Frontend/InitHeaderSearch.cpp
index ac3bb713ddcc..d65d13489dc4 100644
--- a/lib/Frontend/InitHeaderSearch.cpp
+++ b/lib/Frontend/InitHeaderSearch.cpp
@@ -1,9 +1,8 @@
//===--- InitHeaderSearch.cpp - Initialize header search paths ------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -211,6 +210,10 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
const HeaderSearchOptions &HSOpts) {
llvm::Triple::OSType os = triple.getOS();
+ if (triple.isOSDarwin()) {
+ llvm_unreachable("Include management is handled in the driver.");
+ }
+
if (HSOpts.UseStandardSystemIncludes) {
switch (os) {
case llvm::Triple::CloudABI:
@@ -366,49 +369,7 @@ void InitHeaderSearch::AddDefaultCPlusPlusIncludePaths(
// FIXME: temporary hack: hard-coded paths.
if (triple.isOSDarwin()) {
- bool IsBaseFound = true;
- switch (triple.getArch()) {
- default: break;
-
- case llvm::Triple::ppc:
- case llvm::Triple::ppc64:
- IsBaseFound = AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
- "powerpc-apple-darwin10", "",
- "ppc64", triple);
- IsBaseFound |= AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.0.0",
- "powerpc-apple-darwin10", "",
- "ppc64", triple);
- break;
-
- case llvm::Triple::x86:
- case llvm::Triple::x86_64:
- IsBaseFound = AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
- "i686-apple-darwin10", "",
- "x86_64", triple);
- IsBaseFound |= AddGnuCPlusPlusIncludePaths(
- "/usr/include/c++/4.0.0", "i686-apple-darwin8", "", "", triple);
- break;
-
- case llvm::Triple::arm:
- case llvm::Triple::thumb:
- IsBaseFound = AddGnuCPlusPlusIncludePaths(
- "/usr/include/c++/4.2.1", "arm-apple-darwin10", "v7", "", triple);
- IsBaseFound |= AddGnuCPlusPlusIncludePaths(
- "/usr/include/c++/4.2.1", "arm-apple-darwin10", "v6", "", triple);
- break;
-
- case llvm::Triple::aarch64:
- IsBaseFound = AddGnuCPlusPlusIncludePaths(
- "/usr/include/c++/4.2.1", "arm64-apple-darwin10", "", "", triple);
- break;
- }
- // Warn when compiling pure C++ / Objective-C++ only.
- if (!IsBaseFound &&
- !(LangOpts.CUDA || LangOpts.OpenCL || LangOpts.RenderScript)) {
- Headers.getDiags().Report(SourceLocation(),
- diag::warn_stdlibcxx_not_found);
- }
- return;
+ llvm_unreachable("Include management is handled in the driver.");
}
switch (os) {
@@ -433,14 +394,6 @@ void InitHeaderSearch::AddDefaultCPlusPlusIncludePaths(
case llvm::Triple::DragonFly:
AddPath("/usr/include/c++/5.0", CXXSystem, false);
break;
- case llvm::Triple::OpenBSD: {
- std::string t = triple.getTriple();
- if (t.substr(0, 6) == "x86_64")
- t.replace(0, 6, "amd64");
- AddGnuCPlusPlusIncludePaths("/usr/include/g++",
- t, "", "", triple);
- break;
- }
case llvm::Triple::Minix:
AddGnuCPlusPlusIncludePaths("/usr/gnu/include/c++/4.4.3",
"", "", "", triple);
@@ -461,9 +414,11 @@ void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
default:
break; // Everything else continues to use this routine's logic.
+ case llvm::Triple::Emscripten:
case llvm::Triple::Linux:
case llvm::Triple::Hurd:
case llvm::Triple::Solaris:
+ case llvm::Triple::WASI:
return;
case llvm::Triple::Win32:
@@ -471,6 +426,22 @@ void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
triple.isOSBinFormatMachO())
return;
break;
+
+ case llvm::Triple::UnknownOS:
+ if (triple.getArch() == llvm::Triple::wasm32 ||
+ triple.getArch() == llvm::Triple::wasm64)
+ return;
+ break;
+ }
+
+ // All header search logic is handled in the Driver for Darwin.
+ if (triple.isOSDarwin()) {
+ if (HSOpts.UseStandardSystemIncludes) {
+ // Add the default framework include paths on Darwin.
+ AddPath("/System/Library/Frameworks", System, true);
+ AddPath("/Library/Frameworks", System, true);
+ }
+ return;
}
if (Lang.CPlusPlus && !Lang.AsmPreprocessor &&
@@ -483,14 +454,6 @@ void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
}
AddDefaultCIncludePaths(triple, HSOpts);
-
- // Add the default framework include paths on Darwin.
- if (HSOpts.UseStandardSystemIncludes) {
- if (triple.isOSDarwin()) {
- AddPath("/System/Library/Frameworks", System, true);
- AddPath("/Library/Frameworks", System, true);
- }
- }
}
/// RemoveDuplicates - If there are duplicate directory entries in the specified
diff --git a/lib/Frontend/InitPreprocessor.cpp b/lib/Frontend/InitPreprocessor.cpp
index 66807b097d40..3906e2ae1b98 100644
--- a/lib/Frontend/InitPreprocessor.cpp
+++ b/lib/Frontend/InitPreprocessor.cpp
@@ -1,9 +1,8 @@
//===--- InitPreprocessor.cpp - PP initialization code. ---------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -123,10 +122,10 @@ static void DefineFloatMacros(MacroBuilder &Builder, StringRef Prefix,
"4.94065645841246544176568792868221e-324",
"1.92592994438723585305597794258492732e-34");
int MantissaDigits = PickFP(Sem, 11, 24, 53, 64, 106, 113);
- int Min10Exp = PickFP(Sem, -13, -37, -307, -4931, -291, -4931);
+ int Min10Exp = PickFP(Sem, -4, -37, -307, -4931, -291, -4931);
int Max10Exp = PickFP(Sem, 4, 38, 308, 4932, 308, 4932);
- int MinExp = PickFP(Sem, -14, -125, -1021, -16381, -968, -16381);
- int MaxExp = PickFP(Sem, 15, 128, 1024, 16384, 1024, 16384);
+ int MinExp = PickFP(Sem, -13, -125, -1021, -16381, -968, -16381);
+ int MaxExp = PickFP(Sem, 16, 128, 1024, 16384, 1024, 16384);
Min = PickFP(Sem, "6.103515625e-5", "1.17549435e-38", "2.2250738585072014e-308",
"3.36210314311209350626e-4932",
"2.00416836000897277799610805135016e-292",
@@ -412,7 +411,7 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
if (LangOpts.OpenCLCPlusPlusVersion == 100)
Builder.defineMacro("__OPENCL_CPP_VERSION__", "100");
else
- llvm_unreachable("Unsupported OpenCL C++ version");
+ llvm_unreachable("Unsupported C++ version for OpenCL");
Builder.defineMacro("__CL_CPP_VERSION_1_0__", "100");
} else {
// OpenCL v1.0 and v1.1 do not have a predefined macro to indicate the
@@ -541,6 +540,8 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_template_template_args", "201611L");
// C++20 features.
+ if (LangOpts.CPlusPlus2a)
+ Builder.defineMacro("__cpp_conditional_explicit", "201806L");
if (LangOpts.Char8)
Builder.defineMacro("__cpp_char8_t", "201811L");
Builder.defineMacro("__cpp_impl_destroying_delete", "201806L");
@@ -548,7 +549,7 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
// TS features.
if (LangOpts.ConceptsTS)
Builder.defineMacro("__cpp_experimental_concepts", "1L");
- if (LangOpts.CoroutinesTS)
+ if (LangOpts.Coroutines)
Builder.defineMacro("__cpp_coroutines", "201703L");
}
@@ -603,10 +604,9 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
// Support for #pragma redefine_extname (Sun compatibility)
Builder.defineMacro("__PRAGMA_REDEFINE_EXTNAME", "1");
- // As sad as it is, enough software depends on the __VERSION__ for version
- // checks that it is necessary to report 4.2.1 (the base GCC version we claim
- // compatibility with) first.
- Builder.defineMacro("__VERSION__", "\"4.2.1 Compatible " +
+ // Previously this macro was set to a string aiming to achieve compatibility
+ // with GCC 4.2.1. Now, just return the full Clang version
+ Builder.defineMacro("__VERSION__", "\"" +
Twine(getClangFullCPPVersion()) + "\"");
// Initialize language-specific preprocessor defines.
@@ -831,7 +831,8 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
DefineFmt("__UINTPTR", TI.getUIntPtrType(), TI, Builder);
DefineTypeWidth("__UINTPTR_WIDTH__", TI.getUIntPtrType(), TI, Builder);
- DefineFloatMacros(Builder, "FLT16", &TI.getHalfFormat(), "F16");
+ if (TI.hasFloat16Type())
+ DefineFloatMacros(Builder, "FLT16", &TI.getHalfFormat(), "F16");
DefineFloatMacros(Builder, "FLT", &TI.getFloatFormat(), "F");
DefineFloatMacros(Builder, "DBL", &TI.getDoubleFormat(), "");
DefineFloatMacros(Builder, "LDBL", &TI.getLongDoubleFormat(), "L");
@@ -1057,16 +1058,20 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__CLANG_CUDA_APPROX_TRANSCENDENTALS__");
}
+ // Define a macro indicating that the source file is being compiled with a
+ // SYCL device compiler which doesn't produce host binary.
+ if (LangOpts.SYCLIsDevice) {
+ Builder.defineMacro("__SYCL_DEVICE_ONLY__", "1");
+ }
+
// OpenCL definitions.
if (LangOpts.OpenCL) {
-#define OPENCLEXT(Ext) \
- if (TI.getSupportedOpenCLOpts().isSupported(#Ext, \
- LangOpts.OpenCLVersion)) \
- Builder.defineMacro(#Ext);
+#define OPENCLEXT(Ext) \
+ if (TI.getSupportedOpenCLOpts().isSupported(#Ext, LangOpts)) \
+ Builder.defineMacro(#Ext);
#include "clang/Basic/OpenCLExtensions.def"
- auto Arch = TI.getTriple().getArch();
- if (Arch == llvm::Triple::spir || Arch == llvm::Triple::spir64)
+ if (TI.getTriple().isSPIR())
Builder.defineMacro("__IMAGE_SUPPORT__");
}
diff --git a/lib/Frontend/InterfaceStubFunctionsConsumer.cpp b/lib/Frontend/InterfaceStubFunctionsConsumer.cpp
new file mode 100644
index 000000000000..fbba9ae4d6a7
--- /dev/null
+++ b/lib/Frontend/InterfaceStubFunctionsConsumer.cpp
@@ -0,0 +1,378 @@
+//===--- InterfaceStubFunctionsConsumer.cpp -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Mangle.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/Sema/TemplateInstCallback.h"
+#include "llvm/BinaryFormat/ELF.h"
+
+using namespace clang;
+
+class InterfaceStubFunctionsConsumer : public ASTConsumer {
+ CompilerInstance &Instance;
+ StringRef InFile;
+ StringRef Format;
+ std::set<std::string> ParsedTemplates;
+
+ enum RootDeclOrigin { TopLevel = 0, FromTU = 1, IsLate = 2 };
+ struct MangledSymbol {
+ std::string ParentName;
+ uint8_t Type;
+ uint8_t Binding;
+ std::vector<std::string> Names;
+ MangledSymbol() = delete;
+
+ MangledSymbol(const std::string &ParentName, uint8_t Type, uint8_t Binding,
+ std::vector<std::string> Names)
+ : ParentName(ParentName), Type(Type), Binding(Binding), Names(Names) {}
+ };
+ using MangledSymbols = std::map<const NamedDecl *, MangledSymbol>;
+
+ bool WriteNamedDecl(const NamedDecl *ND, MangledSymbols &Symbols, int RDO) {
+ // Here we filter out anything that's not set to DefaultVisibility.
+ // DefaultVisibility is set on a decl when -fvisibility is not specified on
+ // the command line (or specified as default) and the decl does not have
+ // __attribute__((visibility("hidden"))) set or when the command line
+ // argument is set to hidden but the decl explicitly has
+ // __attribute__((visibility ("default"))) set. We do this so that the user
+ // can have fine grain control of what they want to expose in the stub.
+ auto isVisible = [](const NamedDecl *ND) -> bool {
+ return ND->getVisibility() == DefaultVisibility;
+ };
+
+ auto ignoreDecl = [this, isVisible](const NamedDecl *ND) -> bool {
+ if (!isVisible(ND))
+ return true;
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(ND))
+ if ((VD->getStorageClass() == StorageClass::SC_Extern) ||
+ (VD->getStorageClass() == StorageClass::SC_Static &&
+ VD->getParentFunctionOrMethod() == nullptr))
+ return true;
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ if (FD->isInlined() && !isa<CXXMethodDecl>(FD) &&
+ !Instance.getLangOpts().GNUInline)
+ return true;
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ if (const auto *RC = dyn_cast<CXXRecordDecl>(MD->getParent()))
+ if (isa<ClassTemplateDecl>(RC->getParent()) || !isVisible(RC))
+ return true;
+ if (MD->isDependentContext() || !MD->hasBody())
+ return true;
+ }
+ if (FD->getStorageClass() == StorageClass::SC_Static)
+ return true;
+ }
+ return false;
+ };
+
+ auto getParentFunctionDecl = [](const NamedDecl *ND) -> const NamedDecl * {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(ND))
+ if (const auto *FD =
+ dyn_cast_or_null<FunctionDecl>(VD->getParentFunctionOrMethod()))
+ return FD;
+ return nullptr;
+ };
+
+ auto getMangledNames = [](const NamedDecl *ND) -> std::vector<std::string> {
+ if (!ND)
+ return {""};
+ ASTNameGenerator NameGen(ND->getASTContext());
+ std::vector<std::string> MangledNames = NameGen.getAllManglings(ND);
+ if (isa<CXXConstructorDecl>(ND) || isa<CXXDestructorDecl>(ND))
+ return MangledNames;
+#ifdef EXPENSIVE_CHECKS
+ assert(MangledNames.size() <= 1 && "Expected only one name mangling.");
+#endif
+ return {NameGen.getName(ND)};
+ };
+
+ if (!(RDO & FromTU))
+ return true;
+ if (Symbols.find(ND) != Symbols.end())
+ return true;
+ // - Currently have not figured out how to produce the names for FieldDecls.
+ // - Do not want to produce symbols for function parameters.
+ if (isa<FieldDecl>(ND) || isa<ParmVarDecl>(ND))
+ return true;
+
+ const NamedDecl *ParentDecl = getParentFunctionDecl(ND);
+ if ((ParentDecl && ignoreDecl(ParentDecl)) || ignoreDecl(ND))
+ return true;
+
+ if (RDO & IsLate) {
+ Instance.getDiagnostics().Report(diag::err_asm_invalid_type_in_input)
+ << "Generating Interface Stubs is not supported with "
+ "delayed template parsing.";
+ } else {
+ if (const auto *FD = dyn_cast<FunctionDecl>(ND))
+ if (FD->isDependentContext())
+ return true;
+
+ const bool IsWeak = (ND->hasAttr<WeakAttr>() ||
+ ND->hasAttr<WeakRefAttr>() || ND->isWeakImported());
+
+ Symbols.insert(std::make_pair(
+ ND,
+ MangledSymbol(getMangledNames(ParentDecl).front(),
+ // Type:
+ isa<VarDecl>(ND) ? llvm::ELF::STT_OBJECT
+ : llvm::ELF::STT_FUNC,
+ // Binding:
+ IsWeak ? llvm::ELF::STB_WEAK : llvm::ELF::STB_GLOBAL,
+ getMangledNames(ND))));
+ }
+ return true;
+ }
+
+ void
+ HandleDecls(const llvm::iterator_range<DeclContext::decl_iterator> &Decls,
+ MangledSymbols &Symbols, int RDO) {
+ for (const auto *D : Decls)
+ HandleNamedDecl(dyn_cast<NamedDecl>(D), Symbols, RDO);
+ }
+
+ void HandleTemplateSpecializations(const FunctionTemplateDecl &FTD,
+ MangledSymbols &Symbols, int RDO) {
+ for (const auto *D : FTD.specializations())
+ HandleNamedDecl(dyn_cast<NamedDecl>(D), Symbols, RDO);
+ }
+
+ void HandleTemplateSpecializations(const ClassTemplateDecl &CTD,
+ MangledSymbols &Symbols, int RDO) {
+ for (const auto *D : CTD.specializations())
+ HandleNamedDecl(dyn_cast<NamedDecl>(D), Symbols, RDO);
+ }
+
+ bool HandleNamedDecl(const NamedDecl *ND, MangledSymbols &Symbols, int RDO) {
+ if (!ND)
+ return false;
+
+ switch (ND->getKind()) {
+ default:
+ break;
+ case Decl::Kind::Namespace:
+ HandleDecls(cast<NamespaceDecl>(ND)->decls(), Symbols, RDO);
+ return true;
+ case Decl::Kind::CXXRecord:
+ HandleDecls(cast<CXXRecordDecl>(ND)->decls(), Symbols, RDO);
+ return true;
+ case Decl::Kind::ClassTemplateSpecialization:
+ HandleDecls(cast<ClassTemplateSpecializationDecl>(ND)->decls(), Symbols,
+ RDO);
+ return true;
+ case Decl::Kind::ClassTemplate:
+ HandleTemplateSpecializations(*cast<ClassTemplateDecl>(ND), Symbols, RDO);
+ return true;
+ case Decl::Kind::FunctionTemplate:
+ HandleTemplateSpecializations(*cast<FunctionTemplateDecl>(ND), Symbols,
+ RDO);
+ return true;
+ case Decl::Kind::TemplateTypeParm:
+ return true;
+ case Decl::Kind::Var:
+ case Decl::Kind::ParmVar:
+ case Decl::Kind::CXXMethod:
+ case Decl::Kind::CXXConstructor:
+ case Decl::Kind::CXXDestructor:
+ case Decl::Kind::Function:
+ case Decl::Kind::Field:
+ if (WriteNamedDecl(ND, Symbols, RDO))
+ return true;
+ }
+
+ // While interface stubs are in the development stage, it's probably best to
+ // catch anything that's not a VarDecl or Template/FunctionDecl.
+ Instance.getDiagnostics().Report(diag::err_asm_invalid_type_in_input)
+ << "Expected a function or function template decl.";
+ return false;
+ }
+
+public:
+ InterfaceStubFunctionsConsumer(CompilerInstance &Instance, StringRef InFile,
+ StringRef Format)
+ : Instance(Instance), InFile(InFile), Format(Format) {}
+
+ void HandleTranslationUnit(ASTContext &context) override {
+ struct Visitor : public RecursiveASTVisitor<Visitor> {
+ bool VisitNamedDecl(NamedDecl *ND) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(ND))
+ if (FD->isLateTemplateParsed()) {
+ LateParsedDecls.insert(FD);
+ return true;
+ }
+
+ if (const auto *VD = dyn_cast<ValueDecl>(ND)) {
+ ValueDecls.insert(VD);
+ return true;
+ }
+
+ NamedDecls.insert(ND);
+ return true;
+ }
+
+ std::set<const NamedDecl *> LateParsedDecls;
+ std::set<NamedDecl *> NamedDecls;
+ std::set<const ValueDecl *> ValueDecls;
+ } v;
+
+ v.TraverseDecl(context.getTranslationUnitDecl());
+
+ MangledSymbols Symbols;
+ auto OS = Instance.createDefaultOutputFile(/*Binary=*/false, InFile, "ifs");
+ if (!OS)
+ return;
+
+ if (Instance.getLangOpts().DelayedTemplateParsing) {
+ clang::Sema &S = Instance.getSema();
+ for (const auto *FD : v.LateParsedDecls) {
+ clang::LateParsedTemplate &LPT =
+ *S.LateParsedTemplateMap.find(cast<FunctionDecl>(FD))->second;
+ S.LateTemplateParser(S.OpaqueParser, LPT);
+ HandleNamedDecl(FD, Symbols, (FromTU | IsLate));
+ }
+ }
+
+ for (const NamedDecl *ND : v.ValueDecls)
+ HandleNamedDecl(ND, Symbols, FromTU);
+ for (const NamedDecl *ND : v.NamedDecls)
+ HandleNamedDecl(ND, Symbols, FromTU);
+
+ auto writeIfoYaml = [this](const llvm::Triple &T,
+ const MangledSymbols &Symbols,
+ const ASTContext &context, StringRef Format,
+ raw_ostream &OS) -> void {
+ OS << "--- !" << Format << "\n";
+ OS << "FileHeader:\n";
+ OS << " Class: ELFCLASS";
+ OS << (T.isArch64Bit() ? "64" : "32");
+ OS << "\n";
+ OS << " Data: ELFDATA2";
+ OS << (T.isLittleEndian() ? "LSB" : "MSB");
+ OS << "\n";
+ OS << " Type: ET_REL\n";
+ OS << " Machine: "
+ << llvm::StringSwitch<llvm::StringRef>(T.getArchName())
+ .Case("x86_64", "EM_X86_64")
+ .Case("i386", "EM_386")
+ .Case("i686", "EM_386")
+ .Case("aarch64", "EM_AARCH64")
+ .Case("amdgcn", "EM_AMDGPU")
+ .Case("r600", "EM_AMDGPU")
+ .Case("arm", "EM_ARM")
+ .Case("thumb", "EM_ARM")
+ .Case("avr", "EM_AVR")
+ .Case("mips", "EM_MIPS")
+ .Case("mipsel", "EM_MIPS")
+ .Case("mips64", "EM_MIPS")
+ .Case("mips64el", "EM_MIPS")
+ .Case("msp430", "EM_MSP430")
+ .Case("ppc", "EM_PPC")
+ .Case("ppc64", "EM_PPC64")
+ .Case("ppc64le", "EM_PPC64")
+ .Case("x86", T.isOSIAMCU() ? "EM_IAMCU" : "EM_386")
+ .Case("x86_64", "EM_X86_64")
+ .Default("EM_NONE")
+ << "\nSymbols:\n";
+ for (const auto &E : Symbols) {
+ const MangledSymbol &Symbol = E.second;
+ for (auto Name : Symbol.Names) {
+ OS << " - Name: "
+ << (Symbol.ParentName.empty() || Instance.getLangOpts().CPlusPlus
+ ? ""
+ : (Symbol.ParentName + "."))
+ << Name << "\n"
+ << " Type: STT_";
+ switch (Symbol.Type) {
+ default:
+ case llvm::ELF::STT_NOTYPE:
+ OS << "NOTYPE";
+ break;
+ case llvm::ELF::STT_OBJECT:
+ OS << "OBJECT";
+ break;
+ case llvm::ELF::STT_FUNC:
+ OS << "FUNC";
+ break;
+ }
+ OS << "\n Binding: STB_"
+ << ((Symbol.Binding == llvm::ELF::STB_WEAK) ? "WEAK" : "GLOBAL")
+ << "\n";
+ }
+ }
+ OS << "...\n";
+ OS.flush();
+ };
+
+ auto writeIfoElfAbiYaml =
+ [this](const llvm::Triple &T, const MangledSymbols &Symbols,
+ const ASTContext &context, StringRef Format,
+ raw_ostream &OS) -> void {
+ OS << "--- !" << Format << "\n";
+ OS << "TbeVersion: 1.0\n";
+ OS << "Arch: " << T.getArchName() << "\n";
+ OS << "Symbols:\n";
+ for (const auto &E : Symbols) {
+ const MangledSymbol &Symbol = E.second;
+ for (auto Name : Symbol.Names) {
+ OS << " "
+ << (Symbol.ParentName.empty() || Instance.getLangOpts().CPlusPlus
+ ? ""
+ : (Symbol.ParentName + "."))
+ << Name << ": { Type: ";
+ switch (Symbol.Type) {
+ default:
+ llvm_unreachable(
+ "clang -emit-iterface-stubs: Unexpected symbol type.");
+ case llvm::ELF::STT_NOTYPE:
+ OS << "NoType";
+ break;
+ case llvm::ELF::STT_OBJECT: {
+ auto VD = cast<ValueDecl>(E.first)->getType();
+ OS << "Object, Size: "
+ << context.getTypeSizeInChars(VD).getQuantity();
+ break;
+ }
+ case llvm::ELF::STT_FUNC:
+ OS << "Func";
+ break;
+ }
+ if (Symbol.Binding == llvm::ELF::STB_WEAK)
+ OS << ", Weak: true";
+ OS << " }\n";
+ }
+ }
+ OS << "...\n";
+ OS.flush();
+ };
+
+ if (Format == "experimental-yaml-elf-v1")
+ writeIfoYaml(Instance.getTarget().getTriple(), Symbols, context, Format,
+ *OS);
+ else
+ writeIfoElfAbiYaml(Instance.getTarget().getTriple(), Symbols, context,
+ Format, *OS);
+ }
+};
+
+std::unique_ptr<ASTConsumer>
+GenerateInterfaceYAMLExpV1Action::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return llvm::make_unique<InterfaceStubFunctionsConsumer>(
+ CI, InFile, "experimental-yaml-elf-v1");
+}
+
+std::unique_ptr<ASTConsumer>
+GenerateInterfaceTBEExpV1Action::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return llvm::make_unique<InterfaceStubFunctionsConsumer>(
+ CI, InFile, "experimental-tapi-elf-v1");
+}
diff --git a/lib/Frontend/LangStandards.cpp b/lib/Frontend/LangStandards.cpp
index 47023e58fa0b..05087eb41f01 100644
--- a/lib/Frontend/LangStandards.cpp
+++ b/lib/Frontend/LangStandards.cpp
@@ -1,9 +1,8 @@
//===--- LangStandards.cpp - Language Standard Definitions ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Frontend/LayoutOverrideSource.cpp b/lib/Frontend/LayoutOverrideSource.cpp
index b31fbd087ba7..76762d58fe25 100644
--- a/lib/Frontend/LayoutOverrideSource.cpp
+++ b/lib/Frontend/LayoutOverrideSource.cpp
@@ -1,9 +1,8 @@
//===--- LayoutOverrideSource.cpp --Override Record Layouts ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "clang/Frontend/LayoutOverrideSource.h"
diff --git a/lib/Frontend/LogDiagnosticPrinter.cpp b/lib/Frontend/LogDiagnosticPrinter.cpp
index 9998f65457cf..4bac17553999 100644
--- a/lib/Frontend/LogDiagnosticPrinter.cpp
+++ b/lib/Frontend/LogDiagnosticPrinter.cpp
@@ -1,9 +1,8 @@
//===--- LogDiagnosticPrinter.cpp - Log Diagnostic Printer ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Frontend/ModuleDependencyCollector.cpp b/lib/Frontend/ModuleDependencyCollector.cpp
index fa8efcc3b53c..c1d8c0d9eb24 100644
--- a/lib/Frontend/ModuleDependencyCollector.cpp
+++ b/lib/Frontend/ModuleDependencyCollector.cpp
@@ -1,9 +1,8 @@
//===--- ModuleDependencyCollector.cpp - Collect module dependencies ------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -99,24 +98,6 @@ struct ModuleDependencyMMCallbacks : public ModuleMapCallbacks {
}
-// TODO: move this to Support/Path.h and check for HAVE_REALPATH?
-static bool real_path(StringRef SrcPath, SmallVectorImpl<char> &RealPath) {
-#ifdef LLVM_ON_UNIX
- char CanonicalPath[PATH_MAX];
-
- // TODO: emit a warning in case this fails...?
- if (!realpath(SrcPath.str().c_str(), CanonicalPath))
- return false;
-
- SmallString<256> RPath(CanonicalPath);
- RealPath.swap(RPath);
- return true;
-#else
- // FIXME: Add support for systems without realpath.
- return false;
-#endif
-}
-
void ModuleDependencyCollector::attachToASTReader(ASTReader &R) {
R.addListener(llvm::make_unique<ModuleDependencyListener>(*this));
}
@@ -131,7 +112,7 @@ void ModuleDependencyCollector::attachToPreprocessor(Preprocessor &PP) {
static bool isCaseSensitivePath(StringRef Path) {
SmallString<256> TmpDest = Path, UpperDest, RealDest;
// Remove component traversals, links, etc.
- if (!real_path(Path, TmpDest))
+ if (llvm::sys::fs::real_path(Path, TmpDest))
return true; // Current default value in vfs.yaml
Path = TmpDest;
@@ -141,7 +122,7 @@ static bool isCaseSensitivePath(StringRef Path) {
// already expects when sensitivity isn't setup.
for (auto &C : Path)
UpperDest.push_back(toUppercase(C));
- if (real_path(UpperDest, RealDest) && Path.equals(RealDest))
+ if (!llvm::sys::fs::real_path(UpperDest, RealDest) && Path.equals(RealDest))
return false;
return true;
}
@@ -187,7 +168,7 @@ bool ModuleDependencyCollector::getRealPath(StringRef SrcPath,
// Computing the real path is expensive, cache the search through the
// parent path directory.
if (DirWithSymLink == SymLinkMap.end()) {
- if (!real_path(Dir, RealPath))
+ if (llvm::sys::fs::real_path(Dir, RealPath))
return false;
SymLinkMap[Dir] = RealPath.str();
} else {
diff --git a/lib/Frontend/MultiplexConsumer.cpp b/lib/Frontend/MultiplexConsumer.cpp
index c6e18d9cae21..ed7028769d34 100644
--- a/lib/Frontend/MultiplexConsumer.cpp
+++ b/lib/Frontend/MultiplexConsumer.cpp
@@ -1,9 +1,8 @@
//===- MultiplexConsumer.cpp - AST Consumer for PCH Generation --*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -104,6 +103,7 @@ public:
const ObjCInterfaceDecl *IFD) override;
void DeclarationMarkedUsed(const Decl *D) override;
void DeclarationMarkedOpenMPThreadPrivate(const Decl *D) override;
+ void DeclarationMarkedOpenMPAllocate(const Decl *D, const Attr *A) override;
void DeclarationMarkedOpenMPDeclareTarget(const Decl *D,
const Attr *Attr) override;
void RedefinedHiddenDefinition(const NamedDecl *D, Module *M) override;
@@ -209,6 +209,11 @@ void MultiplexASTMutationListener::DeclarationMarkedOpenMPThreadPrivate(
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->DeclarationMarkedOpenMPThreadPrivate(D);
}
+void MultiplexASTMutationListener::DeclarationMarkedOpenMPAllocate(
+ const Decl *D, const Attr *A) {
+ for (ASTMutationListener *L : Listeners)
+ L->DeclarationMarkedOpenMPAllocate(D, A);
+}
void MultiplexASTMutationListener::DeclarationMarkedOpenMPDeclareTarget(
const Decl *D, const Attr *Attr) {
for (auto *L : Listeners)
diff --git a/lib/Frontend/PrecompiledPreamble.cpp b/lib/Frontend/PrecompiledPreamble.cpp
index 1930af187e7a..276a9676eaa9 100644
--- a/lib/Frontend/PrecompiledPreamble.cpp
+++ b/lib/Frontend/PrecompiledPreamble.cpp
@@ -1,9 +1,8 @@
//===--- PrecompiledPreamble.cpp - Build precompiled preambles --*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -19,6 +18,7 @@
#include "clang/Frontend/FrontendActions.h"
#include "clang/Frontend/FrontendOptions.h"
#include "clang/Lex/Lexer.h"
+#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Serialization/ASTWriter.h"
#include "llvm/ADT/StringExtras.h"
@@ -157,9 +157,12 @@ private:
class PrecompilePreambleConsumer : public PCHGenerator {
public:
PrecompilePreambleConsumer(PrecompilePreambleAction &Action,
- const Preprocessor &PP, StringRef isysroot,
+ const Preprocessor &PP,
+ InMemoryModuleCache &ModuleCache,
+ StringRef isysroot,
std::unique_ptr<raw_ostream> Out)
- : PCHGenerator(PP, "", isysroot, std::make_shared<PCHBuffer>(),
+ : PCHGenerator(PP, ModuleCache, "", isysroot,
+ std::make_shared<PCHBuffer>(),
ArrayRef<std::shared_ptr<ModuleFileExtension>>(),
/*AllowASTWithErrors=*/true),
Action(Action), Out(std::move(Out)) {}
@@ -211,7 +214,7 @@ PrecompilePreambleAction::CreateASTConsumer(CompilerInstance &CI,
Sysroot.clear();
return llvm::make_unique<PrecompilePreambleConsumer>(
- *this, CI.getPreprocessor(), Sysroot, std::move(OS));
+ *this, CI.getPreprocessor(), CI.getModuleCache(), Sysroot, std::move(OS));
}
template <class T> bool moveOnNoError(llvm::ErrorOr<T> Val, T &Output) {
@@ -296,14 +299,13 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
// created. This complexity should be lifted elsewhere.
Clang->getTarget().adjust(Clang->getLangOpts());
- assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
- "Invocation must have exactly one source file!");
- assert(Clang->getFrontendOpts().Inputs[0].getKind().getFormat() ==
- InputKind::Source &&
- "FIXME: AST inputs not yet supported here!");
- assert(Clang->getFrontendOpts().Inputs[0].getKind().getLanguage() !=
- InputKind::LLVM_IR &&
- "IR inputs not support here!");
+ if (Clang->getFrontendOpts().Inputs.size() != 1 ||
+ Clang->getFrontendOpts().Inputs[0].getKind().getFormat() !=
+ InputKind::Source ||
+ Clang->getFrontendOpts().Inputs[0].getKind().getLanguage() ==
+ InputKind::LLVM_IR) {
+ return BuildPreambleError::BadInputs;
+ }
// Clear out old caches and data.
Diagnostics.Reset();
@@ -347,8 +349,11 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
Callbacks.createPPCallbacks();
if (DelegatedPPCallbacks)
Clang->getPreprocessor().addPPCallbacks(std::move(DelegatedPPCallbacks));
+ if (auto CommentHandler = Callbacks.getCommentHandler())
+ Clang->getPreprocessor().addCommentHandler(CommentHandler);
- Act->Execute();
+ if (llvm::Error Err = Act->Execute())
+ return errorToErrorCode(std::move(Err));
// Run the callbacks.
Callbacks.AfterExecute(*Clang);
@@ -372,7 +377,7 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
PrecompiledPreamble::PreambleFileHash::createForFile(File->getSize(),
ModTime);
} else {
- llvm::MemoryBuffer *Buffer = SourceMgr.getMemoryBufferForFile(File);
+ const llvm::MemoryBuffer *Buffer = SourceMgr.getMemoryBufferForFile(File);
FilesInPreamble[File->getName()] =
PrecompiledPreamble::PreambleFileHash::createForMemoryBuffer(Buffer);
}
@@ -449,20 +454,33 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
Status.getSize(), llvm::sys::toTimeT(Status.getLastModificationTime()));
}
+ // OverridenFileBuffers tracks only the files not found in VFS.
+ llvm::StringMap<PreambleFileHash> OverridenFileBuffers;
for (const auto &RB : PreprocessorOpts.RemappedFileBuffers) {
- llvm::vfs::Status Status;
- if (!moveOnNoError(VFS->status(RB.first), Status))
- return false;
-
- OverriddenFiles[Status.getUniqueID()] =
+ const PrecompiledPreamble::PreambleFileHash PreambleHash =
PreambleFileHash::createForMemoryBuffer(RB.second);
+ llvm::vfs::Status Status;
+ if (moveOnNoError(VFS->status(RB.first), Status))
+ OverriddenFiles[Status.getUniqueID()] = PreambleHash;
+ else
+ OverridenFileBuffers[RB.first] = PreambleHash;
}
// Check whether anything has changed.
for (const auto &F : FilesInPreamble) {
+ auto OverridenFileBuffer = OverridenFileBuffers.find(F.first());
+ if (OverridenFileBuffer != OverridenFileBuffers.end()) {
+ // The file's buffer was remapped and the file was not found in VFS.
+ // Check whether it matches up with the previous mapping.
+ if (OverridenFileBuffer->second != F.second)
+ return false;
+ continue;
+ }
+
llvm::vfs::Status Status;
if (!moveOnNoError(VFS->status(F.first()), Status)) {
- // If we can't stat the file, assume that something horrible happened.
+ // If the file's buffer is not remapped and we can't stat it,
+ // assume that something horrible happened.
return false;
}
@@ -476,7 +494,8 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
continue;
}
- // The file was not remapped; check whether it has changed on disk.
+ // Neither the file's buffer nor the file itself was remapped;
+ // check whether it has changed on disk.
if (Status.getSize() != uint64_t(F.second.Size) ||
llvm::sys::toTimeT(Status.getLastModificationTime()) !=
F.second.ModTime)
@@ -743,6 +762,7 @@ void PreambleCallbacks::HandleTopLevelDecl(DeclGroupRef DG) {}
std::unique_ptr<PPCallbacks> PreambleCallbacks::createPPCallbacks() {
return nullptr;
}
+CommentHandler *PreambleCallbacks::getCommentHandler() { return nullptr; }
static llvm::ManagedStatic<BuildPreambleErrorCategory> BuildPreambleErrCategory;
@@ -764,6 +784,8 @@ std::string BuildPreambleErrorCategory::message(int condition) const {
return "BeginSourceFile() return an error";
case BuildPreambleError::CouldntEmitPCH:
return "Could not emit PCH";
+ case BuildPreambleError::BadInputs:
+ return "Command line arguments must contain exactly one source file";
}
llvm_unreachable("unexpected BuildPreambleError");
}
diff --git a/lib/Frontend/PrintPreprocessedOutput.cpp b/lib/Frontend/PrintPreprocessedOutput.cpp
index 3b835985a54c..732edacffbe3 100644
--- a/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -1,9 +1,8 @@
//===--- PrintPreprocessedOutput.cpp - Implement the -E mode --------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -144,6 +143,8 @@ public:
ArrayRef<int> Ids) override;
void PragmaWarningPush(SourceLocation Loc, int Level) override;
void PragmaWarningPop(SourceLocation Loc) override;
+ void PragmaExecCharsetPush(SourceLocation Loc, StringRef Str) override;
+ void PragmaExecCharsetPop(SourceLocation Loc) override;
void PragmaAssumeNonNullBegin(SourceLocation Loc) override;
void PragmaAssumeNonNullEnd(SourceLocation Loc) override;
@@ -554,6 +555,24 @@ void PrintPPOutputPPCallbacks::PragmaWarningPop(SourceLocation Loc) {
setEmittedDirectiveOnThisLine();
}
+void PrintPPOutputPPCallbacks::PragmaExecCharsetPush(SourceLocation Loc,
+ StringRef Str) {
+ startNewLineIfNeeded();
+ MoveToLine(Loc);
+ OS << "#pragma character_execution_set(push";
+ if (!Str.empty())
+ OS << ", " << Str;
+ OS << ')';
+ setEmittedDirectiveOnThisLine();
+}
+
+void PrintPPOutputPPCallbacks::PragmaExecCharsetPop(SourceLocation Loc) {
+ startNewLineIfNeeded();
+ MoveToLine(Loc);
+ OS << "#pragma character_execution_set(pop)";
+ setEmittedDirectiveOnThisLine();
+}
+
void PrintPPOutputPPCallbacks::
PragmaAssumeNonNullBegin(SourceLocation Loc) {
startNewLineIfNeeded();
@@ -645,7 +664,7 @@ struct UnknownPragmaHandler : public PragmaHandler {
bool RequireTokenExpansion)
: Prefix(prefix), Callbacks(callbacks),
ShouldExpandTokens(RequireTokenExpansion) {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &PragmaTok) override {
// Figure out what line we went to and insert the appropriate number of
// newline characters.
@@ -659,7 +678,8 @@ struct UnknownPragmaHandler : public PragmaHandler {
auto Toks = llvm::make_unique<Token[]>(1);
Toks[0] = PragmaTok;
PP.EnterTokenStream(std::move(Toks), /*NumToks=*/1,
- /*DisableMacroExpansion=*/false);
+ /*DisableMacroExpansion=*/false,
+ /*IsReinject=*/false);
PP.Lex(PragmaTok);
}
Token PrevToken;
@@ -750,6 +770,15 @@ static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
reinterpret_cast<Module *>(Tok.getAnnotationValue()));
PP.Lex(Tok);
continue;
+ } else if (Tok.is(tok::annot_header_unit)) {
+ // This is a header-name that has been (effectively) converted into a
+ // module-name.
+ // FIXME: The module name could contain non-identifier module name
+ // components. We don't have a good way to round-trip those.
+ Module *M = reinterpret_cast<Module *>(Tok.getAnnotationValue());
+ std::string Name = M->getFullModuleName();
+ OS.write(Name.data(), Name.size());
+ Callbacks->HandleNewlinesInToken(Name.data(), Name.size());
} else if (Tok.isAnnotation()) {
// Ignore annotation tokens created by pragmas - the pragmas themselves
// will be reproduced in the preprocessed output.
@@ -771,12 +800,12 @@ static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
Callbacks->HandleNewlinesInToken(TokPtr, Len);
} else {
std::string S = PP.getSpelling(Tok);
- OS.write(&S[0], S.size());
+ OS.write(S.data(), S.size());
// Tokens that can contain embedded newlines need to adjust our current
// line number.
if (Tok.getKind() == tok::comment || Tok.getKind() == tok::unknown)
- Callbacks->HandleNewlinesInToken(&S[0], S.size());
+ Callbacks->HandleNewlinesInToken(S.data(), S.size());
}
Callbacks->setEmittedTokensOnThisLine();
diff --git a/lib/Frontend/Rewrite/FixItRewriter.cpp b/lib/Frontend/Rewrite/FixItRewriter.cpp
index 1c2efe63aa19..667b9f0469f7 100644
--- a/lib/Frontend/Rewrite/FixItRewriter.cpp
+++ b/lib/Frontend/Rewrite/FixItRewriter.cpp
@@ -1,9 +1,8 @@
//===- FixItRewriter.cpp - Fix-It Rewriter Diagnostic Client --------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Frontend/Rewrite/FrontendActions.cpp b/lib/Frontend/Rewrite/FrontendActions.cpp
index bcf6d215c998..0f1a0584c72b 100644
--- a/lib/Frontend/Rewrite/FrontendActions.cpp
+++ b/lib/Frontend/Rewrite/FrontendActions.cpp
@@ -1,9 +1,8 @@
//===--- FrontendActions.cpp ----------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -130,7 +129,11 @@ bool FixItRecompile::BeginInvocation(CompilerInstance &CI) {
FixItOpts->FixOnlyWarnings = FEOpts.FixOnlyWarnings;
FixItRewriter Rewriter(CI.getDiagnostics(), CI.getSourceManager(),
CI.getLangOpts(), FixItOpts.get());
- FixAction->Execute();
+ if (llvm::Error Err = FixAction->Execute()) {
+ // FIXME this drops the error on the floor.
+ consumeError(std::move(Err));
+ return false;
+ }
err = Rewriter.WriteFixedFiles(&RewrittenFiles);
@@ -238,7 +241,7 @@ public:
// Rewrite the contents of the module in a separate compiler instance.
CompilerInstance Instance(CI.getPCHContainerOperations(),
- &CI.getPreprocessor().getPCMCache());
+ &CI.getModuleCache());
Instance.setInvocation(
std::make_shared<CompilerInvocation>(CI.getInvocation()));
Instance.createDiagnostics(
diff --git a/lib/Frontend/Rewrite/HTMLPrint.cpp b/lib/Frontend/Rewrite/HTMLPrint.cpp
index 34ee9673cc54..a5b36bc7856c 100644
--- a/lib/Frontend/Rewrite/HTMLPrint.cpp
+++ b/lib/Frontend/Rewrite/HTMLPrint.cpp
@@ -1,9 +1,8 @@
//===--- HTMLPrint.cpp - Source code -> HTML pretty-printing --------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Frontend/Rewrite/InclusionRewriter.cpp b/lib/Frontend/Rewrite/InclusionRewriter.cpp
index 2e7baa3d9581..cb4e773aca87 100644
--- a/lib/Frontend/Rewrite/InclusionRewriter.cpp
+++ b/lib/Frontend/Rewrite/InclusionRewriter.cpp
@@ -1,9 +1,8 @@
//===--- InclusionRewriter.cpp - Rewrite includes into their expansions ---===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -415,7 +414,7 @@ bool InclusionRewriter::HandleHasInclude(
// FIXME: Why don't we call PP.LookupFile here?
const FileEntry *File = PP.getHeaderSearchInfo().LookupFile(
Filename, SourceLocation(), isAngled, Lookup, CurDir, Includers, nullptr,
- nullptr, nullptr, nullptr, nullptr);
+ nullptr, nullptr, nullptr, nullptr, nullptr);
FileExists = File != nullptr;
return true;
diff --git a/lib/Frontend/Rewrite/RewriteMacros.cpp b/lib/Frontend/Rewrite/RewriteMacros.cpp
index ae6b51bc814f..6b67ee638353 100644
--- a/lib/Frontend/Rewrite/RewriteMacros.cpp
+++ b/lib/Frontend/Rewrite/RewriteMacros.cpp
@@ -1,9 +1,8 @@
//===--- RewriteMacros.cpp - Rewrite macros into their expansions ---------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Frontend/Rewrite/RewriteModernObjC.cpp b/lib/Frontend/Rewrite/RewriteModernObjC.cpp
index 10ca9a785699..bd091ee03351 100644
--- a/lib/Frontend/Rewrite/RewriteModernObjC.cpp
+++ b/lib/Frontend/Rewrite/RewriteModernObjC.cpp
@@ -1,9 +1,8 @@
-//===--- RewriteObjC.cpp - Playground for the code rewriter ---------------===//
+//===-- RewriteModernObjC.cpp - Playground for the code rewriter ----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -882,9 +881,8 @@ RewriteModernObjC::getIvarAccessString(ObjCIvarDecl *D) {
IvarT, nullptr,
/*BitWidth=*/nullptr, /*Mutable=*/true,
ICIS_NoInit);
- MemberExpr *ME = new (Context)
- MemberExpr(PE, true, SourceLocation(), FD, SourceLocation(),
- FD->getType(), VK_LValue, OK_Ordinary);
+ MemberExpr *ME = MemberExpr::CreateImplicit(
+ *Context, PE, true, FD, FD->getType(), VK_LValue, OK_Ordinary);
IvarT = Context->getDecltypeType(ME, ME->getType());
}
}
@@ -2430,7 +2428,7 @@ void RewriteModernObjC::SynthMsgSendFunctionDecl() {
assert(!argT.isNull() && "Can't find 'SEL' type");
ArgTys.push_back(argT);
QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
- ArgTys, /*isVariadic=*/true);
+ ArgTys, /*variadic=*/true);
MsgSendFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
@@ -2444,7 +2442,7 @@ void RewriteModernObjC::SynthMsgSendSuperFunctionDecl() {
SmallVector<QualType, 2> ArgTys;
ArgTys.push_back(Context->VoidTy);
QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
- ArgTys, /*isVariadic=*/true);
+ ArgTys, /*variadic=*/true);
MsgSendSuperFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
@@ -2463,7 +2461,7 @@ void RewriteModernObjC::SynthMsgSendStretFunctionDecl() {
assert(!argT.isNull() && "Can't find 'SEL' type");
ArgTys.push_back(argT);
QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
- ArgTys, /*isVariadic=*/true);
+ ArgTys, /*variadic=*/true);
MsgSendStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
@@ -2479,7 +2477,7 @@ void RewriteModernObjC::SynthMsgSendSuperStretFunctionDecl() {
SmallVector<QualType, 2> ArgTys;
ArgTys.push_back(Context->VoidTy);
QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
- ArgTys, /*isVariadic=*/true);
+ ArgTys, /*variadic=*/true);
MsgSendSuperStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
@@ -2499,7 +2497,7 @@ void RewriteModernObjC::SynthMsgSendFpretFunctionDecl() {
assert(!argT.isNull() && "Can't find 'SEL' type");
ArgTys.push_back(argT);
QualType msgSendType = getSimpleFunctionType(Context->DoubleTy,
- ArgTys, /*isVariadic=*/true);
+ ArgTys, /*variadic=*/true);
MsgSendFpretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
@@ -2737,9 +2735,9 @@ Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) {
Context->getPointerType(Context->VoidPtrTy),
nullptr, /*BitWidth=*/nullptr,
/*Mutable=*/true, ICIS_NoInit);
- MemberExpr *ArrayLiteralME = new (Context)
- MemberExpr(NSArrayCallExpr, false, SourceLocation(), ARRFD,
- SourceLocation(), ARRFD->getType(), VK_LValue, OK_Ordinary);
+ MemberExpr *ArrayLiteralME =
+ MemberExpr::CreateImplicit(*Context, NSArrayCallExpr, false, ARRFD,
+ ARRFD->getType(), VK_LValue, OK_Ordinary);
QualType ConstIdT = Context->getObjCIdType().withConst();
CStyleCastExpr * ArrayLiteralObjects =
NoTypeInfoCStyleCastExpr(Context,
@@ -2866,9 +2864,9 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral
Context->getPointerType(Context->VoidPtrTy),
nullptr, /*BitWidth=*/nullptr,
/*Mutable=*/true, ICIS_NoInit);
- MemberExpr *DictLiteralValueME = new (Context)
- MemberExpr(NSValueCallExpr, false, SourceLocation(), ARRFD,
- SourceLocation(), ARRFD->getType(), VK_LValue, OK_Ordinary);
+ MemberExpr *DictLiteralValueME =
+ MemberExpr::CreateImplicit(*Context, NSValueCallExpr, false, ARRFD,
+ ARRFD->getType(), VK_LValue, OK_Ordinary);
QualType ConstIdT = Context->getObjCIdType().withConst();
CStyleCastExpr * DictValueObjects =
NoTypeInfoCStyleCastExpr(Context,
@@ -2879,9 +2877,9 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral
Expr *NSKeyCallExpr = CallExpr::Create(
*Context, NSDictDRE, KeyExprs, NSDictFType, VK_LValue, SourceLocation());
- MemberExpr *DictLiteralKeyME = new (Context)
- MemberExpr(NSKeyCallExpr, false, SourceLocation(), ARRFD,
- SourceLocation(), ARRFD->getType(), VK_LValue, OK_Ordinary);
+ MemberExpr *DictLiteralKeyME =
+ MemberExpr::CreateImplicit(*Context, NSKeyCallExpr, false, ARRFD,
+ ARRFD->getType(), VK_LValue, OK_Ordinary);
CStyleCastExpr * DictKeyObjects =
NoTypeInfoCStyleCastExpr(Context,
@@ -3181,9 +3179,8 @@ Expr *RewriteModernObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFla
returnType, nullptr,
/*BitWidth=*/nullptr,
/*Mutable=*/true, ICIS_NoInit);
- MemberExpr *ME = new (Context)
- MemberExpr(STCE, false, SourceLocation(), FieldD, SourceLocation(),
- FieldD->getType(), VK_LValue, OK_Ordinary);
+ MemberExpr *ME = MemberExpr::CreateImplicit(
+ *Context, STCE, false, FieldD, FieldD->getType(), VK_LValue, OK_Ordinary);
return ME;
}
@@ -4630,9 +4627,8 @@ Stmt *RewriteModernObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp
Context->VoidPtrTy, nullptr,
/*BitWidth=*/nullptr, /*Mutable=*/true,
ICIS_NoInit);
- MemberExpr *ME =
- new (Context) MemberExpr(PE, true, SourceLocation(), FD, SourceLocation(),
- FD->getType(), VK_LValue, OK_Ordinary);
+ MemberExpr *ME = MemberExpr::CreateImplicit(
+ *Context, PE, true, FD, FD->getType(), VK_LValue, OK_Ordinary);
CastExpr *FunkCast = NoTypeInfoCStyleCastExpr(Context, PtrToFuncCastType,
CK_BitCast, ME);
@@ -4677,9 +4673,8 @@ Stmt *RewriteModernObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) {
Context->VoidPtrTy, nullptr,
/*BitWidth=*/nullptr, /*Mutable=*/true,
ICIS_NoInit);
- MemberExpr *ME = new (Context)
- MemberExpr(DeclRefExp, isArrow, SourceLocation(), FD, SourceLocation(),
- FD->getType(), VK_LValue, OK_Ordinary);
+ MemberExpr *ME = MemberExpr::CreateImplicit(
+ *Context, DeclRefExp, isArrow, FD, FD->getType(), VK_LValue, OK_Ordinary);
StringRef Name = VD->getName();
FD = FieldDecl::Create(*Context, nullptr, SourceLocation(), SourceLocation(),
@@ -4687,9 +4682,8 @@ Stmt *RewriteModernObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) {
Context->VoidPtrTy, nullptr,
/*BitWidth=*/nullptr, /*Mutable=*/true,
ICIS_NoInit);
- ME =
- new (Context) MemberExpr(ME, true, SourceLocation(), FD, SourceLocation(),
- DeclRefExp->getType(), VK_LValue, OK_Ordinary);
+ ME = MemberExpr::CreateImplicit(*Context, ME, true, FD, DeclRefExp->getType(),
+ VK_LValue, OK_Ordinary);
// Need parens to enforce precedence.
ParenExpr *PE = new (Context) ParenExpr(DeclRefExp->getExprLoc(),
@@ -7529,9 +7523,8 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
IvarT, nullptr,
/*BitWidth=*/nullptr,
/*Mutable=*/true, ICIS_NoInit);
- MemberExpr *ME = new (Context)
- MemberExpr(PE, true, SourceLocation(), FD, SourceLocation(),
- FD->getType(), VK_LValue, OK_Ordinary);
+ MemberExpr *ME = MemberExpr::CreateImplicit(
+ *Context, PE, true, FD, FD->getType(), VK_LValue, OK_Ordinary);
IvarT = Context->getDecltypeType(ME, ME->getType());
}
}
@@ -7558,9 +7551,9 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
D->getType(), nullptr,
/*BitWidth=*/D->getBitWidth(),
/*Mutable=*/true, ICIS_NoInit);
- MemberExpr *ME = new (Context)
- MemberExpr(PE, /*isArrow*/ false, SourceLocation(), FD,
- SourceLocation(), FD->getType(), VK_LValue, OK_Ordinary);
+ MemberExpr *ME =
+ MemberExpr::CreateImplicit(*Context, PE, /*isArrow*/ false, FD,
+ FD->getType(), VK_LValue, OK_Ordinary);
Replacement = ME;
}
diff --git a/lib/Frontend/Rewrite/RewriteObjC.cpp b/lib/Frontend/Rewrite/RewriteObjC.cpp
index 3e018800b909..05078baee790 100644
--- a/lib/Frontend/Rewrite/RewriteObjC.cpp
+++ b/lib/Frontend/Rewrite/RewriteObjC.cpp
@@ -1,9 +1,8 @@
//===--- RewriteObjC.cpp - Playground for the code rewriter ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -2336,7 +2335,7 @@ void RewriteObjC::SynthMsgSendFunctionDecl() {
assert(!argT.isNull() && "Can't find 'SEL' type");
ArgTys.push_back(argT);
QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
- ArgTys, /*isVariadic=*/true);
+ ArgTys, /*variadic=*/true);
MsgSendFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
@@ -2358,7 +2357,7 @@ void RewriteObjC::SynthMsgSendSuperFunctionDecl() {
assert(!argT.isNull() && "Can't find 'SEL' type");
ArgTys.push_back(argT);
QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
- ArgTys, /*isVariadic=*/true);
+ ArgTys, /*variadic=*/true);
MsgSendSuperFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
@@ -2377,7 +2376,7 @@ void RewriteObjC::SynthMsgSendStretFunctionDecl() {
assert(!argT.isNull() && "Can't find 'SEL' type");
ArgTys.push_back(argT);
QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
- ArgTys, /*isVariadic=*/true);
+ ArgTys, /*variadic=*/true);
MsgSendStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
@@ -2401,7 +2400,7 @@ void RewriteObjC::SynthMsgSendSuperStretFunctionDecl() {
assert(!argT.isNull() && "Can't find 'SEL' type");
ArgTys.push_back(argT);
QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
- ArgTys, /*isVariadic=*/true);
+ ArgTys, /*variadic=*/true);
MsgSendSuperStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
@@ -2421,7 +2420,7 @@ void RewriteObjC::SynthMsgSendFpretFunctionDecl() {
assert(!argT.isNull() && "Can't find 'SEL' type");
ArgTys.push_back(argT);
QualType msgSendType = getSimpleFunctionType(Context->DoubleTy,
- ArgTys, /*isVariadic=*/true);
+ ArgTys, /*variadic=*/true);
MsgSendFpretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
@@ -3794,9 +3793,8 @@ Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
Context->VoidPtrTy, nullptr,
/*BitWidth=*/nullptr, /*Mutable=*/true,
ICIS_NoInit);
- MemberExpr *ME =
- new (Context) MemberExpr(PE, true, SourceLocation(), FD, SourceLocation(),
- FD->getType(), VK_LValue, OK_Ordinary);
+ MemberExpr *ME = MemberExpr::CreateImplicit(
+ *Context, PE, true, FD, FD->getType(), VK_LValue, OK_Ordinary);
CastExpr *FunkCast = NoTypeInfoCStyleCastExpr(Context, PtrToFuncCastType,
CK_BitCast, ME);
@@ -3841,9 +3839,9 @@ Stmt *RewriteObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) {
Context->VoidPtrTy, nullptr,
/*BitWidth=*/nullptr, /*Mutable=*/true,
ICIS_NoInit);
- MemberExpr *ME = new (Context)
- MemberExpr(DeclRefExp, isArrow, SourceLocation(), FD, SourceLocation(),
- FD->getType(), VK_LValue, OK_Ordinary);
+ MemberExpr *ME =
+ MemberExpr::CreateImplicit(*Context, DeclRefExp, isArrow, FD,
+ FD->getType(), VK_LValue, OK_Ordinary);
StringRef Name = VD->getName();
FD = FieldDecl::Create(*Context, nullptr, SourceLocation(), SourceLocation(),
@@ -3851,9 +3849,8 @@ Stmt *RewriteObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) {
Context->VoidPtrTy, nullptr,
/*BitWidth=*/nullptr, /*Mutable=*/true,
ICIS_NoInit);
- ME =
- new (Context) MemberExpr(ME, true, SourceLocation(), FD, SourceLocation(),
- DeclRefExp->getType(), VK_LValue, OK_Ordinary);
+ ME = MemberExpr::CreateImplicit(*Context, ME, true, FD, DeclRefExp->getType(),
+ VK_LValue, OK_Ordinary);
// Need parens to enforce precedence.
ParenExpr *PE = new (Context) ParenExpr(DeclRefExp->getExprLoc(),
@@ -5831,10 +5828,10 @@ Stmt *RewriteObjCFragileABI::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
OldRange.getEnd(),
castExpr);
if (IV->isFreeIvar() &&
- declaresSameEntity(CurMethodDef->getClassInterface(), iFaceDecl->getDecl())) {
- MemberExpr *ME = new (Context)
- MemberExpr(PE, true, SourceLocation(), D, IV->getLocation(),
- D->getType(), VK_LValue, OK_Ordinary);
+ declaresSameEntity(CurMethodDef->getClassInterface(),
+ iFaceDecl->getDecl())) {
+ MemberExpr *ME = MemberExpr::CreateImplicit(
+ *Context, PE, true, D, D->getType(), VK_LValue, OK_Ordinary);
Replacement = ME;
} else {
IV->setBase(PE);
diff --git a/lib/Frontend/Rewrite/RewriteTest.cpp b/lib/Frontend/Rewrite/RewriteTest.cpp
index b0791f4cddd7..fa8232c8c3c9 100644
--- a/lib/Frontend/Rewrite/RewriteTest.cpp
+++ b/lib/Frontend/Rewrite/RewriteTest.cpp
@@ -1,9 +1,8 @@
//===--- RewriteTest.cpp - Rewriter playground ----------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Frontend/SerializedDiagnosticPrinter.cpp b/lib/Frontend/SerializedDiagnosticPrinter.cpp
index 22546ce4c097..c1434a95cc70 100644
--- a/lib/Frontend/SerializedDiagnosticPrinter.cpp
+++ b/lib/Frontend/SerializedDiagnosticPrinter.cpp
@@ -1,9 +1,8 @@
//===--- SerializedDiagnosticPrinter.cpp - Serializer for diagnostics -----===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -21,6 +20,8 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Bitstream/BitCodes.h"
+#include "llvm/Bitstream/BitstreamReader.h"
#include "llvm/Support/raw_ostream.h"
#include <utility>
diff --git a/lib/Frontend/SerializedDiagnosticReader.cpp b/lib/Frontend/SerializedDiagnosticReader.cpp
index 458717819c41..eca6f5ee1803 100644
--- a/lib/Frontend/SerializedDiagnosticReader.cpp
+++ b/lib/Frontend/SerializedDiagnosticReader.cpp
@@ -1,9 +1,8 @@
//===- SerializedDiagnosticReader.cpp - Reads diagnostics -----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -14,8 +13,8 @@
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Bitcode/BitCodes.h"
-#include "llvm/Bitcode/BitstreamReader.h"
+#include "llvm/Bitstream/BitCodes.h"
+#include "llvm/Bitstream/BitstreamReader.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
@@ -42,21 +41,47 @@ std::error_code SerializedDiagnosticReader::readDiagnostics(StringRef File) {
return SDError::InvalidSignature;
// Sniff for the signature.
- if (Stream.Read(8) != 'D' ||
- Stream.Read(8) != 'I' ||
- Stream.Read(8) != 'A' ||
- Stream.Read(8) != 'G')
+ for (unsigned char C : {'D', 'I', 'A', 'G'}) {
+ if (Expected<llvm::SimpleBitstreamCursor::word_t> Res = Stream.Read(8)) {
+ if (Res.get() == C)
+ continue;
+ } else {
+ // FIXME this drops the error on the floor.
+ consumeError(Res.takeError());
+ }
return SDError::InvalidSignature;
+ }
// Read the top level blocks.
while (!Stream.AtEndOfStream()) {
- if (Stream.ReadCode() != llvm::bitc::ENTER_SUBBLOCK)
+ if (Expected<unsigned> Res = Stream.ReadCode()) {
+ if (Res.get() != llvm::bitc::ENTER_SUBBLOCK)
+ return SDError::InvalidDiagnostics;
+ } else {
+ // FIXME this drops the error on the floor.
+ consumeError(Res.takeError());
return SDError::InvalidDiagnostics;
+ }
std::error_code EC;
- switch (Stream.ReadSubBlockID()) {
- case llvm::bitc::BLOCKINFO_BLOCK_ID:
- BlockInfo = Stream.ReadBlockInfoBlock();
+ Expected<unsigned> MaybeSubBlockID = Stream.ReadSubBlockID();
+ if (!MaybeSubBlockID) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeSubBlockID.takeError());
+ return SDError::InvalidDiagnostics;
+ }
+
+ switch (MaybeSubBlockID.get()) {
+ case llvm::bitc::BLOCKINFO_BLOCK_ID: {
+ Expected<Optional<llvm::BitstreamBlockInfo>> MaybeBlockInfo =
+ Stream.ReadBlockInfoBlock();
+ if (!MaybeBlockInfo) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeBlockInfo.takeError());
+ return SDError::InvalidDiagnostics;
+ }
+ BlockInfo = std::move(MaybeBlockInfo.get());
+ }
if (!BlockInfo)
return SDError::MalformedBlockInfoBlock;
Stream.setBlockInfo(&*BlockInfo);
@@ -70,8 +95,11 @@ std::error_code SerializedDiagnosticReader::readDiagnostics(StringRef File) {
return EC;
continue;
default:
- if (!Stream.SkipBlock())
+ if (llvm::Error Err = Stream.SkipBlock()) {
+ // FIXME this drops the error on the floor.
+ consumeError(std::move(Err));
return SDError::MalformedTopLevelBlock;
+ }
continue;
}
}
@@ -90,11 +118,23 @@ SerializedDiagnosticReader::skipUntilRecordOrBlock(
BlockOrRecordID = 0;
while (!Stream.AtEndOfStream()) {
- unsigned Code = Stream.ReadCode();
+ unsigned Code;
+ if (Expected<unsigned> Res = Stream.ReadCode())
+ Code = Res.get();
+ else
+ return llvm::errorToErrorCode(Res.takeError());
- switch ((llvm::bitc::FixedAbbrevIDs)Code) {
+ if (Code >= static_cast<unsigned>(llvm::bitc::FIRST_APPLICATION_ABBREV)) {
+ // We found a record.
+ BlockOrRecordID = Code;
+ return Cursor::Record;
+ }
+ switch (static_cast<llvm::bitc::FixedAbbrevIDs>(Code)) {
case llvm::bitc::ENTER_SUBBLOCK:
- BlockOrRecordID = Stream.ReadSubBlockID();
+ if (Expected<unsigned> Res = Stream.ReadSubBlockID())
+ BlockOrRecordID = Res.get();
+ else
+ return llvm::errorToErrorCode(Res.takeError());
return Cursor::BlockBegin;
case llvm::bitc::END_BLOCK:
@@ -103,16 +143,15 @@ SerializedDiagnosticReader::skipUntilRecordOrBlock(
return Cursor::BlockEnd;
case llvm::bitc::DEFINE_ABBREV:
- Stream.ReadAbbrevRecord();
+ if (llvm::Error Err = Stream.ReadAbbrevRecord())
+ return llvm::errorToErrorCode(std::move(Err));
continue;
case llvm::bitc::UNABBREV_RECORD:
return SDError::UnsupportedConstruct;
- default:
- // We found a record.
- BlockOrRecordID = Code;
- return Cursor::Record;
+ case llvm::bitc::FIRST_APPLICATION_ABBREV:
+ llvm_unreachable("Unexpected abbrev id.");
}
}
@@ -121,8 +160,12 @@ SerializedDiagnosticReader::skipUntilRecordOrBlock(
std::error_code
SerializedDiagnosticReader::readMetaBlock(llvm::BitstreamCursor &Stream) {
- if (Stream.EnterSubBlock(clang::serialized_diags::BLOCK_META))
+ if (llvm::Error Err =
+ Stream.EnterSubBlock(clang::serialized_diags::BLOCK_META)) {
+ // FIXME this drops the error on the floor.
+ consumeError(std::move(Err));
return SDError::MalformedMetadataBlock;
+ }
bool VersionChecked = false;
@@ -136,8 +179,11 @@ SerializedDiagnosticReader::readMetaBlock(llvm::BitstreamCursor &Stream) {
case Cursor::Record:
break;
case Cursor::BlockBegin:
- if (Stream.SkipBlock())
+ if (llvm::Error Err = Stream.SkipBlock()) {
+ // FIXME this drops the error on the floor.
+ consumeError(std::move(Err));
return SDError::MalformedMetadataBlock;
+ }
LLVM_FALLTHROUGH;
case Cursor::BlockEnd:
if (!VersionChecked)
@@ -146,7 +192,10 @@ SerializedDiagnosticReader::readMetaBlock(llvm::BitstreamCursor &Stream) {
}
SmallVector<uint64_t, 1> Record;
- unsigned RecordID = Stream.readRecord(BlockOrCode, Record);
+ Expected<unsigned> MaybeRecordID = Stream.readRecord(BlockOrCode, Record);
+ if (!MaybeRecordID)
+ return errorToErrorCode(MaybeRecordID.takeError());
+ unsigned RecordID = MaybeRecordID.get();
if (RecordID == RECORD_VERSION) {
if (Record.size() < 1)
@@ -160,8 +209,12 @@ SerializedDiagnosticReader::readMetaBlock(llvm::BitstreamCursor &Stream) {
std::error_code
SerializedDiagnosticReader::readDiagnosticBlock(llvm::BitstreamCursor &Stream) {
- if (Stream.EnterSubBlock(clang::serialized_diags::BLOCK_DIAG))
+ if (llvm::Error Err =
+ Stream.EnterSubBlock(clang::serialized_diags::BLOCK_DIAG)) {
+ // FIXME this drops the error on the floor.
+ consumeError(std::move(Err));
return SDError::MalformedDiagnosticBlock;
+ }
std::error_code EC;
if ((EC = visitStartOfDiagnostic()))
@@ -180,8 +233,11 @@ SerializedDiagnosticReader::readDiagnosticBlock(llvm::BitstreamCursor &Stream) {
if (BlockOrCode == serialized_diags::BLOCK_DIAG) {
if ((EC = readDiagnosticBlock(Stream)))
return EC;
- } else if (!Stream.SkipBlock())
+ } else if (llvm::Error Err = Stream.SkipBlock()) {
+ // FIXME this drops the error on the floor.
+ consumeError(std::move(Err));
return SDError::MalformedSubBlock;
+ }
continue;
case Cursor::BlockEnd:
if ((EC = visitEndOfDiagnostic()))
@@ -194,7 +250,11 @@ SerializedDiagnosticReader::readDiagnosticBlock(llvm::BitstreamCursor &Stream) {
// Read the record.
Record.clear();
StringRef Blob;
- unsigned RecID = Stream.readRecord(BlockOrCode, Record, &Blob);
+ Expected<unsigned> MaybeRecID =
+ Stream.readRecord(BlockOrCode, Record, &Blob);
+ if (!MaybeRecID)
+ return errorToErrorCode(MaybeRecID.takeError());
+ unsigned RecID = MaybeRecID.get();
if (RecID < serialized_diags::RECORD_FIRST ||
RecID > serialized_diags::RECORD_LAST)
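The SerializedDiagnosticReader changes above all follow one pattern: bitstream cursor calls that previously returned plain values now return llvm::Expected<T> or llvm::Error, so every call site either converts the failure into a std::error_code or consumes it explicitly. A minimal sketch of that pattern (editorial, not part of the patch; handleCode() is a hypothetical helper):

#include "llvm/Support/Error.h"
#include <system_error>

// Mirrors how readDiagnostics()/skipUntilRecordOrBlock() now unwrap the
// Expected values handed back by the bitstream cursor.
static std::error_code handleCode(llvm::Expected<unsigned> MaybeCode) {
  if (!MaybeCode) {
    // Either convert the error, as skipUntilRecordOrBlock() does ...
    return llvm::errorToErrorCode(MaybeCode.takeError());
    // ... or, where only a fixed error code can escape, consume it:
    //   llvm::consumeError(MaybeCode.takeError());
  }
  unsigned Code = MaybeCode.get();
  (void)Code; // dispatch on Code here
  return std::error_code();
}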
diff --git a/lib/Frontend/TestModuleFileExtension.cpp b/lib/Frontend/TestModuleFileExtension.cpp
index 087bdc543548..354aa7f5cd3f 100644
--- a/lib/Frontend/TestModuleFileExtension.cpp
+++ b/lib/Frontend/TestModuleFileExtension.cpp
@@ -1,16 +1,15 @@
//===-- TestModuleFileExtension.cpp - Module Extension Tester -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "TestModuleFileExtension.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Serialization/ASTReader.h"
#include "llvm/ADT/Hashing.h"
-#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Bitstream/BitstreamWriter.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdio>
using namespace clang;
@@ -49,7 +48,12 @@ TestModuleFileExtension::Reader::Reader(ModuleFileExtension *Ext,
// Read the extension block.
SmallVector<uint64_t, 4> Record;
while (true) {
- llvm::BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+ llvm::Expected<llvm::BitstreamEntry> MaybeEntry =
+ Stream.advanceSkippingSubblocks();
+ if (!MaybeEntry)
+ (void)MaybeEntry.takeError();
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
+
switch (Entry.Kind) {
case llvm::BitstreamEntry::SubBlock:
case llvm::BitstreamEntry::EndBlock:
@@ -62,8 +66,12 @@ TestModuleFileExtension::Reader::Reader(ModuleFileExtension *Ext,
Record.clear();
StringRef Blob;
- unsigned RecCode = Stream.readRecord(Entry.ID, Record, &Blob);
- switch (RecCode) {
+ Expected<unsigned> MaybeRecCode =
+ Stream.readRecord(Entry.ID, Record, &Blob);
+ if (!MaybeRecCode)
+ fprintf(stderr, "Failed reading rec code: %s\n",
+ toString(MaybeRecCode.takeError()).c_str());
+ switch (MaybeRecCode.get()) {
case FIRST_EXTENSION_RECORD_ID: {
StringRef Message = Blob.substr(0, Record[0]);
fprintf(stderr, "Read extension block message: %s\n",
diff --git a/lib/Frontend/TestModuleFileExtension.h b/lib/Frontend/TestModuleFileExtension.h
index 41f3ca9f05fc..13e090783b11 100644
--- a/lib/Frontend/TestModuleFileExtension.h
+++ b/lib/Frontend/TestModuleFileExtension.h
@@ -1,9 +1,8 @@
//===-- TestModuleFileExtension.h - Module Extension Tester -----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_FRONTEND_TESTMODULEFILEEXTENSION_H
@@ -12,7 +11,7 @@
#include "clang/Serialization/ModuleFileExtension.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Bitcode/BitstreamReader.h"
+#include "llvm/Bitstream/BitstreamReader.h"
#include <string>
namespace clang {
diff --git a/lib/Frontend/TextDiagnostic.cpp b/lib/Frontend/TextDiagnostic.cpp
index 35b99b10f94a..d0c91286250e 100644
--- a/lib/Frontend/TextDiagnostic.cpp
+++ b/lib/Frontend/TextDiagnostic.cpp
@@ -1,9 +1,8 @@
//===--- TextDiagnostic.cpp - Text Diagnostic Pretty-Printing -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -334,8 +333,7 @@ static void selectInterestingSourceRegion(std::string &SourceLine,
// No special characters are allowed in CaretLine.
assert(CaretLine.end() ==
- std::find_if(CaretLine.begin(), CaretLine.end(),
- [](char c) { return c < ' ' || '~' < c; }));
+ llvm::find_if(CaretLine, [](char c) { return c < ' ' || '~' < c; }));
// Find the slice that we need to display the full caret line
// correctly.
@@ -767,7 +765,28 @@ void TextDiagnostic::emitFilename(StringRef Filename, const SourceManager &SM) {
const DirectoryEntry *Dir = SM.getFileManager().getDirectory(
llvm::sys::path::parent_path(Filename));
if (Dir) {
+ // We want to print a simplified absolute path, i.e. without "dots".
+ //
+ // The hardest part here is the paths like "<part1>/<link>/../<part2>".
+ // On Unix-like systems, we cannot just collapse "<link>/..", because
+ // paths are resolved sequentially, and therefore the path
+ // "<part1>/<part2>" may point to a different location. That is why
+ // we use FileManager::getCanonicalName(), which expands all indirections
+ // with llvm::sys::fs::real_path() and caches the result.
+ //
+ // On the other hand, it would be better to preserve as much of the
+ // original path as possible, because that helps a user to recognize it.
+ // real_path() expands all links, which is sometimes too much. Luckily,
+ // on Windows we can just use llvm::sys::path::remove_dots(), because,
+ // on that system, both aforementioned paths point to the same place.
+#ifdef _WIN32
+ SmallString<4096> DirName = Dir->getName();
+ llvm::sys::fs::make_absolute(DirName);
+ llvm::sys::path::native(DirName);
+ llvm::sys::path::remove_dots(DirName, /* remove_dot_dot */ true);
+#else
StringRef DirName = SM.getFileManager().getCanonicalName(Dir);
+#endif
llvm::sys::path::append(AbsoluteFilename, DirName,
llvm::sys::path::filename(Filename));
Filename = StringRef(AbsoluteFilename.data(), AbsoluteFilename.size());
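The comment above explains why the Windows branch simplifies the directory with remove_dots() while other systems go through FileManager::getCanonicalName(). A small sketch of the Windows-side steps using only llvm::sys utilities (editorial, not part of the patch; simplifyDirForDisplay() is a hypothetical helper):

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"

static llvm::SmallString<256> simplifyDirForDisplay(llvm::StringRef Dir) {
  llvm::SmallString<256> Name(Dir);
  // Make the path absolute relative to the current working directory.
  llvm::sys::fs::make_absolute(Name);
  // Switch to the preferred separators for the host platform.
  llvm::sys::path::native(Name);
  // Collapse "." and ".." components; per the comment above this is safe on
  // Windows because a path and its dot-collapsed form name the same place.
  llvm::sys::path::remove_dots(Name, /*remove_dot_dot=*/true);
  return Name;
}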
@@ -793,8 +812,6 @@ void TextDiagnostic::emitDiagnosticLoc(FullSourceLoc Loc, PresumedLoc PLoc,
const FileEntry *FE = Loc.getFileEntry();
if (FE && FE->isValid()) {
emitFilename(FE->getName(), Loc.getManager());
- if (FE->isInPCH())
- OS << " (in PCH)";
OS << ": ";
}
}
@@ -838,7 +855,7 @@ void TextDiagnostic::emitDiagnosticLoc(FullSourceLoc Loc, PresumedLoc PLoc,
if (LangOpts.MSCompatibilityVersion &&
!LangOpts.isCompatibleWithMSVC(LangOptions::MSVC2015))
OS << ' ';
- OS << ": ";
+ OS << ':';
break;
}
diff --git a/lib/Frontend/TextDiagnosticBuffer.cpp b/lib/Frontend/TextDiagnosticBuffer.cpp
index 44bb2bc29bc0..b2497f56cbcd 100644
--- a/lib/Frontend/TextDiagnosticBuffer.cpp
+++ b/lib/Frontend/TextDiagnosticBuffer.cpp
@@ -1,9 +1,8 @@
//===- TextDiagnosticBuffer.cpp - Buffer Text Diagnostics -----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Frontend/TextDiagnosticPrinter.cpp b/lib/Frontend/TextDiagnosticPrinter.cpp
index a37382c116ae..0c0a44a1388b 100644
--- a/lib/Frontend/TextDiagnosticPrinter.cpp
+++ b/lib/Frontend/TextDiagnosticPrinter.cpp
@@ -1,9 +1,8 @@
//===--- TextDiagnosticPrinter.cpp - Diagnostic Printer -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Frontend/VerifyDiagnosticConsumer.cpp b/lib/Frontend/VerifyDiagnosticConsumer.cpp
index 21933f474ff5..a68ef03d4db1 100644
--- a/lib/Frontend/VerifyDiagnosticConsumer.cpp
+++ b/lib/Frontend/VerifyDiagnosticConsumer.cpp
@@ -1,9 +1,8 @@
//===- VerifyDiagnosticConsumer.cpp - Verifying Diagnostic Client ---------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -51,23 +50,6 @@ using Directive = VerifyDiagnosticConsumer::Directive;
using DirectiveList = VerifyDiagnosticConsumer::DirectiveList;
using ExpectedData = VerifyDiagnosticConsumer::ExpectedData;
-VerifyDiagnosticConsumer::VerifyDiagnosticConsumer(DiagnosticsEngine &Diags_)
- : Diags(Diags_), PrimaryClient(Diags.getClient()),
- PrimaryClientOwner(Diags.takeClient()),
- Buffer(new TextDiagnosticBuffer()), Status(HasNoDirectives) {
- if (Diags.hasSourceManager())
- setSourceManager(Diags.getSourceManager());
-}
-
-VerifyDiagnosticConsumer::~VerifyDiagnosticConsumer() {
- assert(!ActiveSourceFiles && "Incomplete parsing of source files!");
- assert(!CurrentPreprocessor && "CurrentPreprocessor should be invalid!");
- SrcManager = nullptr;
- CheckDiagnostics();
- assert(!Diags.ownsClient() &&
- "The VerifyDiagnosticConsumer takes over ownership of the client!");
-}
-
#ifndef NDEBUG
namespace {
@@ -94,86 +76,6 @@ public:
#endif
-// DiagnosticConsumer interface.
-
-void VerifyDiagnosticConsumer::BeginSourceFile(const LangOptions &LangOpts,
- const Preprocessor *PP) {
- // Attach comment handler on first invocation.
- if (++ActiveSourceFiles == 1) {
- if (PP) {
- CurrentPreprocessor = PP;
- this->LangOpts = &LangOpts;
- setSourceManager(PP->getSourceManager());
- const_cast<Preprocessor *>(PP)->addCommentHandler(this);
-#ifndef NDEBUG
- // Debug build tracks parsed files.
- const_cast<Preprocessor *>(PP)->addPPCallbacks(
- llvm::make_unique<VerifyFileTracker>(*this, *SrcManager));
-#endif
- }
- }
-
- assert((!PP || CurrentPreprocessor == PP) && "Preprocessor changed!");
- PrimaryClient->BeginSourceFile(LangOpts, PP);
-}
-
-void VerifyDiagnosticConsumer::EndSourceFile() {
- assert(ActiveSourceFiles && "No active source files!");
- PrimaryClient->EndSourceFile();
-
- // Detach comment handler once last active source file completed.
- if (--ActiveSourceFiles == 0) {
- if (CurrentPreprocessor)
- const_cast<Preprocessor *>(CurrentPreprocessor)->
- removeCommentHandler(this);
-
- // Check diagnostics once last file completed.
- CheckDiagnostics();
- CurrentPreprocessor = nullptr;
- LangOpts = nullptr;
- }
-}
-
-void VerifyDiagnosticConsumer::HandleDiagnostic(
- DiagnosticsEngine::Level DiagLevel, const Diagnostic &Info) {
- if (Info.hasSourceManager()) {
- // If this diagnostic is for a different source manager, ignore it.
- if (SrcManager && &Info.getSourceManager() != SrcManager)
- return;
-
- setSourceManager(Info.getSourceManager());
- }
-
-#ifndef NDEBUG
- // Debug build tracks unparsed files for possible
- // unparsed expected-* directives.
- if (SrcManager) {
- SourceLocation Loc = Info.getLocation();
- if (Loc.isValid()) {
- ParsedStatus PS = IsUnparsed;
-
- Loc = SrcManager->getExpansionLoc(Loc);
- FileID FID = SrcManager->getFileID(Loc);
-
- const FileEntry *FE = SrcManager->getFileEntryForID(FID);
- if (FE && CurrentPreprocessor && SrcManager->isLoadedFileID(FID)) {
- // If the file is a modules header file it shall not be parsed
- // for expected-* directives.
- HeaderSearch &HS = CurrentPreprocessor->getHeaderSearchInfo();
- if (HS.findModuleForHeader(FE))
- PS = IsUnparsedNoDirectives;
- }
-
- UpdateParsedFileStatus(*SrcManager, FID, PS);
- }
- }
-#endif
-
- // Send the diagnostic to the buffer, we will check it once we reach the end
- // of the source file (or are destructed).
- Buffer->HandleDiagnostic(DiagLevel, Info);
-}
-
//===----------------------------------------------------------------------===//
// Checking diagnostics implementation.
//===----------------------------------------------------------------------===//
@@ -242,17 +144,31 @@ public:
bool Next(unsigned &N) {
unsigned TMP = 0;
P = C;
- for (; P < End && P[0] >= '0' && P[0] <= '9'; ++P) {
+ PEnd = P;
+ for (; PEnd < End && *PEnd >= '0' && *PEnd <= '9'; ++PEnd) {
TMP *= 10;
- TMP += P[0] - '0';
+ TMP += *PEnd - '0';
}
- if (P == C)
+ if (PEnd == C)
return false;
- PEnd = P;
N = TMP;
return true;
}
+ // Return true if a marker is next.
+ // A marker is the longest match for /#[A-Za-z0-9_-]+/.
+ bool NextMarker() {
+ P = C;
+ if (P == End || *P != '#')
+ return false;
+ PEnd = P;
+ ++PEnd;
+ while ((isAlphanumeric(*PEnd) || *PEnd == '-' || *PEnd == '_') &&
+ PEnd < End)
+ ++PEnd;
+ return PEnd > P + 1;
+ }
+
// Return true if string literal S is matched in content.
// When true, P marks begin-position of the match, and calling Advance sets C
// to end-position of the match.
@@ -334,6 +250,10 @@ public:
return C < End;
}
+ // Return the text matched by the previous next/search.
+ // Behavior is undefined if previous next/search failed.
+ StringRef Match() { return StringRef(P, PEnd - P); }
+
// Skip zero or more whitespace.
void SkipWhitespace() {
for (; C < End && isWhitespace(*C); ++C)
@@ -354,6 +274,7 @@ public:
// Position of next char in content.
const char *C;
+ // Start position of the previous Next/Search match.
const char *P;
private:
@@ -361,17 +282,142 @@ private:
const char *PEnd = nullptr;
};
+// The information necessary to create a directive.
+struct UnattachedDirective {
+ DirectiveList *DL = nullptr;
+ bool RegexKind = false;
+ SourceLocation DirectivePos, ContentBegin;
+ std::string Text;
+ unsigned Min = 1, Max = 1;
+};
+
+// Attach the specified directive to the line of code indicated by
+// \p ExpectedLoc.
+void attachDirective(DiagnosticsEngine &Diags, const UnattachedDirective &UD,
+ SourceLocation ExpectedLoc, bool MatchAnyLine = false) {
+ // Construct new directive.
+ std::unique_ptr<Directive> D =
+ Directive::create(UD.RegexKind, UD.DirectivePos, ExpectedLoc,
+ MatchAnyLine, UD.Text, UD.Min, UD.Max);
+
+ std::string Error;
+ if (!D->isValid(Error)) {
+ Diags.Report(UD.ContentBegin, diag::err_verify_invalid_content)
+ << (UD.RegexKind ? "regex" : "string") << Error;
+ }
+
+ UD.DL->push_back(std::move(D));
+}
+
} // anonymous
+// Tracker for markers in the input files. A marker is a comment of the form
+//
+// n = 123; // #123
+//
+// ... that can be referred to by a later expected-* directive:
+//
+// // expected-error@#123 {{undeclared identifier 'n'}}
+//
+// Marker declarations must be at the start of a comment or preceded by
+// whitespace to distinguish them from uses of markers in directives.
+class VerifyDiagnosticConsumer::MarkerTracker {
+ DiagnosticsEngine &Diags;
+
+ struct Marker {
+ SourceLocation DefLoc;
+ SourceLocation RedefLoc;
+ SourceLocation UseLoc;
+ };
+ llvm::StringMap<Marker> Markers;
+
+ // Directives that couldn't be created yet because they name an unknown
+ // marker.
+ llvm::StringMap<llvm::SmallVector<UnattachedDirective, 2>> DeferredDirectives;
+
+public:
+ MarkerTracker(DiagnosticsEngine &Diags) : Diags(Diags) {}
+
+ // Register a marker.
+ void addMarker(StringRef MarkerName, SourceLocation Pos) {
+ auto InsertResult = Markers.insert(
+ {MarkerName, Marker{Pos, SourceLocation(), SourceLocation()}});
+
+ Marker &M = InsertResult.first->second;
+ if (!InsertResult.second) {
+ // Marker was redefined.
+ M.RedefLoc = Pos;
+ } else {
+ // First definition: build any deferred directives.
+ auto Deferred = DeferredDirectives.find(MarkerName);
+ if (Deferred != DeferredDirectives.end()) {
+ for (auto &UD : Deferred->second) {
+ if (M.UseLoc.isInvalid())
+ M.UseLoc = UD.DirectivePos;
+ attachDirective(Diags, UD, Pos);
+ }
+ DeferredDirectives.erase(Deferred);
+ }
+ }
+ }
+
+ // Register a directive at the specified marker.
+ void addDirective(StringRef MarkerName, const UnattachedDirective &UD) {
+ auto MarkerIt = Markers.find(MarkerName);
+ if (MarkerIt != Markers.end()) {
+ Marker &M = MarkerIt->second;
+ if (M.UseLoc.isInvalid())
+ M.UseLoc = UD.DirectivePos;
+ return attachDirective(Diags, UD, M.DefLoc);
+ }
+ DeferredDirectives[MarkerName].push_back(UD);
+ }
+
+ // Ensure we have no remaining deferred directives, and no
+ // multiply-defined-and-used markers.
+ void finalize() {
+ for (auto &MarkerInfo : Markers) {
+ StringRef Name = MarkerInfo.first();
+ Marker &M = MarkerInfo.second;
+ if (M.RedefLoc.isValid() && M.UseLoc.isValid()) {
+ Diags.Report(M.UseLoc, diag::err_verify_ambiguous_marker) << Name;
+ Diags.Report(M.DefLoc, diag::note_verify_ambiguous_marker) << Name;
+ Diags.Report(M.RedefLoc, diag::note_verify_ambiguous_marker) << Name;
+ }
+ }
+
+ for (auto &DeferredPair : DeferredDirectives) {
+ Diags.Report(DeferredPair.second.front().DirectivePos,
+ diag::err_verify_no_such_marker)
+ << DeferredPair.first();
+ }
+ }
+};
+
/// ParseDirective - Go through the comment and see if it indicates expected
/// diagnostics. If so, then put them in the appropriate directive list.
///
/// Returns true if any valid directives were found.
static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
Preprocessor *PP, SourceLocation Pos,
- VerifyDiagnosticConsumer::DirectiveStatus &Status) {
+ VerifyDiagnosticConsumer::DirectiveStatus &Status,
+ VerifyDiagnosticConsumer::MarkerTracker &Markers) {
DiagnosticsEngine &Diags = PP ? PP->getDiagnostics() : SM.getDiagnostics();
+ // First, scan the comment looking for markers.
+ for (ParseHelper PH(S); !PH.Done();) {
+ if (!PH.Search("#", true))
+ break;
+ PH.C = PH.P;
+ if (!PH.NextMarker()) {
+ PH.Next("#");
+ PH.Advance();
+ continue;
+ }
+ PH.Advance();
+ Markers.addMarker(PH.Match(), Pos);
+ }
+
// A single comment may contain multiple directives.
bool FoundDirective = false;
for (ParseHelper PH(S); !PH.Done();) {
@@ -382,41 +428,41 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
if (!(Prefixes.size() == 1 ? PH.Search(*Prefixes.begin(), true, true)
: PH.Search("", true, true)))
break;
+
+ StringRef DToken = PH.Match();
PH.Advance();
// Default directive kind.
- bool RegexKind = false;
- const char* KindStr = "string";
+ UnattachedDirective D;
+ const char *KindStr = "string";
// Parse the initial directive token in reverse so we can easily determine
// its exact actual prefix. If we were to parse it from the front instead,
// it would be harder to determine where the prefix ends because there
// might be multiple matching -verify prefixes because some might prefix
// others.
- StringRef DToken(PH.P, PH.C - PH.P);
// Regex in initial directive token: -re
if (DToken.endswith("-re")) {
- RegexKind = true;
+ D.RegexKind = true;
KindStr = "regex";
DToken = DToken.substr(0, DToken.size()-3);
}
// Type in initial directive token: -{error|warning|note|no-diagnostics}
- DirectiveList *DL = nullptr;
bool NoDiag = false;
StringRef DType;
if (DToken.endswith(DType="-error"))
- DL = ED ? &ED->Errors : nullptr;
+ D.DL = ED ? &ED->Errors : nullptr;
else if (DToken.endswith(DType="-warning"))
- DL = ED ? &ED->Warnings : nullptr;
+ D.DL = ED ? &ED->Warnings : nullptr;
else if (DToken.endswith(DType="-remark"))
- DL = ED ? &ED->Remarks : nullptr;
+ D.DL = ED ? &ED->Remarks : nullptr;
else if (DToken.endswith(DType="-note"))
- DL = ED ? &ED->Notes : nullptr;
+ D.DL = ED ? &ED->Notes : nullptr;
else if (DToken.endswith(DType="-no-diagnostics")) {
NoDiag = true;
- if (RegexKind)
+ if (D.RegexKind)
continue;
}
else
@@ -446,11 +492,12 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
// If a directive has been found but we're not interested
// in storing the directive information, return now.
- if (!DL)
+ if (!D.DL)
return true;
// Next optional token: @
SourceLocation ExpectedLoc;
+ StringRef Marker;
bool MatchAnyLine = false;
if (!PH.Next("@")) {
ExpectedLoc = Pos;
@@ -472,6 +519,8 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
// Absolute line number.
if (Line > 0)
ExpectedLoc = SM.translateLineCol(SM.getFileID(Pos), Line, 1);
+ } else if (PH.NextMarker()) {
+ Marker = PH.Match();
} else if (PP && PH.Search(":")) {
// Specific source file.
StringRef Filename(PH.C, PH.P-PH.C);
@@ -481,7 +530,7 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
const DirectoryLookup *CurDir;
const FileEntry *FE =
PP->LookupFile(Pos, Filename, false, nullptr, nullptr, CurDir,
- nullptr, nullptr, nullptr, nullptr);
+ nullptr, nullptr, nullptr, nullptr, nullptr);
if (!FE) {
Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
diag::err_verify_missing_file) << Filename << KindStr;
@@ -502,7 +551,7 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
ExpectedLoc = SourceLocation();
}
- if (ExpectedLoc.isInvalid() && !MatchAnyLine) {
+ if (ExpectedLoc.isInvalid() && !MatchAnyLine && Marker.empty()) {
Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
diag::err_verify_missing_line) << KindStr;
continue;
@@ -514,29 +563,27 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
PH.SkipWhitespace();
// Next optional token: positive integer or a '+'.
- unsigned Min = 1;
- unsigned Max = 1;
- if (PH.Next(Min)) {
+ if (PH.Next(D.Min)) {
PH.Advance();
// A positive integer can be followed by a '+' meaning min
// or more, or by a '-' meaning a range from min to max.
if (PH.Next("+")) {
- Max = Directive::MaxCount;
+ D.Max = Directive::MaxCount;
PH.Advance();
} else if (PH.Next("-")) {
PH.Advance();
- if (!PH.Next(Max) || Max < Min) {
+ if (!PH.Next(D.Max) || D.Max < D.Min) {
Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
diag::err_verify_invalid_range) << KindStr;
continue;
}
PH.Advance();
} else {
- Max = Min;
+ D.Max = D.Min;
}
} else if (PH.Next("+")) {
// '+' on its own means "1 or more".
- Max = Directive::MaxCount;
+ D.Max = Directive::MaxCount;
PH.Advance();
}
@@ -551,7 +598,6 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
}
PH.Advance();
const char* const ContentBegin = PH.C; // mark content begin
-
// Search for token: }}
if (!PH.SearchClosingBrace("{{", "}}")) {
Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
@@ -561,43 +607,137 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
const char* const ContentEnd = PH.P; // mark content end
PH.Advance();
+ D.DirectivePos = Pos;
+ D.ContentBegin = Pos.getLocWithOffset(ContentBegin - PH.Begin);
+
// Build directive text; convert \n to newlines.
- std::string Text;
StringRef NewlineStr = "\\n";
StringRef Content(ContentBegin, ContentEnd-ContentBegin);
size_t CPos = 0;
size_t FPos;
while ((FPos = Content.find(NewlineStr, CPos)) != StringRef::npos) {
- Text += Content.substr(CPos, FPos-CPos);
- Text += '\n';
+ D.Text += Content.substr(CPos, FPos-CPos);
+ D.Text += '\n';
CPos = FPos + NewlineStr.size();
}
- if (Text.empty())
- Text.assign(ContentBegin, ContentEnd);
+ if (D.Text.empty())
+ D.Text.assign(ContentBegin, ContentEnd);
// Check that regex directives contain at least one regex.
- if (RegexKind && Text.find("{{") == StringRef::npos) {
- Diags.Report(Pos.getLocWithOffset(ContentBegin-PH.Begin),
- diag::err_verify_missing_regex) << Text;
+ if (D.RegexKind && D.Text.find("{{") == StringRef::npos) {
+ Diags.Report(D.ContentBegin, diag::err_verify_missing_regex) << D.Text;
return false;
}
- // Construct new directive.
- std::unique_ptr<Directive> D = Directive::create(
- RegexKind, Pos, ExpectedLoc, MatchAnyLine, Text, Min, Max);
+ if (Marker.empty())
+ attachDirective(Diags, D, ExpectedLoc, MatchAnyLine);
+ else
+ Markers.addDirective(Marker, D);
+ FoundDirective = true;
+ }
- std::string Error;
- if (D->isValid(Error)) {
- DL->push_back(std::move(D));
- FoundDirective = true;
- } else {
- Diags.Report(Pos.getLocWithOffset(ContentBegin-PH.Begin),
- diag::err_verify_invalid_content)
- << KindStr << Error;
+ return FoundDirective;
+}
+
+VerifyDiagnosticConsumer::VerifyDiagnosticConsumer(DiagnosticsEngine &Diags_)
+ : Diags(Diags_), PrimaryClient(Diags.getClient()),
+ PrimaryClientOwner(Diags.takeClient()),
+ Buffer(new TextDiagnosticBuffer()), Markers(new MarkerTracker(Diags)),
+ Status(HasNoDirectives) {
+ if (Diags.hasSourceManager())
+ setSourceManager(Diags.getSourceManager());
+}
+
+VerifyDiagnosticConsumer::~VerifyDiagnosticConsumer() {
+ assert(!ActiveSourceFiles && "Incomplete parsing of source files!");
+ assert(!CurrentPreprocessor && "CurrentPreprocessor should be invalid!");
+ SrcManager = nullptr;
+ CheckDiagnostics();
+ assert(!Diags.ownsClient() &&
+ "The VerifyDiagnosticConsumer takes over ownership of the client!");
+}
+
+// DiagnosticConsumer interface.
+
+void VerifyDiagnosticConsumer::BeginSourceFile(const LangOptions &LangOpts,
+ const Preprocessor *PP) {
+ // Attach comment handler on first invocation.
+ if (++ActiveSourceFiles == 1) {
+ if (PP) {
+ CurrentPreprocessor = PP;
+ this->LangOpts = &LangOpts;
+ setSourceManager(PP->getSourceManager());
+ const_cast<Preprocessor *>(PP)->addCommentHandler(this);
+#ifndef NDEBUG
+ // Debug build tracks parsed files.
+ const_cast<Preprocessor *>(PP)->addPPCallbacks(
+ llvm::make_unique<VerifyFileTracker>(*this, *SrcManager));
+#endif
}
}
- return FoundDirective;
+ assert((!PP || CurrentPreprocessor == PP) && "Preprocessor changed!");
+ PrimaryClient->BeginSourceFile(LangOpts, PP);
+}
+
+void VerifyDiagnosticConsumer::EndSourceFile() {
+ assert(ActiveSourceFiles && "No active source files!");
+ PrimaryClient->EndSourceFile();
+
+ // Detach comment handler once last active source file completed.
+ if (--ActiveSourceFiles == 0) {
+ if (CurrentPreprocessor)
+ const_cast<Preprocessor *>(CurrentPreprocessor)->
+ removeCommentHandler(this);
+
+ // Diagnose any used-but-not-defined markers.
+ Markers->finalize();
+
+ // Check diagnostics once last file completed.
+ CheckDiagnostics();
+ CurrentPreprocessor = nullptr;
+ LangOpts = nullptr;
+ }
+}
+
+void VerifyDiagnosticConsumer::HandleDiagnostic(
+ DiagnosticsEngine::Level DiagLevel, const Diagnostic &Info) {
+ if (Info.hasSourceManager()) {
+ // If this diagnostic is for a different source manager, ignore it.
+ if (SrcManager && &Info.getSourceManager() != SrcManager)
+ return;
+
+ setSourceManager(Info.getSourceManager());
+ }
+
+#ifndef NDEBUG
+ // Debug build tracks unparsed files for possible
+ // unparsed expected-* directives.
+ if (SrcManager) {
+ SourceLocation Loc = Info.getLocation();
+ if (Loc.isValid()) {
+ ParsedStatus PS = IsUnparsed;
+
+ Loc = SrcManager->getExpansionLoc(Loc);
+ FileID FID = SrcManager->getFileID(Loc);
+
+ const FileEntry *FE = SrcManager->getFileEntryForID(FID);
+ if (FE && CurrentPreprocessor && SrcManager->isLoadedFileID(FID)) {
+ // If the file is a modules header file it shall not be parsed
+ // for expected-* directives.
+ HeaderSearch &HS = CurrentPreprocessor->getHeaderSearchInfo();
+ if (HS.findModuleForHeader(FE))
+ PS = IsUnparsedNoDirectives;
+ }
+
+ UpdateParsedFileStatus(*SrcManager, FID, PS);
+ }
+ }
+#endif
+
+ // Send the diagnostic to the buffer, we will check it once we reach the end
+ // of the source file (or are destructed).
+ Buffer->HandleDiagnostic(DiagLevel, Info);
}
/// HandleComment - Hook into the preprocessor and extract comments containing
@@ -621,7 +761,7 @@ bool VerifyDiagnosticConsumer::HandleComment(Preprocessor &PP,
// Fold any "\<EOL>" sequences
size_t loc = C.find('\\');
if (loc == StringRef::npos) {
- ParseDirective(C, &ED, SM, &PP, CommentBegin, Status);
+ ParseDirective(C, &ED, SM, &PP, CommentBegin, Status, *Markers);
return false;
}
@@ -651,7 +791,7 @@ bool VerifyDiagnosticConsumer::HandleComment(Preprocessor &PP,
}
if (!C2.empty())
- ParseDirective(C2, &ED, SM, &PP, CommentBegin, Status);
+ ParseDirective(C2, &ED, SM, &PP, CommentBegin, Status, *Markers);
return false;
}
@@ -685,9 +825,12 @@ static bool findDirectives(SourceManager &SM, FileID FID,
std::string Comment = RawLex.getSpelling(Tok, SM, LangOpts);
if (Comment.empty()) continue;
+ // We don't care about tracking markers for this phase.
+ VerifyDiagnosticConsumer::MarkerTracker Markers(SM.getDiagnostics());
+
// Find first directive.
if (ParseDirective(Comment, nullptr, SM, nullptr, Tok.getLocation(),
- Status))
+ Status, Markers))
return true;
}
return false;
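The marker support added in this file can be exercised from a -verify test roughly as follows; this just restates the example in the MarkerTracker comment above, wrapped in the usual clang lit RUN line (illustrative only):

// RUN: %clang_cc1 -fsyntax-only -verify %s
void f() {
  n = 123; // #123
}
// expected-error@#123 {{undeclared identifier 'n'}}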
diff --git a/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index 7015772fa168..69e773658c5c 100644
--- a/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -1,9 +1,8 @@
//===--- ExecuteCompilerInvocation.cpp ------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -65,6 +64,10 @@ CreateFrontendBaseAction(CompilerInstance &CI) {
case GenerateHeaderModule:
return llvm::make_unique<GenerateHeaderModuleAction>();
case GeneratePCH: return llvm::make_unique<GeneratePCHAction>();
+ case GenerateInterfaceYAMLExpV1:
+ return llvm::make_unique<GenerateInterfaceYAMLExpV1Action>();
+ case GenerateInterfaceTBEExpV1:
+ return llvm::make_unique<GenerateInterfaceTBEExpV1Action>();
case InitOnly: return llvm::make_unique<InitOnlyAction>();
case ParseSyntaxOnly: return llvm::make_unique<SyntaxOnlyAction>();
case ModuleFileInfo: return llvm::make_unique<DumpModuleInfoAction>();
@@ -117,6 +120,8 @@ CreateFrontendBaseAction(CompilerInstance &CI) {
case RunAnalysis: Action = "RunAnalysis"; break;
#endif
case RunPreprocessorOnly: return llvm::make_unique<PreprocessOnlyAction>();
+ case PrintDependencyDirectivesSourceMinimizerOutput:
+ return llvm::make_unique<PrintDependencyDirectivesSourceMinimizerAction>();
}
#if !CLANG_ENABLE_ARCMT || !CLANG_ENABLE_STATIC_ANALYZER \
@@ -235,24 +240,42 @@ bool ExecuteCompilerInvocation(CompilerInstance *Clang) {
}
#if CLANG_ENABLE_STATIC_ANALYZER
- // Honor -analyzer-checker-help.
- // This should happen AFTER plugins have been loaded!
- if (Clang->getAnalyzerOpts()->ShowCheckerHelp) {
- ento::printCheckerHelp(llvm::outs(), Clang->getFrontendOpts().Plugins,
- Clang->getDiagnostics());
+ // These should happen AFTER plugins have been loaded!
+
+ AnalyzerOptions &AnOpts = *Clang->getAnalyzerOpts();
+ // Honor -analyzer-checker-help and -analyzer-checker-help-hidden.
+ if (AnOpts.ShowCheckerHelp || AnOpts.ShowCheckerHelpAlpha ||
+ AnOpts.ShowCheckerHelpDeveloper) {
+ ento::printCheckerHelp(llvm::outs(),
+ Clang->getFrontendOpts().Plugins,
+ AnOpts,
+ Clang->getDiagnostics(),
+ Clang->getLangOpts());
+ return true;
+ }
+
+ // Honor -analyzer-checker-option-help.
+ if (AnOpts.ShowCheckerOptionList || AnOpts.ShowCheckerOptionAlphaList ||
+ AnOpts.ShowCheckerOptionDeveloperList) {
+ ento::printCheckerConfigList(llvm::outs(),
+ Clang->getFrontendOpts().Plugins,
+ *Clang->getAnalyzerOpts(),
+ Clang->getDiagnostics(),
+ Clang->getLangOpts());
return true;
}
// Honor -analyzer-list-enabled-checkers.
- if (Clang->getAnalyzerOpts()->ShowEnabledCheckerList) {
+ if (AnOpts.ShowEnabledCheckerList) {
ento::printEnabledCheckerList(llvm::outs(),
Clang->getFrontendOpts().Plugins,
- *Clang->getAnalyzerOpts(),
- Clang->getDiagnostics());
+ AnOpts,
+ Clang->getDiagnostics(),
+ Clang->getLangOpts());
}
// Honor -analyzer-config-help.
- if (Clang->getAnalyzerOpts()->ShowConfigOptionsList) {
+ if (AnOpts.ShowConfigOptionsList) {
ento::printAnalyzerConfigList(llvm::outs());
return true;
}
diff --git a/lib/Headers/__clang_cuda_builtin_vars.h b/lib/Headers/__clang_cuda_builtin_vars.h
index 290c4b298433..2ba1521f2580 100644
--- a/lib/Headers/__clang_cuda_builtin_vars.h
+++ b/lib/Headers/__clang_cuda_builtin_vars.h
@@ -1,22 +1,8 @@
/*===---- cuda_builtin_vars.h - CUDA built-in variables ---------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/__clang_cuda_cmath.h b/lib/Headers/__clang_cuda_cmath.h
index 5331ba401a95..834a2e3fd134 100644
--- a/lib/Headers/__clang_cuda_cmath.h
+++ b/lib/Headers/__clang_cuda_cmath.h
@@ -1,22 +1,8 @@
/*===---- __clang_cuda_cmath.h - Device-side CUDA cmath support ------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -44,12 +30,32 @@
// implementation. Declaring in the global namespace and pulling into namespace
// std covers all of the known knowns.
+#ifdef _OPENMP
+#define __DEVICE__ static __attribute__((always_inline))
+#else
#define __DEVICE__ static __device__ __inline__ __attribute__((always_inline))
+#endif
+// For C++17 we need to include the noexcept attribute to be compatible
+// with the header-defined version. This may be removed once
+// variant is supported.
+#if defined(_OPENMP) && defined(__cplusplus) && __cplusplus >= 201703L
+#define __NOEXCEPT noexcept
+#else
+#define __NOEXCEPT
+#endif
+
+#if !(defined(_OPENMP) && defined(__cplusplus))
__DEVICE__ long long abs(long long __n) { return ::llabs(__n); }
__DEVICE__ long abs(long __n) { return ::labs(__n); }
__DEVICE__ float abs(float __x) { return ::fabsf(__x); }
__DEVICE__ double abs(double __x) { return ::fabs(__x); }
+#endif
+// TODO: remove once variant is supported.
+#if defined(_OPENMP) && defined(__cplusplus)
+__DEVICE__ const float abs(const float __x) { return ::fabsf((float)__x); }
+__DEVICE__ const double abs(const double __x) { return ::fabs((double)__x); }
+#endif
__DEVICE__ float acos(float __x) { return ::acosf(__x); }
__DEVICE__ float asin(float __x) { return ::asinf(__x); }
__DEVICE__ float atan(float __x) { return ::atanf(__x); }
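With the guards above, under OpenMP device compilation with C++17 the __DEVICE__ and __NOEXCEPT macros make a declaration such as fabs expand roughly like the sketch below (editorial, not part of the patch; device_fabs is a hypothetical stand-in name chosen to avoid clashing with the real overloads):

#include <math.h>

// What "__DEVICE__ float fabs(float __x) __NOEXCEPT { ... }" boils down to
// when _OPENMP is defined and __cplusplus >= 201703L.
static __attribute__((always_inline)) float device_fabs(float __x) noexcept {
  return ::fabsf(__x);
}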
@@ -58,9 +64,11 @@ __DEVICE__ float ceil(float __x) { return ::ceilf(__x); }
__DEVICE__ float cos(float __x) { return ::cosf(__x); }
__DEVICE__ float cosh(float __x) { return ::coshf(__x); }
__DEVICE__ float exp(float __x) { return ::expf(__x); }
-__DEVICE__ float fabs(float __x) { return ::fabsf(__x); }
+__DEVICE__ float fabs(float __x) __NOEXCEPT { return ::fabsf(__x); }
__DEVICE__ float floor(float __x) { return ::floorf(__x); }
__DEVICE__ float fmod(float __x, float __y) { return ::fmodf(__x, __y); }
+// TODO: remove when variant is supported
+#ifndef _OPENMP
__DEVICE__ int fpclassify(float __x) {
return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
FP_ZERO, __x);
@@ -69,6 +77,7 @@ __DEVICE__ int fpclassify(double __x) {
return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
FP_ZERO, __x);
}
+#endif
__DEVICE__ float frexp(float __arg, int *__exp) {
return ::frexpf(__arg, __exp);
}
@@ -448,7 +457,10 @@ using ::remainderf;
using ::remquof;
using ::rintf;
using ::roundf;
+// TODO: remove once variant is supported
+#ifndef _OPENMP
using ::scalblnf;
+#endif
using ::scalbnf;
using ::sinf;
using ::sinhf;
@@ -467,6 +479,7 @@ _GLIBCXX_END_NAMESPACE_VERSION
} // namespace std
#endif
+#undef __NOEXCEPT
#undef __DEVICE__
#endif
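
The hunks above gate the cmath wrappers on _OPENMP so the same header can serve both CUDA device compilation and OpenMP target offloading, and add __NOEXCEPT so the wrappers still match noexcept host declarations under C++17. Below is a minimal sketch of that scheme, not part of the patch: the __DEVICE_SKETCH__, __NOEXCEPT_SKETCH, and fabs_sketch names are hypothetical stand-ins for the real macros and wrappers, and it assumes the file is compiled either as CUDA or with -fopenmp.

#include <math.h>

#ifdef _OPENMP
#define __DEVICE_SKETCH__ static __attribute__((always_inline))
#else
#define __DEVICE_SKETCH__ static __device__ __inline__ __attribute__((always_inline))
#endif

#if defined(_OPENMP) && defined(__cplusplus) && __cplusplus >= 201703L
#define __NOEXCEPT_SKETCH noexcept /* match noexcept host declarations */
#else
#define __NOEXCEPT_SKETCH
#endif

/* Expands to a plain static always_inline function under OpenMP, and to a
   __device__ inline wrapper under CUDA. */
__DEVICE_SKETCH__ float fabs_sketch(float __x) __NOEXCEPT_SKETCH {
  return fabsf(__x);
}
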
diff --git a/lib/Headers/__clang_cuda_complex_builtins.h b/lib/Headers/__clang_cuda_complex_builtins.h
index beef7deff87f..576a958b16bb 100644
--- a/lib/Headers/__clang_cuda_complex_builtins.h
+++ b/lib/Headers/__clang_cuda_complex_builtins.h
@@ -1,22 +1,8 @@
/*===-- __clang_cuda_complex_builtins - CUDA impls of runtime complex fns ---===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/__clang_cuda_device_functions.h b/lib/Headers/__clang_cuda_device_functions.h
index 67bbc68b1637..50ad674f9483 100644
--- a/lib/Headers/__clang_cuda_device_functions.h
+++ b/lib/Headers/__clang_cuda_device_functions.h
@@ -1,22 +1,8 @@
/*===---- __clang_cuda_device_functions.h - CUDA runtime support -----------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -24,15 +10,21 @@
#ifndef __CLANG_CUDA_DEVICE_FUNCTIONS_H__
#define __CLANG_CUDA_DEVICE_FUNCTIONS_H__
+#ifndef _OPENMP
#if CUDA_VERSION < 9000
#error This file is intended to be used with CUDA-9+ only.
#endif
+#endif
// __DEVICE__ is a helper macro with common set of attributes for the wrappers
// we implement in this file. We need static in order to avoid emitting unused
// functions and __forceinline__ helps inlining these wrappers at -O1.
#pragma push_macro("__DEVICE__")
+#ifdef _OPENMP
+#define __DEVICE__ static __attribute__((always_inline))
+#else
#define __DEVICE__ static __device__ __forceinline__
+#endif
// libdevice provides fast low precision and slow full-precision implementations
// for some functions. Which one gets selected depends on
@@ -45,6 +37,15 @@
#define __FAST_OR_SLOW(fast, slow) slow
#endif
+// For C++17 we need to include the noexcept attribute to be compatible
+// with the header-defined version. This may be removed once
+// variant is supported.
+#if defined(_OPENMP) && defined(__cplusplus) && __cplusplus >= 201703L
+#define __NOEXCEPT noexcept
+#else
+#define __NOEXCEPT
+#endif
+
__DEVICE__ int __all(int __a) { return __nvvm_vote_all(__a); }
__DEVICE__ int __any(int __a) { return __nvvm_vote_any(__a); }
__DEVICE__ unsigned int __ballot(int __a) { return __nvvm_vote_ballot(__a); }
@@ -52,8 +53,13 @@ __DEVICE__ unsigned int __brev(unsigned int __a) { return __nv_brev(__a); }
__DEVICE__ unsigned long long __brevll(unsigned long long __a) {
return __nv_brevll(__a);
}
+#if defined(__cplusplus)
__DEVICE__ void __brkpt() { asm volatile("brkpt;"); }
__DEVICE__ void __brkpt(int __a) { __brkpt(); }
+#else
+__DEVICE__ void __attribute__((overloadable)) __brkpt(void) { asm volatile("brkpt;"); }
+__DEVICE__ void __attribute__((overloadable)) __brkpt(int __a) { __brkpt(); }
+#endif
__DEVICE__ unsigned int __byte_perm(unsigned int __a, unsigned int __b,
unsigned int __c) {
return __nv_byte_perm(__a, __b, __c);
@@ -237,6 +243,9 @@ __DEVICE__ int __ffs(int __a) { return __nv_ffs(__a); }
__DEVICE__ int __ffsll(long long __a) { return __nv_ffsll(__a); }
__DEVICE__ int __finite(double __a) { return __nv_isfinited(__a); }
__DEVICE__ int __finitef(float __a) { return __nv_finitef(__a); }
+#ifdef _MSC_VER
+__DEVICE__ int __finitel(long double __a);
+#endif
__DEVICE__ int __float2int_rd(float __a) { return __nv_float2int_rd(__a); }
__DEVICE__ int __float2int_rn(float __a) { return __nv_float2int_rn(__a); }
__DEVICE__ int __float2int_ru(float __a) { return __nv_float2int_ru(__a); }
@@ -445,8 +454,14 @@ __DEVICE__ float __int_as_float(int __a) { return __nv_int_as_float(__a); }
__DEVICE__ int __isfinited(double __a) { return __nv_isfinited(__a); }
__DEVICE__ int __isinf(double __a) { return __nv_isinfd(__a); }
__DEVICE__ int __isinff(float __a) { return __nv_isinff(__a); }
+#ifdef _MSC_VER
+__DEVICE__ int __isinfl(long double __a);
+#endif
__DEVICE__ int __isnan(double __a) { return __nv_isnand(__a); }
__DEVICE__ int __isnanf(float __a) { return __nv_isnanf(__a); }
+#ifdef _MSC_VER
+__DEVICE__ int __isnanl(long double __a);
+#endif
__DEVICE__ double __ll2double_rd(long long __a) {
return __nv_ll2double_rd(__a);
}
@@ -520,8 +535,8 @@ __DEVICE__ unsigned int __sad(int __a, int __b, unsigned int __c) {
__DEVICE__ float __saturatef(float __a) { return __nv_saturatef(__a); }
__DEVICE__ int __signbitd(double __a) { return __nv_signbitd(__a); }
__DEVICE__ int __signbitf(float __a) { return __nv_signbitf(__a); }
-__DEVICE__ void __sincosf(float __a, float *__sptr, float *__cptr) {
- return __nv_fast_sincosf(__a, __sptr, __cptr);
+__DEVICE__ void __sincosf(float __a, float *__s, float *__c) {
+ return __nv_fast_sincosf(__a, __s, __c);
}
__DEVICE__ float __sinf(float __a) { return __nv_fast_sinf(__a); }
__DEVICE__ int __syncthreads_and(int __a) { return __nvvm_bar0_and(__a); }
@@ -1468,7 +1483,8 @@ __DEVICE__ unsigned int __vsubus4(unsigned int __a, unsigned int __b) {
return r;
}
#endif // CUDA_VERSION >= 9020
-__DEVICE__ int abs(int __a) { return __nv_abs(__a); }
+__DEVICE__ int abs(int __a) __NOEXCEPT { return __nv_abs(__a); }
+__DEVICE__ double fabs(double __a) __NOEXCEPT { return __nv_fabs(__a); }
__DEVICE__ double acos(double __a) { return __nv_acos(__a); }
__DEVICE__ float acosf(float __a) { return __nv_acosf(__a); }
__DEVICE__ double acosh(double __a) { return __nv_acosh(__a); }
@@ -1487,8 +1503,10 @@ __DEVICE__ double cbrt(double __a) { return __nv_cbrt(__a); }
__DEVICE__ float cbrtf(float __a) { return __nv_cbrtf(__a); }
__DEVICE__ double ceil(double __a) { return __nv_ceil(__a); }
__DEVICE__ float ceilf(float __a) { return __nv_ceilf(__a); }
+#ifndef _OPENMP
__DEVICE__ int clock() { return __nvvm_read_ptx_sreg_clock(); }
__DEVICE__ long long clock64() { return __nvvm_read_ptx_sreg_clock64(); }
+#endif
__DEVICE__ double copysign(double __a, double __b) {
return __nv_copysign(__a, __b);
}
@@ -1525,7 +1543,6 @@ __DEVICE__ float exp2f(float __a) { return __nv_exp2f(__a); }
__DEVICE__ float expf(float __a) { return __nv_expf(__a); }
__DEVICE__ double expm1(double __a) { return __nv_expm1(__a); }
__DEVICE__ float expm1f(float __a) { return __nv_expm1f(__a); }
-__DEVICE__ double fabs(double __a) { return __nv_fabs(__a); }
__DEVICE__ float fabsf(float __a) { return __nv_fabsf(__a); }
__DEVICE__ double fdim(double __a, double __b) { return __nv_fdim(__a, __b); }
__DEVICE__ float fdimf(float __a, float __b) { return __nv_fdimf(__a, __b); }
@@ -1563,16 +1580,16 @@ __DEVICE__ double j1(double __a) { return __nv_j1(__a); }
__DEVICE__ float j1f(float __a) { return __nv_j1f(__a); }
__DEVICE__ double jn(int __n, double __a) { return __nv_jn(__n, __a); }
__DEVICE__ float jnf(int __n, float __a) { return __nv_jnf(__n, __a); }
-#if defined(__LP64__)
-__DEVICE__ long labs(long __a) { return llabs(__a); };
+#if defined(__LP64__) || defined(_WIN64)
+__DEVICE__ long labs(long __a) __NOEXCEPT { return __nv_llabs(__a); };
#else
-__DEVICE__ long labs(long __a) { return __nv_abs(__a); };
+__DEVICE__ long labs(long __a) __NOEXCEPT { return __nv_abs(__a); };
#endif
__DEVICE__ double ldexp(double __a, int __b) { return __nv_ldexp(__a, __b); }
__DEVICE__ float ldexpf(float __a, int __b) { return __nv_ldexpf(__a, __b); }
__DEVICE__ double lgamma(double __a) { return __nv_lgamma(__a); }
__DEVICE__ float lgammaf(float __a) { return __nv_lgammaf(__a); }
-__DEVICE__ long long llabs(long long __a) { return __nv_llabs(__a); }
+__DEVICE__ long long llabs(long long __a) __NOEXCEPT { return __nv_llabs(__a); }
__DEVICE__ long long llmax(long long __a, long long __b) {
return __nv_llmax(__a, __b);
}
@@ -1597,7 +1614,7 @@ __DEVICE__ float logbf(float __a) { return __nv_logbf(__a); }
__DEVICE__ float logf(float __a) {
return __FAST_OR_SLOW(__nv_fast_logf, __nv_logf)(__a);
}
-#if defined(__LP64__)
+#if defined(__LP64__) || defined(_WIN64)
__DEVICE__ long lrint(double __a) { return llrint(__a); }
__DEVICE__ long lrintf(float __a) { return __float2ll_rn(__a); }
__DEVICE__ long lround(double __a) { return llround(__a); }
@@ -1609,12 +1626,16 @@ __DEVICE__ long lround(double __a) { return round(__a); }
__DEVICE__ long lroundf(float __a) { return roundf(__a); }
#endif
__DEVICE__ int max(int __a, int __b) { return __nv_max(__a, __b); }
+// These functions shouldn't be declared when including this header
+// for math function resolution purposes.
+#ifndef _OPENMP
__DEVICE__ void *memcpy(void *__a, const void *__b, size_t __c) {
return __builtin_memcpy(__a, __b, __c);
}
__DEVICE__ void *memset(void *__a, int __b, size_t __c) {
return __builtin_memset(__a, __b, __c);
}
+#endif
__DEVICE__ int min(int __a, int __b) { return __nv_min(__a, __b); }
__DEVICE__ double modf(double __a, double *__b) { return __nv_modf(__a, __b); }
__DEVICE__ float modff(float __a, float *__b) { return __nv_modff(__a, __b); }
@@ -1698,6 +1719,8 @@ __DEVICE__ double rsqrt(double __a) { return __nv_rsqrt(__a); }
__DEVICE__ float rsqrtf(float __a) { return __nv_rsqrtf(__a); }
__DEVICE__ double scalbn(double __a, int __b) { return __nv_scalbn(__a, __b); }
__DEVICE__ float scalbnf(float __a, int __b) { return __nv_scalbnf(__a, __b); }
+// TODO: remove once variant is supported
+#ifndef _OPENMP
__DEVICE__ double scalbln(double __a, long __b) {
if (__b > INT_MAX)
return __a > 0 ? HUGE_VAL : -HUGE_VAL;
@@ -1712,18 +1735,19 @@ __DEVICE__ float scalblnf(float __a, long __b) {
return __a > 0 ? 0.f : -0.f;
return scalbnf(__a, (int)__b);
}
+#endif
__DEVICE__ double sin(double __a) { return __nv_sin(__a); }
-__DEVICE__ void sincos(double __a, double *__sptr, double *__cptr) {
- return __nv_sincos(__a, __sptr, __cptr);
+__DEVICE__ void sincos(double __a, double *__s, double *__c) {
+ return __nv_sincos(__a, __s, __c);
}
-__DEVICE__ void sincosf(float __a, float *__sptr, float *__cptr) {
- return __FAST_OR_SLOW(__nv_fast_sincosf, __nv_sincosf)(__a, __sptr, __cptr);
+__DEVICE__ void sincosf(float __a, float *__s, float *__c) {
+ return __FAST_OR_SLOW(__nv_fast_sincosf, __nv_sincosf)(__a, __s, __c);
}
-__DEVICE__ void sincospi(double __a, double *__sptr, double *__cptr) {
- return __nv_sincospi(__a, __sptr, __cptr);
+__DEVICE__ void sincospi(double __a, double *__s, double *__c) {
+ return __nv_sincospi(__a, __s, __c);
}
-__DEVICE__ void sincospif(float __a, float *__sptr, float *__cptr) {
- return __nv_sincospif(__a, __sptr, __cptr);
+__DEVICE__ void sincospif(float __a, float *__s, float *__c) {
+ return __nv_sincospif(__a, __s, __c);
}
__DEVICE__ float sinf(float __a) {
return __FAST_OR_SLOW(__nv_fast_sinf, __nv_sinf)(__a);
@@ -1763,6 +1787,7 @@ __DEVICE__ float y1f(float __a) { return __nv_y1f(__a); }
__DEVICE__ double yn(int __a, double __b) { return __nv_yn(__a, __b); }
__DEVICE__ float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }
+#undef __NOEXCEPT
#pragma pop_macro("__DEVICE__")
#pragma pop_macro("__FAST_OR_SLOW")
#endif // __CLANG_CUDA_DEVICE_FUNCTIONS_H__
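
The __brkpt hunk above relies on Clang's overloadable attribute so that the two entry points can coexist when this header is included from C (the OpenMP case) rather than C++. A small, self-contained illustration of that attribute follows, not part of the patch; the report() functions are hypothetical and unrelated to the header, and it assumes compilation as C with clang.

#include <stdio.h>

/* Clang extension: lets plain C functions be overloaded by parameter list. */
static void __attribute__((overloadable)) report(void) {
  puts("no argument");
}
static void __attribute__((overloadable)) report(int __a) {
  printf("argument %d\n", __a);
}

int main(void) {
  report();   /* resolves to the zero-argument overload */
  report(7);  /* resolves to the int overload */
  return 0;
}
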
diff --git a/lib/Headers/__clang_cuda_intrinsics.h b/lib/Headers/__clang_cuda_intrinsics.h
index 3c0cde94ed44..2970d17f89ee 100644
--- a/lib/Headers/__clang_cuda_intrinsics.h
+++ b/lib/Headers/__clang_cuda_intrinsics.h
@@ -1,22 +1,8 @@
/*===--- __clang_cuda_intrinsics.h - Device-side CUDA intrinsic wrappers ---===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/__clang_cuda_libdevice_declares.h b/lib/Headers/__clang_cuda_libdevice_declares.h
index 71df7f849d15..4d70353394c8 100644
--- a/lib/Headers/__clang_cuda_libdevice_declares.h
+++ b/lib/Headers/__clang_cuda_libdevice_declares.h
@@ -1,22 +1,8 @@
/*===-- __clang_cuda_libdevice_declares.h - decls for libdevice functions --===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -24,443 +10,453 @@
#ifndef __CLANG_CUDA_LIBDEVICE_DECLARES_H__
#define __CLANG_CUDA_LIBDEVICE_DECLARES_H__
+#if defined(__cplusplus)
extern "C" {
+#endif
+
+#if defined(_OPENMP)
+#define __DEVICE__
+#elif defined(__CUDA__)
+#define __DEVICE__ __device__
+#endif
-__device__ int __nv_abs(int __a);
-__device__ double __nv_acos(double __a);
-__device__ float __nv_acosf(float __a);
-__device__ double __nv_acosh(double __a);
-__device__ float __nv_acoshf(float __a);
-__device__ double __nv_asin(double __a);
-__device__ float __nv_asinf(float __a);
-__device__ double __nv_asinh(double __a);
-__device__ float __nv_asinhf(float __a);
-__device__ double __nv_atan2(double __a, double __b);
-__device__ float __nv_atan2f(float __a, float __b);
-__device__ double __nv_atan(double __a);
-__device__ float __nv_atanf(float __a);
-__device__ double __nv_atanh(double __a);
-__device__ float __nv_atanhf(float __a);
-__device__ int __nv_brev(int __a);
-__device__ long long __nv_brevll(long long __a);
-__device__ int __nv_byte_perm(int __a, int __b, int __c);
-__device__ double __nv_cbrt(double __a);
-__device__ float __nv_cbrtf(float __a);
-__device__ double __nv_ceil(double __a);
-__device__ float __nv_ceilf(float __a);
-__device__ int __nv_clz(int __a);
-__device__ int __nv_clzll(long long __a);
-__device__ double __nv_copysign(double __a, double __b);
-__device__ float __nv_copysignf(float __a, float __b);
-__device__ double __nv_cos(double __a);
-__device__ float __nv_cosf(float __a);
-__device__ double __nv_cosh(double __a);
-__device__ float __nv_coshf(float __a);
-__device__ double __nv_cospi(double __a);
-__device__ float __nv_cospif(float __a);
-__device__ double __nv_cyl_bessel_i0(double __a);
-__device__ float __nv_cyl_bessel_i0f(float __a);
-__device__ double __nv_cyl_bessel_i1(double __a);
-__device__ float __nv_cyl_bessel_i1f(float __a);
-__device__ double __nv_dadd_rd(double __a, double __b);
-__device__ double __nv_dadd_rn(double __a, double __b);
-__device__ double __nv_dadd_ru(double __a, double __b);
-__device__ double __nv_dadd_rz(double __a, double __b);
-__device__ double __nv_ddiv_rd(double __a, double __b);
-__device__ double __nv_ddiv_rn(double __a, double __b);
-__device__ double __nv_ddiv_ru(double __a, double __b);
-__device__ double __nv_ddiv_rz(double __a, double __b);
-__device__ double __nv_dmul_rd(double __a, double __b);
-__device__ double __nv_dmul_rn(double __a, double __b);
-__device__ double __nv_dmul_ru(double __a, double __b);
-__device__ double __nv_dmul_rz(double __a, double __b);
-__device__ float __nv_double2float_rd(double __a);
-__device__ float __nv_double2float_rn(double __a);
-__device__ float __nv_double2float_ru(double __a);
-__device__ float __nv_double2float_rz(double __a);
-__device__ int __nv_double2hiint(double __a);
-__device__ int __nv_double2int_rd(double __a);
-__device__ int __nv_double2int_rn(double __a);
-__device__ int __nv_double2int_ru(double __a);
-__device__ int __nv_double2int_rz(double __a);
-__device__ long long __nv_double2ll_rd(double __a);
-__device__ long long __nv_double2ll_rn(double __a);
-__device__ long long __nv_double2ll_ru(double __a);
-__device__ long long __nv_double2ll_rz(double __a);
-__device__ int __nv_double2loint(double __a);
-__device__ unsigned int __nv_double2uint_rd(double __a);
-__device__ unsigned int __nv_double2uint_rn(double __a);
-__device__ unsigned int __nv_double2uint_ru(double __a);
-__device__ unsigned int __nv_double2uint_rz(double __a);
-__device__ unsigned long long __nv_double2ull_rd(double __a);
-__device__ unsigned long long __nv_double2ull_rn(double __a);
-__device__ unsigned long long __nv_double2ull_ru(double __a);
-__device__ unsigned long long __nv_double2ull_rz(double __a);
-__device__ unsigned long long __nv_double_as_longlong(double __a);
-__device__ double __nv_drcp_rd(double __a);
-__device__ double __nv_drcp_rn(double __a);
-__device__ double __nv_drcp_ru(double __a);
-__device__ double __nv_drcp_rz(double __a);
-__device__ double __nv_dsqrt_rd(double __a);
-__device__ double __nv_dsqrt_rn(double __a);
-__device__ double __nv_dsqrt_ru(double __a);
-__device__ double __nv_dsqrt_rz(double __a);
-__device__ double __nv_dsub_rd(double __a, double __b);
-__device__ double __nv_dsub_rn(double __a, double __b);
-__device__ double __nv_dsub_ru(double __a, double __b);
-__device__ double __nv_dsub_rz(double __a, double __b);
-__device__ double __nv_erfc(double __a);
-__device__ float __nv_erfcf(float __a);
-__device__ double __nv_erfcinv(double __a);
-__device__ float __nv_erfcinvf(float __a);
-__device__ double __nv_erfcx(double __a);
-__device__ float __nv_erfcxf(float __a);
-__device__ double __nv_erf(double __a);
-__device__ float __nv_erff(float __a);
-__device__ double __nv_erfinv(double __a);
-__device__ float __nv_erfinvf(float __a);
-__device__ double __nv_exp10(double __a);
-__device__ float __nv_exp10f(float __a);
-__device__ double __nv_exp2(double __a);
-__device__ float __nv_exp2f(float __a);
-__device__ double __nv_exp(double __a);
-__device__ float __nv_expf(float __a);
-__device__ double __nv_expm1(double __a);
-__device__ float __nv_expm1f(float __a);
-__device__ double __nv_fabs(double __a);
-__device__ float __nv_fabsf(float __a);
-__device__ float __nv_fadd_rd(float __a, float __b);
-__device__ float __nv_fadd_rn(float __a, float __b);
-__device__ float __nv_fadd_ru(float __a, float __b);
-__device__ float __nv_fadd_rz(float __a, float __b);
-__device__ float __nv_fast_cosf(float __a);
-__device__ float __nv_fast_exp10f(float __a);
-__device__ float __nv_fast_expf(float __a);
-__device__ float __nv_fast_fdividef(float __a, float __b);
-__device__ float __nv_fast_log10f(float __a);
-__device__ float __nv_fast_log2f(float __a);
-__device__ float __nv_fast_logf(float __a);
-__device__ float __nv_fast_powf(float __a, float __b);
-__device__ void __nv_fast_sincosf(float __a, float *__sptr, float *__cptr);
-__device__ float __nv_fast_sinf(float __a);
-__device__ float __nv_fast_tanf(float __a);
-__device__ double __nv_fdim(double __a, double __b);
-__device__ float __nv_fdimf(float __a, float __b);
-__device__ float __nv_fdiv_rd(float __a, float __b);
-__device__ float __nv_fdiv_rn(float __a, float __b);
-__device__ float __nv_fdiv_ru(float __a, float __b);
-__device__ float __nv_fdiv_rz(float __a, float __b);
-__device__ int __nv_ffs(int __a);
-__device__ int __nv_ffsll(long long __a);
-__device__ int __nv_finitef(float __a);
-__device__ unsigned short __nv_float2half_rn(float __a);
-__device__ int __nv_float2int_rd(float __a);
-__device__ int __nv_float2int_rn(float __a);
-__device__ int __nv_float2int_ru(float __a);
-__device__ int __nv_float2int_rz(float __a);
-__device__ long long __nv_float2ll_rd(float __a);
-__device__ long long __nv_float2ll_rn(float __a);
-__device__ long long __nv_float2ll_ru(float __a);
-__device__ long long __nv_float2ll_rz(float __a);
-__device__ unsigned int __nv_float2uint_rd(float __a);
-__device__ unsigned int __nv_float2uint_rn(float __a);
-__device__ unsigned int __nv_float2uint_ru(float __a);
-__device__ unsigned int __nv_float2uint_rz(float __a);
-__device__ unsigned long long __nv_float2ull_rd(float __a);
-__device__ unsigned long long __nv_float2ull_rn(float __a);
-__device__ unsigned long long __nv_float2ull_ru(float __a);
-__device__ unsigned long long __nv_float2ull_rz(float __a);
-__device__ int __nv_float_as_int(float __a);
-__device__ unsigned int __nv_float_as_uint(float __a);
-__device__ double __nv_floor(double __a);
-__device__ float __nv_floorf(float __a);
-__device__ double __nv_fma(double __a, double __b, double __c);
-__device__ float __nv_fmaf(float __a, float __b, float __c);
-__device__ float __nv_fmaf_ieee_rd(float __a, float __b, float __c);
-__device__ float __nv_fmaf_ieee_rn(float __a, float __b, float __c);
-__device__ float __nv_fmaf_ieee_ru(float __a, float __b, float __c);
-__device__ float __nv_fmaf_ieee_rz(float __a, float __b, float __c);
-__device__ float __nv_fmaf_rd(float __a, float __b, float __c);
-__device__ float __nv_fmaf_rn(float __a, float __b, float __c);
-__device__ float __nv_fmaf_ru(float __a, float __b, float __c);
-__device__ float __nv_fmaf_rz(float __a, float __b, float __c);
-__device__ double __nv_fma_rd(double __a, double __b, double __c);
-__device__ double __nv_fma_rn(double __a, double __b, double __c);
-__device__ double __nv_fma_ru(double __a, double __b, double __c);
-__device__ double __nv_fma_rz(double __a, double __b, double __c);
-__device__ double __nv_fmax(double __a, double __b);
-__device__ float __nv_fmaxf(float __a, float __b);
-__device__ double __nv_fmin(double __a, double __b);
-__device__ float __nv_fminf(float __a, float __b);
-__device__ double __nv_fmod(double __a, double __b);
-__device__ float __nv_fmodf(float __a, float __b);
-__device__ float __nv_fmul_rd(float __a, float __b);
-__device__ float __nv_fmul_rn(float __a, float __b);
-__device__ float __nv_fmul_ru(float __a, float __b);
-__device__ float __nv_fmul_rz(float __a, float __b);
-__device__ float __nv_frcp_rd(float __a);
-__device__ float __nv_frcp_rn(float __a);
-__device__ float __nv_frcp_ru(float __a);
-__device__ float __nv_frcp_rz(float __a);
-__device__ double __nv_frexp(double __a, int *__b);
-__device__ float __nv_frexpf(float __a, int *__b);
-__device__ float __nv_frsqrt_rn(float __a);
-__device__ float __nv_fsqrt_rd(float __a);
-__device__ float __nv_fsqrt_rn(float __a);
-__device__ float __nv_fsqrt_ru(float __a);
-__device__ float __nv_fsqrt_rz(float __a);
-__device__ float __nv_fsub_rd(float __a, float __b);
-__device__ float __nv_fsub_rn(float __a, float __b);
-__device__ float __nv_fsub_ru(float __a, float __b);
-__device__ float __nv_fsub_rz(float __a, float __b);
-__device__ int __nv_hadd(int __a, int __b);
-__device__ float __nv_half2float(unsigned short __h);
-__device__ double __nv_hiloint2double(int __a, int __b);
-__device__ double __nv_hypot(double __a, double __b);
-__device__ float __nv_hypotf(float __a, float __b);
-__device__ int __nv_ilogb(double __a);
-__device__ int __nv_ilogbf(float __a);
-__device__ double __nv_int2double_rn(int __a);
-__device__ float __nv_int2float_rd(int __a);
-__device__ float __nv_int2float_rn(int __a);
-__device__ float __nv_int2float_ru(int __a);
-__device__ float __nv_int2float_rz(int __a);
-__device__ float __nv_int_as_float(int __a);
-__device__ int __nv_isfinited(double __a);
-__device__ int __nv_isinfd(double __a);
-__device__ int __nv_isinff(float __a);
-__device__ int __nv_isnand(double __a);
-__device__ int __nv_isnanf(float __a);
-__device__ double __nv_j0(double __a);
-__device__ float __nv_j0f(float __a);
-__device__ double __nv_j1(double __a);
-__device__ float __nv_j1f(float __a);
-__device__ float __nv_jnf(int __a, float __b);
-__device__ double __nv_jn(int __a, double __b);
-__device__ double __nv_ldexp(double __a, int __b);
-__device__ float __nv_ldexpf(float __a, int __b);
-__device__ double __nv_lgamma(double __a);
-__device__ float __nv_lgammaf(float __a);
-__device__ double __nv_ll2double_rd(long long __a);
-__device__ double __nv_ll2double_rn(long long __a);
-__device__ double __nv_ll2double_ru(long long __a);
-__device__ double __nv_ll2double_rz(long long __a);
-__device__ float __nv_ll2float_rd(long long __a);
-__device__ float __nv_ll2float_rn(long long __a);
-__device__ float __nv_ll2float_ru(long long __a);
-__device__ float __nv_ll2float_rz(long long __a);
-__device__ long long __nv_llabs(long long __a);
-__device__ long long __nv_llmax(long long __a, long long __b);
-__device__ long long __nv_llmin(long long __a, long long __b);
-__device__ long long __nv_llrint(double __a);
-__device__ long long __nv_llrintf(float __a);
-__device__ long long __nv_llround(double __a);
-__device__ long long __nv_llroundf(float __a);
-__device__ double __nv_log10(double __a);
-__device__ float __nv_log10f(float __a);
-__device__ double __nv_log1p(double __a);
-__device__ float __nv_log1pf(float __a);
-__device__ double __nv_log2(double __a);
-__device__ float __nv_log2f(float __a);
-__device__ double __nv_logb(double __a);
-__device__ float __nv_logbf(float __a);
-__device__ double __nv_log(double __a);
-__device__ float __nv_logf(float __a);
-__device__ double __nv_longlong_as_double(long long __a);
-__device__ int __nv_max(int __a, int __b);
-__device__ int __nv_min(int __a, int __b);
-__device__ double __nv_modf(double __a, double *__b);
-__device__ float __nv_modff(float __a, float *__b);
-__device__ int __nv_mul24(int __a, int __b);
-__device__ long long __nv_mul64hi(long long __a, long long __b);
-__device__ int __nv_mulhi(int __a, int __b);
-__device__ double __nv_nan(const signed char *__a);
-__device__ float __nv_nanf(const signed char *__a);
-__device__ double __nv_nearbyint(double __a);
-__device__ float __nv_nearbyintf(float __a);
-__device__ double __nv_nextafter(double __a, double __b);
-__device__ float __nv_nextafterf(float __a, float __b);
-__device__ double __nv_norm3d(double __a, double __b, double __c);
-__device__ float __nv_norm3df(float __a, float __b, float __c);
-__device__ double __nv_norm4d(double __a, double __b, double __c, double __d);
-__device__ float __nv_norm4df(float __a, float __b, float __c, float __d);
-__device__ double __nv_normcdf(double __a);
-__device__ float __nv_normcdff(float __a);
-__device__ double __nv_normcdfinv(double __a);
-__device__ float __nv_normcdfinvf(float __a);
-__device__ float __nv_normf(int __a, const float *__b);
-__device__ double __nv_norm(int __a, const double *__b);
-__device__ int __nv_popc(int __a);
-__device__ int __nv_popcll(long long __a);
-__device__ double __nv_pow(double __a, double __b);
-__device__ float __nv_powf(float __a, float __b);
-__device__ double __nv_powi(double __a, int __b);
-__device__ float __nv_powif(float __a, int __b);
-__device__ double __nv_rcbrt(double __a);
-__device__ float __nv_rcbrtf(float __a);
-__device__ double __nv_rcp64h(double __a);
-__device__ double __nv_remainder(double __a, double __b);
-__device__ float __nv_remainderf(float __a, float __b);
-__device__ double __nv_remquo(double __a, double __b, int *__c);
-__device__ float __nv_remquof(float __a, float __b, int *__c);
-__device__ int __nv_rhadd(int __a, int __b);
-__device__ double __nv_rhypot(double __a, double __b);
-__device__ float __nv_rhypotf(float __a, float __b);
-__device__ double __nv_rint(double __a);
-__device__ float __nv_rintf(float __a);
-__device__ double __nv_rnorm3d(double __a, double __b, double __c);
-__device__ float __nv_rnorm3df(float __a, float __b, float __c);
-__device__ double __nv_rnorm4d(double __a, double __b, double __c, double __d);
-__device__ float __nv_rnorm4df(float __a, float __b, float __c, float __d);
-__device__ float __nv_rnormf(int __a, const float *__b);
-__device__ double __nv_rnorm(int __a, const double *__b);
-__device__ double __nv_round(double __a);
-__device__ float __nv_roundf(float __a);
-__device__ double __nv_rsqrt(double __a);
-__device__ float __nv_rsqrtf(float __a);
-__device__ int __nv_sad(int __a, int __b, int __c);
-__device__ float __nv_saturatef(float __a);
-__device__ double __nv_scalbn(double __a, int __b);
-__device__ float __nv_scalbnf(float __a, int __b);
-__device__ int __nv_signbitd(double __a);
-__device__ int __nv_signbitf(float __a);
-__device__ void __nv_sincos(double __a, double *__b, double *__c);
-__device__ void __nv_sincosf(float __a, float *__b, float *__c);
-__device__ void __nv_sincospi(double __a, double *__b, double *__c);
-__device__ void __nv_sincospif(float __a, float *__b, float *__c);
-__device__ double __nv_sin(double __a);
-__device__ float __nv_sinf(float __a);
-__device__ double __nv_sinh(double __a);
-__device__ float __nv_sinhf(float __a);
-__device__ double __nv_sinpi(double __a);
-__device__ float __nv_sinpif(float __a);
-__device__ double __nv_sqrt(double __a);
-__device__ float __nv_sqrtf(float __a);
-__device__ double __nv_tan(double __a);
-__device__ float __nv_tanf(float __a);
-__device__ double __nv_tanh(double __a);
-__device__ float __nv_tanhf(float __a);
-__device__ double __nv_tgamma(double __a);
-__device__ float __nv_tgammaf(float __a);
-__device__ double __nv_trunc(double __a);
-__device__ float __nv_truncf(float __a);
-__device__ int __nv_uhadd(unsigned int __a, unsigned int __b);
-__device__ double __nv_uint2double_rn(unsigned int __i);
-__device__ float __nv_uint2float_rd(unsigned int __a);
-__device__ float __nv_uint2float_rn(unsigned int __a);
-__device__ float __nv_uint2float_ru(unsigned int __a);
-__device__ float __nv_uint2float_rz(unsigned int __a);
-__device__ float __nv_uint_as_float(unsigned int __a);
-__device__ double __nv_ull2double_rd(unsigned long long __a);
-__device__ double __nv_ull2double_rn(unsigned long long __a);
-__device__ double __nv_ull2double_ru(unsigned long long __a);
-__device__ double __nv_ull2double_rz(unsigned long long __a);
-__device__ float __nv_ull2float_rd(unsigned long long __a);
-__device__ float __nv_ull2float_rn(unsigned long long __a);
-__device__ float __nv_ull2float_ru(unsigned long long __a);
-__device__ float __nv_ull2float_rz(unsigned long long __a);
-__device__ unsigned long long __nv_ullmax(unsigned long long __a,
+__DEVICE__ int __nv_abs(int __a);
+__DEVICE__ double __nv_acos(double __a);
+__DEVICE__ float __nv_acosf(float __a);
+__DEVICE__ double __nv_acosh(double __a);
+__DEVICE__ float __nv_acoshf(float __a);
+__DEVICE__ double __nv_asin(double __a);
+__DEVICE__ float __nv_asinf(float __a);
+__DEVICE__ double __nv_asinh(double __a);
+__DEVICE__ float __nv_asinhf(float __a);
+__DEVICE__ double __nv_atan2(double __a, double __b);
+__DEVICE__ float __nv_atan2f(float __a, float __b);
+__DEVICE__ double __nv_atan(double __a);
+__DEVICE__ float __nv_atanf(float __a);
+__DEVICE__ double __nv_atanh(double __a);
+__DEVICE__ float __nv_atanhf(float __a);
+__DEVICE__ int __nv_brev(int __a);
+__DEVICE__ long long __nv_brevll(long long __a);
+__DEVICE__ int __nv_byte_perm(int __a, int __b, int __c);
+__DEVICE__ double __nv_cbrt(double __a);
+__DEVICE__ float __nv_cbrtf(float __a);
+__DEVICE__ double __nv_ceil(double __a);
+__DEVICE__ float __nv_ceilf(float __a);
+__DEVICE__ int __nv_clz(int __a);
+__DEVICE__ int __nv_clzll(long long __a);
+__DEVICE__ double __nv_copysign(double __a, double __b);
+__DEVICE__ float __nv_copysignf(float __a, float __b);
+__DEVICE__ double __nv_cos(double __a);
+__DEVICE__ float __nv_cosf(float __a);
+__DEVICE__ double __nv_cosh(double __a);
+__DEVICE__ float __nv_coshf(float __a);
+__DEVICE__ double __nv_cospi(double __a);
+__DEVICE__ float __nv_cospif(float __a);
+__DEVICE__ double __nv_cyl_bessel_i0(double __a);
+__DEVICE__ float __nv_cyl_bessel_i0f(float __a);
+__DEVICE__ double __nv_cyl_bessel_i1(double __a);
+__DEVICE__ float __nv_cyl_bessel_i1f(float __a);
+__DEVICE__ double __nv_dadd_rd(double __a, double __b);
+__DEVICE__ double __nv_dadd_rn(double __a, double __b);
+__DEVICE__ double __nv_dadd_ru(double __a, double __b);
+__DEVICE__ double __nv_dadd_rz(double __a, double __b);
+__DEVICE__ double __nv_ddiv_rd(double __a, double __b);
+__DEVICE__ double __nv_ddiv_rn(double __a, double __b);
+__DEVICE__ double __nv_ddiv_ru(double __a, double __b);
+__DEVICE__ double __nv_ddiv_rz(double __a, double __b);
+__DEVICE__ double __nv_dmul_rd(double __a, double __b);
+__DEVICE__ double __nv_dmul_rn(double __a, double __b);
+__DEVICE__ double __nv_dmul_ru(double __a, double __b);
+__DEVICE__ double __nv_dmul_rz(double __a, double __b);
+__DEVICE__ float __nv_double2float_rd(double __a);
+__DEVICE__ float __nv_double2float_rn(double __a);
+__DEVICE__ float __nv_double2float_ru(double __a);
+__DEVICE__ float __nv_double2float_rz(double __a);
+__DEVICE__ int __nv_double2hiint(double __a);
+__DEVICE__ int __nv_double2int_rd(double __a);
+__DEVICE__ int __nv_double2int_rn(double __a);
+__DEVICE__ int __nv_double2int_ru(double __a);
+__DEVICE__ int __nv_double2int_rz(double __a);
+__DEVICE__ long long __nv_double2ll_rd(double __a);
+__DEVICE__ long long __nv_double2ll_rn(double __a);
+__DEVICE__ long long __nv_double2ll_ru(double __a);
+__DEVICE__ long long __nv_double2ll_rz(double __a);
+__DEVICE__ int __nv_double2loint(double __a);
+__DEVICE__ unsigned int __nv_double2uint_rd(double __a);
+__DEVICE__ unsigned int __nv_double2uint_rn(double __a);
+__DEVICE__ unsigned int __nv_double2uint_ru(double __a);
+__DEVICE__ unsigned int __nv_double2uint_rz(double __a);
+__DEVICE__ unsigned long long __nv_double2ull_rd(double __a);
+__DEVICE__ unsigned long long __nv_double2ull_rn(double __a);
+__DEVICE__ unsigned long long __nv_double2ull_ru(double __a);
+__DEVICE__ unsigned long long __nv_double2ull_rz(double __a);
+__DEVICE__ unsigned long long __nv_double_as_longlong(double __a);
+__DEVICE__ double __nv_drcp_rd(double __a);
+__DEVICE__ double __nv_drcp_rn(double __a);
+__DEVICE__ double __nv_drcp_ru(double __a);
+__DEVICE__ double __nv_drcp_rz(double __a);
+__DEVICE__ double __nv_dsqrt_rd(double __a);
+__DEVICE__ double __nv_dsqrt_rn(double __a);
+__DEVICE__ double __nv_dsqrt_ru(double __a);
+__DEVICE__ double __nv_dsqrt_rz(double __a);
+__DEVICE__ double __nv_dsub_rd(double __a, double __b);
+__DEVICE__ double __nv_dsub_rn(double __a, double __b);
+__DEVICE__ double __nv_dsub_ru(double __a, double __b);
+__DEVICE__ double __nv_dsub_rz(double __a, double __b);
+__DEVICE__ double __nv_erfc(double __a);
+__DEVICE__ float __nv_erfcf(float __a);
+__DEVICE__ double __nv_erfcinv(double __a);
+__DEVICE__ float __nv_erfcinvf(float __a);
+__DEVICE__ double __nv_erfcx(double __a);
+__DEVICE__ float __nv_erfcxf(float __a);
+__DEVICE__ double __nv_erf(double __a);
+__DEVICE__ float __nv_erff(float __a);
+__DEVICE__ double __nv_erfinv(double __a);
+__DEVICE__ float __nv_erfinvf(float __a);
+__DEVICE__ double __nv_exp10(double __a);
+__DEVICE__ float __nv_exp10f(float __a);
+__DEVICE__ double __nv_exp2(double __a);
+__DEVICE__ float __nv_exp2f(float __a);
+__DEVICE__ double __nv_exp(double __a);
+__DEVICE__ float __nv_expf(float __a);
+__DEVICE__ double __nv_expm1(double __a);
+__DEVICE__ float __nv_expm1f(float __a);
+__DEVICE__ double __nv_fabs(double __a);
+__DEVICE__ float __nv_fabsf(float __a);
+__DEVICE__ float __nv_fadd_rd(float __a, float __b);
+__DEVICE__ float __nv_fadd_rn(float __a, float __b);
+__DEVICE__ float __nv_fadd_ru(float __a, float __b);
+__DEVICE__ float __nv_fadd_rz(float __a, float __b);
+__DEVICE__ float __nv_fast_cosf(float __a);
+__DEVICE__ float __nv_fast_exp10f(float __a);
+__DEVICE__ float __nv_fast_expf(float __a);
+__DEVICE__ float __nv_fast_fdividef(float __a, float __b);
+__DEVICE__ float __nv_fast_log10f(float __a);
+__DEVICE__ float __nv_fast_log2f(float __a);
+__DEVICE__ float __nv_fast_logf(float __a);
+__DEVICE__ float __nv_fast_powf(float __a, float __b);
+__DEVICE__ void __nv_fast_sincosf(float __a, float *__s, float *__c);
+__DEVICE__ float __nv_fast_sinf(float __a);
+__DEVICE__ float __nv_fast_tanf(float __a);
+__DEVICE__ double __nv_fdim(double __a, double __b);
+__DEVICE__ float __nv_fdimf(float __a, float __b);
+__DEVICE__ float __nv_fdiv_rd(float __a, float __b);
+__DEVICE__ float __nv_fdiv_rn(float __a, float __b);
+__DEVICE__ float __nv_fdiv_ru(float __a, float __b);
+__DEVICE__ float __nv_fdiv_rz(float __a, float __b);
+__DEVICE__ int __nv_ffs(int __a);
+__DEVICE__ int __nv_ffsll(long long __a);
+__DEVICE__ int __nv_finitef(float __a);
+__DEVICE__ unsigned short __nv_float2half_rn(float __a);
+__DEVICE__ int __nv_float2int_rd(float __a);
+__DEVICE__ int __nv_float2int_rn(float __a);
+__DEVICE__ int __nv_float2int_ru(float __a);
+__DEVICE__ int __nv_float2int_rz(float __a);
+__DEVICE__ long long __nv_float2ll_rd(float __a);
+__DEVICE__ long long __nv_float2ll_rn(float __a);
+__DEVICE__ long long __nv_float2ll_ru(float __a);
+__DEVICE__ long long __nv_float2ll_rz(float __a);
+__DEVICE__ unsigned int __nv_float2uint_rd(float __a);
+__DEVICE__ unsigned int __nv_float2uint_rn(float __a);
+__DEVICE__ unsigned int __nv_float2uint_ru(float __a);
+__DEVICE__ unsigned int __nv_float2uint_rz(float __a);
+__DEVICE__ unsigned long long __nv_float2ull_rd(float __a);
+__DEVICE__ unsigned long long __nv_float2ull_rn(float __a);
+__DEVICE__ unsigned long long __nv_float2ull_ru(float __a);
+__DEVICE__ unsigned long long __nv_float2ull_rz(float __a);
+__DEVICE__ int __nv_float_as_int(float __a);
+__DEVICE__ unsigned int __nv_float_as_uint(float __a);
+__DEVICE__ double __nv_floor(double __a);
+__DEVICE__ float __nv_floorf(float __a);
+__DEVICE__ double __nv_fma(double __a, double __b, double __c);
+__DEVICE__ float __nv_fmaf(float __a, float __b, float __c);
+__DEVICE__ float __nv_fmaf_ieee_rd(float __a, float __b, float __c);
+__DEVICE__ float __nv_fmaf_ieee_rn(float __a, float __b, float __c);
+__DEVICE__ float __nv_fmaf_ieee_ru(float __a, float __b, float __c);
+__DEVICE__ float __nv_fmaf_ieee_rz(float __a, float __b, float __c);
+__DEVICE__ float __nv_fmaf_rd(float __a, float __b, float __c);
+__DEVICE__ float __nv_fmaf_rn(float __a, float __b, float __c);
+__DEVICE__ float __nv_fmaf_ru(float __a, float __b, float __c);
+__DEVICE__ float __nv_fmaf_rz(float __a, float __b, float __c);
+__DEVICE__ double __nv_fma_rd(double __a, double __b, double __c);
+__DEVICE__ double __nv_fma_rn(double __a, double __b, double __c);
+__DEVICE__ double __nv_fma_ru(double __a, double __b, double __c);
+__DEVICE__ double __nv_fma_rz(double __a, double __b, double __c);
+__DEVICE__ double __nv_fmax(double __a, double __b);
+__DEVICE__ float __nv_fmaxf(float __a, float __b);
+__DEVICE__ double __nv_fmin(double __a, double __b);
+__DEVICE__ float __nv_fminf(float __a, float __b);
+__DEVICE__ double __nv_fmod(double __a, double __b);
+__DEVICE__ float __nv_fmodf(float __a, float __b);
+__DEVICE__ float __nv_fmul_rd(float __a, float __b);
+__DEVICE__ float __nv_fmul_rn(float __a, float __b);
+__DEVICE__ float __nv_fmul_ru(float __a, float __b);
+__DEVICE__ float __nv_fmul_rz(float __a, float __b);
+__DEVICE__ float __nv_frcp_rd(float __a);
+__DEVICE__ float __nv_frcp_rn(float __a);
+__DEVICE__ float __nv_frcp_ru(float __a);
+__DEVICE__ float __nv_frcp_rz(float __a);
+__DEVICE__ double __nv_frexp(double __a, int *__b);
+__DEVICE__ float __nv_frexpf(float __a, int *__b);
+__DEVICE__ float __nv_frsqrt_rn(float __a);
+__DEVICE__ float __nv_fsqrt_rd(float __a);
+__DEVICE__ float __nv_fsqrt_rn(float __a);
+__DEVICE__ float __nv_fsqrt_ru(float __a);
+__DEVICE__ float __nv_fsqrt_rz(float __a);
+__DEVICE__ float __nv_fsub_rd(float __a, float __b);
+__DEVICE__ float __nv_fsub_rn(float __a, float __b);
+__DEVICE__ float __nv_fsub_ru(float __a, float __b);
+__DEVICE__ float __nv_fsub_rz(float __a, float __b);
+__DEVICE__ int __nv_hadd(int __a, int __b);
+__DEVICE__ float __nv_half2float(unsigned short __h);
+__DEVICE__ double __nv_hiloint2double(int __a, int __b);
+__DEVICE__ double __nv_hypot(double __a, double __b);
+__DEVICE__ float __nv_hypotf(float __a, float __b);
+__DEVICE__ int __nv_ilogb(double __a);
+__DEVICE__ int __nv_ilogbf(float __a);
+__DEVICE__ double __nv_int2double_rn(int __a);
+__DEVICE__ float __nv_int2float_rd(int __a);
+__DEVICE__ float __nv_int2float_rn(int __a);
+__DEVICE__ float __nv_int2float_ru(int __a);
+__DEVICE__ float __nv_int2float_rz(int __a);
+__DEVICE__ float __nv_int_as_float(int __a);
+__DEVICE__ int __nv_isfinited(double __a);
+__DEVICE__ int __nv_isinfd(double __a);
+__DEVICE__ int __nv_isinff(float __a);
+__DEVICE__ int __nv_isnand(double __a);
+__DEVICE__ int __nv_isnanf(float __a);
+__DEVICE__ double __nv_j0(double __a);
+__DEVICE__ float __nv_j0f(float __a);
+__DEVICE__ double __nv_j1(double __a);
+__DEVICE__ float __nv_j1f(float __a);
+__DEVICE__ float __nv_jnf(int __a, float __b);
+__DEVICE__ double __nv_jn(int __a, double __b);
+__DEVICE__ double __nv_ldexp(double __a, int __b);
+__DEVICE__ float __nv_ldexpf(float __a, int __b);
+__DEVICE__ double __nv_lgamma(double __a);
+__DEVICE__ float __nv_lgammaf(float __a);
+__DEVICE__ double __nv_ll2double_rd(long long __a);
+__DEVICE__ double __nv_ll2double_rn(long long __a);
+__DEVICE__ double __nv_ll2double_ru(long long __a);
+__DEVICE__ double __nv_ll2double_rz(long long __a);
+__DEVICE__ float __nv_ll2float_rd(long long __a);
+__DEVICE__ float __nv_ll2float_rn(long long __a);
+__DEVICE__ float __nv_ll2float_ru(long long __a);
+__DEVICE__ float __nv_ll2float_rz(long long __a);
+__DEVICE__ long long __nv_llabs(long long __a);
+__DEVICE__ long long __nv_llmax(long long __a, long long __b);
+__DEVICE__ long long __nv_llmin(long long __a, long long __b);
+__DEVICE__ long long __nv_llrint(double __a);
+__DEVICE__ long long __nv_llrintf(float __a);
+__DEVICE__ long long __nv_llround(double __a);
+__DEVICE__ long long __nv_llroundf(float __a);
+__DEVICE__ double __nv_log10(double __a);
+__DEVICE__ float __nv_log10f(float __a);
+__DEVICE__ double __nv_log1p(double __a);
+__DEVICE__ float __nv_log1pf(float __a);
+__DEVICE__ double __nv_log2(double __a);
+__DEVICE__ float __nv_log2f(float __a);
+__DEVICE__ double __nv_logb(double __a);
+__DEVICE__ float __nv_logbf(float __a);
+__DEVICE__ double __nv_log(double __a);
+__DEVICE__ float __nv_logf(float __a);
+__DEVICE__ double __nv_longlong_as_double(long long __a);
+__DEVICE__ int __nv_max(int __a, int __b);
+__DEVICE__ int __nv_min(int __a, int __b);
+__DEVICE__ double __nv_modf(double __a, double *__b);
+__DEVICE__ float __nv_modff(float __a, float *__b);
+__DEVICE__ int __nv_mul24(int __a, int __b);
+__DEVICE__ long long __nv_mul64hi(long long __a, long long __b);
+__DEVICE__ int __nv_mulhi(int __a, int __b);
+__DEVICE__ double __nv_nan(const signed char *__a);
+__DEVICE__ float __nv_nanf(const signed char *__a);
+__DEVICE__ double __nv_nearbyint(double __a);
+__DEVICE__ float __nv_nearbyintf(float __a);
+__DEVICE__ double __nv_nextafter(double __a, double __b);
+__DEVICE__ float __nv_nextafterf(float __a, float __b);
+__DEVICE__ double __nv_norm3d(double __a, double __b, double __c);
+__DEVICE__ float __nv_norm3df(float __a, float __b, float __c);
+__DEVICE__ double __nv_norm4d(double __a, double __b, double __c, double __d);
+__DEVICE__ float __nv_norm4df(float __a, float __b, float __c, float __d);
+__DEVICE__ double __nv_normcdf(double __a);
+__DEVICE__ float __nv_normcdff(float __a);
+__DEVICE__ double __nv_normcdfinv(double __a);
+__DEVICE__ float __nv_normcdfinvf(float __a);
+__DEVICE__ float __nv_normf(int __a, const float *__b);
+__DEVICE__ double __nv_norm(int __a, const double *__b);
+__DEVICE__ int __nv_popc(int __a);
+__DEVICE__ int __nv_popcll(long long __a);
+__DEVICE__ double __nv_pow(double __a, double __b);
+__DEVICE__ float __nv_powf(float __a, float __b);
+__DEVICE__ double __nv_powi(double __a, int __b);
+__DEVICE__ float __nv_powif(float __a, int __b);
+__DEVICE__ double __nv_rcbrt(double __a);
+__DEVICE__ float __nv_rcbrtf(float __a);
+__DEVICE__ double __nv_rcp64h(double __a);
+__DEVICE__ double __nv_remainder(double __a, double __b);
+__DEVICE__ float __nv_remainderf(float __a, float __b);
+__DEVICE__ double __nv_remquo(double __a, double __b, int *__c);
+__DEVICE__ float __nv_remquof(float __a, float __b, int *__c);
+__DEVICE__ int __nv_rhadd(int __a, int __b);
+__DEVICE__ double __nv_rhypot(double __a, double __b);
+__DEVICE__ float __nv_rhypotf(float __a, float __b);
+__DEVICE__ double __nv_rint(double __a);
+__DEVICE__ float __nv_rintf(float __a);
+__DEVICE__ double __nv_rnorm3d(double __a, double __b, double __c);
+__DEVICE__ float __nv_rnorm3df(float __a, float __b, float __c);
+__DEVICE__ double __nv_rnorm4d(double __a, double __b, double __c, double __d);
+__DEVICE__ float __nv_rnorm4df(float __a, float __b, float __c, float __d);
+__DEVICE__ float __nv_rnormf(int __a, const float *__b);
+__DEVICE__ double __nv_rnorm(int __a, const double *__b);
+__DEVICE__ double __nv_round(double __a);
+__DEVICE__ float __nv_roundf(float __a);
+__DEVICE__ double __nv_rsqrt(double __a);
+__DEVICE__ float __nv_rsqrtf(float __a);
+__DEVICE__ int __nv_sad(int __a, int __b, int __c);
+__DEVICE__ float __nv_saturatef(float __a);
+__DEVICE__ double __nv_scalbn(double __a, int __b);
+__DEVICE__ float __nv_scalbnf(float __a, int __b);
+__DEVICE__ int __nv_signbitd(double __a);
+__DEVICE__ int __nv_signbitf(float __a);
+__DEVICE__ void __nv_sincos(double __a, double *__b, double *__c);
+__DEVICE__ void __nv_sincosf(float __a, float *__b, float *__c);
+__DEVICE__ void __nv_sincospi(double __a, double *__b, double *__c);
+__DEVICE__ void __nv_sincospif(float __a, float *__b, float *__c);
+__DEVICE__ double __nv_sin(double __a);
+__DEVICE__ float __nv_sinf(float __a);
+__DEVICE__ double __nv_sinh(double __a);
+__DEVICE__ float __nv_sinhf(float __a);
+__DEVICE__ double __nv_sinpi(double __a);
+__DEVICE__ float __nv_sinpif(float __a);
+__DEVICE__ double __nv_sqrt(double __a);
+__DEVICE__ float __nv_sqrtf(float __a);
+__DEVICE__ double __nv_tan(double __a);
+__DEVICE__ float __nv_tanf(float __a);
+__DEVICE__ double __nv_tanh(double __a);
+__DEVICE__ float __nv_tanhf(float __a);
+__DEVICE__ double __nv_tgamma(double __a);
+__DEVICE__ float __nv_tgammaf(float __a);
+__DEVICE__ double __nv_trunc(double __a);
+__DEVICE__ float __nv_truncf(float __a);
+__DEVICE__ int __nv_uhadd(unsigned int __a, unsigned int __b);
+__DEVICE__ double __nv_uint2double_rn(unsigned int __i);
+__DEVICE__ float __nv_uint2float_rd(unsigned int __a);
+__DEVICE__ float __nv_uint2float_rn(unsigned int __a);
+__DEVICE__ float __nv_uint2float_ru(unsigned int __a);
+__DEVICE__ float __nv_uint2float_rz(unsigned int __a);
+__DEVICE__ float __nv_uint_as_float(unsigned int __a);
+__DEVICE__ double __nv_ull2double_rd(unsigned long long __a);
+__DEVICE__ double __nv_ull2double_rn(unsigned long long __a);
+__DEVICE__ double __nv_ull2double_ru(unsigned long long __a);
+__DEVICE__ double __nv_ull2double_rz(unsigned long long __a);
+__DEVICE__ float __nv_ull2float_rd(unsigned long long __a);
+__DEVICE__ float __nv_ull2float_rn(unsigned long long __a);
+__DEVICE__ float __nv_ull2float_ru(unsigned long long __a);
+__DEVICE__ float __nv_ull2float_rz(unsigned long long __a);
+__DEVICE__ unsigned long long __nv_ullmax(unsigned long long __a,
unsigned long long __b);
-__device__ unsigned long long __nv_ullmin(unsigned long long __a,
+__DEVICE__ unsigned long long __nv_ullmin(unsigned long long __a,
unsigned long long __b);
-__device__ unsigned int __nv_umax(unsigned int __a, unsigned int __b);
-__device__ unsigned int __nv_umin(unsigned int __a, unsigned int __b);
-__device__ unsigned int __nv_umul24(unsigned int __a, unsigned int __b);
-__device__ unsigned long long __nv_umul64hi(unsigned long long __a,
+__DEVICE__ unsigned int __nv_umax(unsigned int __a, unsigned int __b);
+__DEVICE__ unsigned int __nv_umin(unsigned int __a, unsigned int __b);
+__DEVICE__ unsigned int __nv_umul24(unsigned int __a, unsigned int __b);
+__DEVICE__ unsigned long long __nv_umul64hi(unsigned long long __a,
unsigned long long __b);
-__device__ unsigned int __nv_umulhi(unsigned int __a, unsigned int __b);
-__device__ unsigned int __nv_urhadd(unsigned int __a, unsigned int __b);
-__device__ unsigned int __nv_usad(unsigned int __a, unsigned int __b,
+__DEVICE__ unsigned int __nv_umulhi(unsigned int __a, unsigned int __b);
+__DEVICE__ unsigned int __nv_urhadd(unsigned int __a, unsigned int __b);
+__DEVICE__ unsigned int __nv_usad(unsigned int __a, unsigned int __b,
unsigned int __c);
#if CUDA_VERSION >= 9000 && CUDA_VERSION < 9020
-__device__ int __nv_vabs2(int __a);
-__device__ int __nv_vabs4(int __a);
-__device__ int __nv_vabsdiffs2(int __a, int __b);
-__device__ int __nv_vabsdiffs4(int __a, int __b);
-__device__ int __nv_vabsdiffu2(int __a, int __b);
-__device__ int __nv_vabsdiffu4(int __a, int __b);
-__device__ int __nv_vabsss2(int __a);
-__device__ int __nv_vabsss4(int __a);
-__device__ int __nv_vadd2(int __a, int __b);
-__device__ int __nv_vadd4(int __a, int __b);
-__device__ int __nv_vaddss2(int __a, int __b);
-__device__ int __nv_vaddss4(int __a, int __b);
-__device__ int __nv_vaddus2(int __a, int __b);
-__device__ int __nv_vaddus4(int __a, int __b);
-__device__ int __nv_vavgs2(int __a, int __b);
-__device__ int __nv_vavgs4(int __a, int __b);
-__device__ int __nv_vavgu2(int __a, int __b);
-__device__ int __nv_vavgu4(int __a, int __b);
-__device__ int __nv_vcmpeq2(int __a, int __b);
-__device__ int __nv_vcmpeq4(int __a, int __b);
-__device__ int __nv_vcmpges2(int __a, int __b);
-__device__ int __nv_vcmpges4(int __a, int __b);
-__device__ int __nv_vcmpgeu2(int __a, int __b);
-__device__ int __nv_vcmpgeu4(int __a, int __b);
-__device__ int __nv_vcmpgts2(int __a, int __b);
-__device__ int __nv_vcmpgts4(int __a, int __b);
-__device__ int __nv_vcmpgtu2(int __a, int __b);
-__device__ int __nv_vcmpgtu4(int __a, int __b);
-__device__ int __nv_vcmples2(int __a, int __b);
-__device__ int __nv_vcmples4(int __a, int __b);
-__device__ int __nv_vcmpleu2(int __a, int __b);
-__device__ int __nv_vcmpleu4(int __a, int __b);
-__device__ int __nv_vcmplts2(int __a, int __b);
-__device__ int __nv_vcmplts4(int __a, int __b);
-__device__ int __nv_vcmpltu2(int __a, int __b);
-__device__ int __nv_vcmpltu4(int __a, int __b);
-__device__ int __nv_vcmpne2(int __a, int __b);
-__device__ int __nv_vcmpne4(int __a, int __b);
-__device__ int __nv_vhaddu2(int __a, int __b);
-__device__ int __nv_vhaddu4(int __a, int __b);
-__device__ int __nv_vmaxs2(int __a, int __b);
-__device__ int __nv_vmaxs4(int __a, int __b);
-__device__ int __nv_vmaxu2(int __a, int __b);
-__device__ int __nv_vmaxu4(int __a, int __b);
-__device__ int __nv_vmins2(int __a, int __b);
-__device__ int __nv_vmins4(int __a, int __b);
-__device__ int __nv_vminu2(int __a, int __b);
-__device__ int __nv_vminu4(int __a, int __b);
-__device__ int __nv_vneg2(int __a);
-__device__ int __nv_vneg4(int __a);
-__device__ int __nv_vnegss2(int __a);
-__device__ int __nv_vnegss4(int __a);
-__device__ int __nv_vsads2(int __a, int __b);
-__device__ int __nv_vsads4(int __a, int __b);
-__device__ int __nv_vsadu2(int __a, int __b);
-__device__ int __nv_vsadu4(int __a, int __b);
-__device__ int __nv_vseteq2(int __a, int __b);
-__device__ int __nv_vseteq4(int __a, int __b);
-__device__ int __nv_vsetges2(int __a, int __b);
-__device__ int __nv_vsetges4(int __a, int __b);
-__device__ int __nv_vsetgeu2(int __a, int __b);
-__device__ int __nv_vsetgeu4(int __a, int __b);
-__device__ int __nv_vsetgts2(int __a, int __b);
-__device__ int __nv_vsetgts4(int __a, int __b);
-__device__ int __nv_vsetgtu2(int __a, int __b);
-__device__ int __nv_vsetgtu4(int __a, int __b);
-__device__ int __nv_vsetles2(int __a, int __b);
-__device__ int __nv_vsetles4(int __a, int __b);
-__device__ int __nv_vsetleu2(int __a, int __b);
-__device__ int __nv_vsetleu4(int __a, int __b);
-__device__ int __nv_vsetlts2(int __a, int __b);
-__device__ int __nv_vsetlts4(int __a, int __b);
-__device__ int __nv_vsetltu2(int __a, int __b);
-__device__ int __nv_vsetltu4(int __a, int __b);
-__device__ int __nv_vsetne2(int __a, int __b);
-__device__ int __nv_vsetne4(int __a, int __b);
-__device__ int __nv_vsub2(int __a, int __b);
-__device__ int __nv_vsub4(int __a, int __b);
-__device__ int __nv_vsubss2(int __a, int __b);
-__device__ int __nv_vsubss4(int __a, int __b);
-__device__ int __nv_vsubus2(int __a, int __b);
-__device__ int __nv_vsubus4(int __a, int __b);
+__DEVICE__ int __nv_vabs2(int __a);
+__DEVICE__ int __nv_vabs4(int __a);
+__DEVICE__ int __nv_vabsdiffs2(int __a, int __b);
+__DEVICE__ int __nv_vabsdiffs4(int __a, int __b);
+__DEVICE__ int __nv_vabsdiffu2(int __a, int __b);
+__DEVICE__ int __nv_vabsdiffu4(int __a, int __b);
+__DEVICE__ int __nv_vabsss2(int __a);
+__DEVICE__ int __nv_vabsss4(int __a);
+__DEVICE__ int __nv_vadd2(int __a, int __b);
+__DEVICE__ int __nv_vadd4(int __a, int __b);
+__DEVICE__ int __nv_vaddss2(int __a, int __b);
+__DEVICE__ int __nv_vaddss4(int __a, int __b);
+__DEVICE__ int __nv_vaddus2(int __a, int __b);
+__DEVICE__ int __nv_vaddus4(int __a, int __b);
+__DEVICE__ int __nv_vavgs2(int __a, int __b);
+__DEVICE__ int __nv_vavgs4(int __a, int __b);
+__DEVICE__ int __nv_vavgu2(int __a, int __b);
+__DEVICE__ int __nv_vavgu4(int __a, int __b);
+__DEVICE__ int __nv_vcmpeq2(int __a, int __b);
+__DEVICE__ int __nv_vcmpeq4(int __a, int __b);
+__DEVICE__ int __nv_vcmpges2(int __a, int __b);
+__DEVICE__ int __nv_vcmpges4(int __a, int __b);
+__DEVICE__ int __nv_vcmpgeu2(int __a, int __b);
+__DEVICE__ int __nv_vcmpgeu4(int __a, int __b);
+__DEVICE__ int __nv_vcmpgts2(int __a, int __b);
+__DEVICE__ int __nv_vcmpgts4(int __a, int __b);
+__DEVICE__ int __nv_vcmpgtu2(int __a, int __b);
+__DEVICE__ int __nv_vcmpgtu4(int __a, int __b);
+__DEVICE__ int __nv_vcmples2(int __a, int __b);
+__DEVICE__ int __nv_vcmples4(int __a, int __b);
+__DEVICE__ int __nv_vcmpleu2(int __a, int __b);
+__DEVICE__ int __nv_vcmpleu4(int __a, int __b);
+__DEVICE__ int __nv_vcmplts2(int __a, int __b);
+__DEVICE__ int __nv_vcmplts4(int __a, int __b);
+__DEVICE__ int __nv_vcmpltu2(int __a, int __b);
+__DEVICE__ int __nv_vcmpltu4(int __a, int __b);
+__DEVICE__ int __nv_vcmpne2(int __a, int __b);
+__DEVICE__ int __nv_vcmpne4(int __a, int __b);
+__DEVICE__ int __nv_vhaddu2(int __a, int __b);
+__DEVICE__ int __nv_vhaddu4(int __a, int __b);
+__DEVICE__ int __nv_vmaxs2(int __a, int __b);
+__DEVICE__ int __nv_vmaxs4(int __a, int __b);
+__DEVICE__ int __nv_vmaxu2(int __a, int __b);
+__DEVICE__ int __nv_vmaxu4(int __a, int __b);
+__DEVICE__ int __nv_vmins2(int __a, int __b);
+__DEVICE__ int __nv_vmins4(int __a, int __b);
+__DEVICE__ int __nv_vminu2(int __a, int __b);
+__DEVICE__ int __nv_vminu4(int __a, int __b);
+__DEVICE__ int __nv_vneg2(int __a);
+__DEVICE__ int __nv_vneg4(int __a);
+__DEVICE__ int __nv_vnegss2(int __a);
+__DEVICE__ int __nv_vnegss4(int __a);
+__DEVICE__ int __nv_vsads2(int __a, int __b);
+__DEVICE__ int __nv_vsads4(int __a, int __b);
+__DEVICE__ int __nv_vsadu2(int __a, int __b);
+__DEVICE__ int __nv_vsadu4(int __a, int __b);
+__DEVICE__ int __nv_vseteq2(int __a, int __b);
+__DEVICE__ int __nv_vseteq4(int __a, int __b);
+__DEVICE__ int __nv_vsetges2(int __a, int __b);
+__DEVICE__ int __nv_vsetges4(int __a, int __b);
+__DEVICE__ int __nv_vsetgeu2(int __a, int __b);
+__DEVICE__ int __nv_vsetgeu4(int __a, int __b);
+__DEVICE__ int __nv_vsetgts2(int __a, int __b);
+__DEVICE__ int __nv_vsetgts4(int __a, int __b);
+__DEVICE__ int __nv_vsetgtu2(int __a, int __b);
+__DEVICE__ int __nv_vsetgtu4(int __a, int __b);
+__DEVICE__ int __nv_vsetles2(int __a, int __b);
+__DEVICE__ int __nv_vsetles4(int __a, int __b);
+__DEVICE__ int __nv_vsetleu2(int __a, int __b);
+__DEVICE__ int __nv_vsetleu4(int __a, int __b);
+__DEVICE__ int __nv_vsetlts2(int __a, int __b);
+__DEVICE__ int __nv_vsetlts4(int __a, int __b);
+__DEVICE__ int __nv_vsetltu2(int __a, int __b);
+__DEVICE__ int __nv_vsetltu4(int __a, int __b);
+__DEVICE__ int __nv_vsetne2(int __a, int __b);
+__DEVICE__ int __nv_vsetne4(int __a, int __b);
+__DEVICE__ int __nv_vsub2(int __a, int __b);
+__DEVICE__ int __nv_vsub4(int __a, int __b);
+__DEVICE__ int __nv_vsubss2(int __a, int __b);
+__DEVICE__ int __nv_vsubss4(int __a, int __b);
+__DEVICE__ int __nv_vsubus2(int __a, int __b);
+__DEVICE__ int __nv_vsubus4(int __a, int __b);
#endif // CUDA_VERSION
-__device__ double __nv_y0(double __a);
-__device__ float __nv_y0f(float __a);
-__device__ double __nv_y1(double __a);
-__device__ float __nv_y1f(float __a);
-__device__ float __nv_ynf(int __a, float __b);
-__device__ double __nv_yn(int __a, double __b);
+__DEVICE__ double __nv_y0(double __a);
+__DEVICE__ float __nv_y0f(float __a);
+__DEVICE__ double __nv_y1(double __a);
+__DEVICE__ float __nv_y1f(float __a);
+__DEVICE__ float __nv_ynf(int __a, float __b);
+__DEVICE__ double __nv_yn(int __a, double __b);
+#if defined(__cplusplus)
} // extern "C"
+#endif
#endif // __CLANG_CUDA_LIBDEVICE_DECLARES_H__
diff --git a/lib/Headers/__clang_cuda_math_forward_declares.h b/lib/Headers/__clang_cuda_math_forward_declares.h
index c31b1f4cda35..0afe4db556db 100644
--- a/lib/Headers/__clang_cuda_math_forward_declares.h
+++ b/lib/Headers/__clang_cuda_math_forward_declares.h
@@ -1,22 +1,8 @@
/*===- __clang_math_forward_declares.h - Prototypes of __device__ math fns --===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -34,14 +20,37 @@
// would preclude the use of our own __device__ overloads for these functions.
#pragma push_macro("__DEVICE__")
+#ifdef _OPENMP
+#define __DEVICE__ static __inline__ __attribute__((always_inline))
+#else
#define __DEVICE__ \
static __inline__ __attribute__((always_inline)) __attribute__((device))
+#endif
-__DEVICE__ double abs(double);
-__DEVICE__ float abs(float);
-__DEVICE__ int abs(int);
+// For C++17 we need to include the noexcept attribute to be compatible
+// with the header-defined version. This may be removed once
+// variant is supported.
+#if defined(_OPENMP) && defined(__cplusplus) && __cplusplus >= 201703L
+#define __NOEXCEPT noexcept
+#else
+#define __NOEXCEPT
+#endif
+
+#if !(defined(_OPENMP) && defined(__cplusplus))
__DEVICE__ long abs(long);
__DEVICE__ long long abs(long long);
+__DEVICE__ double abs(double);
+__DEVICE__ float abs(float);
+#endif
+// While providing the CUDA declarations and definitions for math functions,
+// we may manually define additional functions.
+// TODO: Once variant is supported, the additional functions will have
+// to be removed.
+#if defined(_OPENMP) && defined(__cplusplus)
+__DEVICE__ const double abs(const double);
+__DEVICE__ const float abs(const float);
+#endif
+__DEVICE__ int abs(int) __NOEXCEPT;
__DEVICE__ double acos(double);
__DEVICE__ float acos(float);
__DEVICE__ double acosh(double);
@@ -76,8 +85,8 @@ __DEVICE__ double exp(double);
__DEVICE__ float exp(float);
__DEVICE__ double expm1(double);
__DEVICE__ float expm1(float);
-__DEVICE__ double fabs(double);
-__DEVICE__ float fabs(float);
+__DEVICE__ double fabs(double) __NOEXCEPT;
+__DEVICE__ float fabs(float) __NOEXCEPT;
__DEVICE__ double fdim(double, double);
__DEVICE__ float fdim(float, float);
__DEVICE__ double floor(double);
@@ -98,12 +107,18 @@ __DEVICE__ double hypot(double, double);
__DEVICE__ float hypot(float, float);
__DEVICE__ int ilogb(double);
__DEVICE__ int ilogb(float);
+#ifdef _MSC_VER
+__DEVICE__ bool isfinite(long double);
+#endif
__DEVICE__ bool isfinite(double);
__DEVICE__ bool isfinite(float);
__DEVICE__ bool isgreater(double, double);
__DEVICE__ bool isgreaterequal(double, double);
__DEVICE__ bool isgreaterequal(float, float);
__DEVICE__ bool isgreater(float, float);
+#ifdef _MSC_VER
+__DEVICE__ bool isinf(long double);
+#endif
__DEVICE__ bool isinf(double);
__DEVICE__ bool isinf(float);
__DEVICE__ bool isless(double, double);
@@ -112,18 +127,21 @@ __DEVICE__ bool islessequal(float, float);
__DEVICE__ bool isless(float, float);
__DEVICE__ bool islessgreater(double, double);
__DEVICE__ bool islessgreater(float, float);
+#ifdef _MSC_VER
+__DEVICE__ bool isnan(long double);
+#endif
__DEVICE__ bool isnan(double);
__DEVICE__ bool isnan(float);
__DEVICE__ bool isnormal(double);
__DEVICE__ bool isnormal(float);
__DEVICE__ bool isunordered(double, double);
__DEVICE__ bool isunordered(float, float);
-__DEVICE__ long labs(long);
+__DEVICE__ long labs(long) __NOEXCEPT;
__DEVICE__ double ldexp(double, int);
__DEVICE__ float ldexp(float, int);
__DEVICE__ double lgamma(double);
__DEVICE__ float lgamma(float);
-__DEVICE__ long long llabs(long long);
+__DEVICE__ long long llabs(long long) __NOEXCEPT;
__DEVICE__ long long llrint(double);
__DEVICE__ long long llrint(float);
__DEVICE__ double log10(double);
@@ -134,6 +152,9 @@ __DEVICE__ double log2(double);
__DEVICE__ float log2(float);
__DEVICE__ double logb(double);
__DEVICE__ float logb(float);
+#if defined(_OPENMP) && defined(__cplusplus)
+__DEVICE__ long double log(long double);
+#endif
__DEVICE__ double log(double);
__DEVICE__ float log(float);
__DEVICE__ long lrint(double);
@@ -281,6 +302,7 @@ _GLIBCXX_END_NAMESPACE_VERSION
} // namespace std
#endif
+#undef __NOEXCEPT
#pragma pop_macro("__DEVICE__")
#endif
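
The two macros introduced above compose per declaration: under plain CUDA compilation __DEVICE__ carries the device attribute, while under OpenMP target offloading it does not, and __NOEXCEPT only expands to noexcept for C++17 so the declarations match the noexcept-qualified ones in the host library headers. A minimal illustrative sketch, not part of the header, of what `__DEVICE__ int abs(int) __NOEXCEPT;` becomes:

#ifdef _OPENMP
  /* OpenMP target offloading: no device attribute; noexcept only for C++17. */
  #if defined(__cplusplus) && __cplusplus >= 201703L
static __inline__ __attribute__((always_inline)) int abs(int) noexcept;
  #else
static __inline__ __attribute__((always_inline)) int abs(int);
  #endif
#else
  /* CUDA: the declaration additionally carries __attribute__((device)). */
static __inline__ __attribute__((always_inline)) __attribute__((device))
int abs(int);
#endif
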
diff --git a/lib/Headers/__clang_cuda_runtime_wrapper.h b/lib/Headers/__clang_cuda_runtime_wrapper.h
index f05c0454a883..3e362dd967db 100644
--- a/lib/Headers/__clang_cuda_runtime_wrapper.h
+++ b/lib/Headers/__clang_cuda_runtime_wrapper.h
@@ -1,22 +1,8 @@
/*===---- __clang_cuda_runtime_wrapper.h - CUDA runtime support -------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -62,7 +48,7 @@
#include "cuda.h"
#if !defined(CUDA_VERSION)
#error "cuda.h did not define CUDA_VERSION"
-#elif CUDA_VERSION < 7000 || CUDA_VERSION > 10000
+#elif CUDA_VERSION < 7000 || CUDA_VERSION > 10010
#error "Unsupported CUDA version!"
#endif
@@ -426,5 +412,15 @@ __device__ inline __cuda_builtin_gridDim_t::operator dim3() const {
#pragma pop_macro("__USE_FAST_MATH__")
#pragma pop_macro("__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__")
+// CUDA runtime uses this undocumented function to access kernel launch
+// configuration. The declaration is in crt/device_functions.h but that file
+// includes a lot of other stuff we don't want. Instead, we'll provide our own
+// declaration for it here.
+#if CUDA_VERSION >= 9020
+extern "C" unsigned __cudaPushCallConfiguration(dim3 gridDim, dim3 blockDim,
+ size_t sharedMem = 0,
+ void *stream = 0);
+#endif
+
#endif // __CUDA__
#endif // __CLANG_CUDA_RUNTIME_WRAPPER_H__
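
The newly declared __cudaPushCallConfiguration is what compiler-generated launch code calls for CUDA 9.2 and later. A hedged sketch with a hypothetical kernel; the exact lowering is an internal detail of clang and the CUDA runtime:

__global__ void scale(float *p, float s) { p[threadIdx.x] *= s; }

void launch(float *p, float s) {
  dim3 grid(128), block(256);
  // For CUDA >= 9.2, clang routes this launch through
  // __cudaPushCallConfiguration(grid, block, /*sharedMem=*/0, /*stream=*/0);
  // the host-side stub then pops the configuration and starts the kernel.
  scale<<<grid, block>>>(p, s);
}
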
diff --git a/lib/Headers/__stddef_max_align_t.h b/lib/Headers/__stddef_max_align_t.h
index 1e10ca9865c0..e3b439285d0f 100644
--- a/lib/Headers/__stddef_max_align_t.h
+++ b/lib/Headers/__stddef_max_align_t.h
@@ -1,24 +1,8 @@
/*===---- __stddef_max_align_t.h - Definition of max_align_t for modules ---===
*
- * Copyright (c) 2014 Chandler Carruth
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/__wmmintrin_aes.h b/lib/Headers/__wmmintrin_aes.h
index 70c355efc48c..f540319c7fd2 100644
--- a/lib/Headers/__wmmintrin_aes.h
+++ b/lib/Headers/__wmmintrin_aes.h
@@ -1,22 +1,8 @@
/*===---- __wmmintrin_aes.h - AES intrinsics -------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/__wmmintrin_pclmul.h b/lib/Headers/__wmmintrin_pclmul.h
index e0f928796ac1..fef4b93dbb43 100644
--- a/lib/Headers/__wmmintrin_pclmul.h
+++ b/lib/Headers/__wmmintrin_pclmul.h
@@ -1,22 +1,8 @@
/*===---- __wmmintrin_pclmul.h - PCMUL intrinsics ---------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/adxintrin.h b/lib/Headers/adxintrin.h
index d6c454db8512..72b9ed08f40c 100644
--- a/lib/Headers/adxintrin.h
+++ b/lib/Headers/adxintrin.h
@@ -1,22 +1,8 @@
/*===---- adxintrin.h - ADX intrinsics -------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/altivec.h b/lib/Headers/altivec.h
index 2dc6adb9009f..4008440b2bc5 100644
--- a/lib/Headers/altivec.h
+++ b/lib/Headers/altivec.h
@@ -1,22 +1,8 @@
/*===---- altivec.h - Standard header for type generic math ---------------===*\
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
\*===----------------------------------------------------------------------===*/
diff --git a/lib/Headers/ammintrin.h b/lib/Headers/ammintrin.h
index 680b4465eaae..3806be6ebc43 100644
--- a/lib/Headers/ammintrin.h
+++ b/lib/Headers/ammintrin.h
@@ -1,22 +1,8 @@
/*===---- ammintrin.h - SSE4a intrinsics -----------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/arm64intr.h b/lib/Headers/arm64intr.h
index be5228361895..4943b2db69d0 100644
--- a/lib/Headers/arm64intr.h
+++ b/lib/Headers/arm64intr.h
@@ -1,22 +1,8 @@
/*===---- arm64intr.h - ARM64 Windows intrinsics -------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/arm_acle.h b/lib/Headers/arm_acle.h
index ab2589798269..096cc261af2c 100644
--- a/lib/Headers/arm_acle.h
+++ b/lib/Headers/arm_acle.h
@@ -1,22 +1,8 @@
/*===---- arm_acle.h - ARM Non-Neon intrinsics -----------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -611,6 +597,14 @@ __crc32cd(uint32_t __a, uint64_t __b) {
}
#endif
+/* Armv8.3-A Javascript conversion intrinsic */
+#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_JCVT)
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__jcvt(double __a) {
+ return __builtin_arm_jcvt(__a);
+}
+#endif
+
/* 10.1 Special register intrinsics */
#define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)
#define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)
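
The __jcvt helper added above exposes the Armv8.3-A FJCVTZS instruction, which converts a double the way ECMAScript's ToInt32 does: truncation toward zero with modulo-2^32 wrap-around instead of saturation. A small illustrative use with a hypothetical wrapper, assuming a target with __ARM_FEATURE_JCVT:

#include <stdint.h>
#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_JCVT)
/* e.g. js_to_int32(4294967301.0) == 5, where a plain C cast of an
   out-of-range double to int32_t would be undefined behaviour. */
static int32_t js_to_int32(double d) { return __jcvt(d); }
#endif
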
@@ -619,6 +613,16 @@ __crc32cd(uint32_t __a, uint64_t __b) {
#define __arm_wsr64(sysreg, v) __builtin_arm_wsr64(sysreg, v)
#define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v)
+// Memory Tagging Extensions (MTE) Intrinsics
+#if __ARM_FEATURE_MEMORY_TAGGING
+#define __arm_mte_create_random_tag(__ptr, __mask) __builtin_arm_irg(__ptr, __mask)
+#define __arm_mte_increment_tag(__ptr, __tag_offset) __builtin_arm_addg(__ptr, __tag_offset)
+#define __arm_mte_exclude_tag(__ptr, __excluded) __builtin_arm_gmi(__ptr, __excluded)
+#define __arm_mte_get_tag(__ptr) __builtin_arm_ldg(__ptr)
+#define __arm_mte_set_tag(__ptr) __builtin_arm_stg(__ptr)
+#define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb)
+#endif
+
#if defined(__cplusplus)
}
#endif
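
The MTE macros map one-to-one onto the irg/addg/gmi/ldg/stg/subp builtins. A hedged sketch with a hypothetical helper, assuming an MTE-enabled target and allocation and the architectural 16-byte tag granule:

#if __ARM_FEATURE_MEMORY_TAGGING
/* Pick a random tag (excluding none), write it to both 16-byte granules of a
   32-byte block, and return the tagged pointer callers must use from now on. */
void *tag_block32(void *p) {
  void *t = __arm_mte_create_random_tag(p, /*excluded tag mask*/ 0);
  __arm_mte_set_tag(t);
  __arm_mte_set_tag((char *)t + 16);
  return t;
}
#endif
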
diff --git a/lib/Headers/armintr.h b/lib/Headers/armintr.h
index 933afcbb91b6..300ed4ee470d 100644
--- a/lib/Headers/armintr.h
+++ b/lib/Headers/armintr.h
@@ -1,22 +1,8 @@
/*===---- armintr.h - ARM Windows intrinsics -------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/avx2intrin.h b/lib/Headers/avx2intrin.h
index 9688a96fde18..162e83ea2fbc 100644
--- a/lib/Headers/avx2intrin.h
+++ b/lib/Headers/avx2intrin.h
@@ -1,22 +1,8 @@
/*===---- avx2intrin.h - AVX2 intrinsics -----------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -146,21 +132,13 @@ _mm256_andnot_si256(__m256i __a, __m256i __b)
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_avg_epu8(__m256i __a, __m256i __b)
{
- typedef unsigned short __v32hu __attribute__((__vector_size__(64)));
- return (__m256i)__builtin_convertvector(
- ((__builtin_convertvector((__v32qu)__a, __v32hu) +
- __builtin_convertvector((__v32qu)__b, __v32hu)) + 1)
- >> 1, __v32qu);
+ return (__m256i)__builtin_ia32_pavgb256((__v32qi)__a, (__v32qi)__b);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_avg_epu16(__m256i __a, __m256i __b)
{
- typedef unsigned int __v16su __attribute__((__vector_size__(64)));
- return (__m256i)__builtin_convertvector(
- ((__builtin_convertvector((__v16hu)__a, __v16su) +
- __builtin_convertvector((__v16hu)__b, __v16su)) + 1)
- >> 1, __v16hu);
+ return (__m256i)__builtin_ia32_pavgw256((__v16hi)__a, (__v16hi)__b);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
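
_mm256_avg_epu8 and _mm256_avg_epu16 now call the pavgb/pavgw builtins directly instead of emulating the operation through a widened vector type; the per-element result is unchanged. A scalar reference helper (illustrative, not part of the header) for what one byte lane computes:

static inline unsigned char avg_epu8_lane(unsigned char a, unsigned char b) {
  /* unsigned rounding average: widen, add one, shift right */
  return (unsigned char)(((unsigned)a + (unsigned)b + 1) >> 1);
}
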
diff --git a/lib/Headers/avx512bf16intrin.h b/lib/Headers/avx512bf16intrin.h
new file mode 100644
index 000000000000..d1d87e72f147
--- /dev/null
+++ b/lib/Headers/avx512bf16intrin.h
@@ -0,0 +1,279 @@
+/*===------------ avx512bf16intrin.h - AVX512_BF16 intrinsics --------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512bf16intrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512BF16INTRIN_H
+#define __AVX512BF16INTRIN_H
+
+typedef short __m512bh __attribute__((__vector_size__(64), __aligned__(64)));
+typedef short __m256bh __attribute__((__vector_size__(32), __aligned__(32)));
+typedef unsigned short __bfloat16;
+
+#define __DEFAULT_FN_ATTRS512 \
+ __attribute__((__always_inline__, __nodebug__, __target__("avx512bf16"), \
+ __min_vector_width__(512)))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("avx512bf16")))
+
+/// Convert One BF16 Data to One Single Float Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic does not correspond to a specific instruction.
+///
+/// \param __A
+/// A bfloat data.
+/// \returns A float value whose sign and exponent fields are unchanged and
+///    whose fraction field is extended to 23 bits.
+static __inline__ float __DEFAULT_FN_ATTRS _mm_cvtsbh_ss(__bfloat16 __A) {
+ return __builtin_ia32_cvtsbf162ss_32(__A);
+}
+
+/// Convert Two Packed Single Data to One Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
+///
+/// \param __A
+/// A 512-bit vector of [16 x float].
+/// \param __B
+/// A 512-bit vector of [16 x float].
+/// \returns A 512-bit vector of [32 x bfloat] whose lower 256 bits come from
+/// conversion of __B, and higher 256 bits come from conversion of __A.
+static __inline__ __m512bh __DEFAULT_FN_ATTRS512
+_mm512_cvtne2ps_pbh(__m512 __A, __m512 __B) {
+ return (__m512bh)__builtin_ia32_cvtne2ps2bf16_512((__v16sf) __A,
+ (__v16sf) __B);
+}
+
+/// Convert Two Packed Single Data to One Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
+///
+/// \param __A
+/// A 512-bit vector of [16 x float].
+/// \param __B
+/// A 512-bit vector of [16 x float].
+/// \param __W
+/// A 512-bit vector of [32 x bfloat].
+/// \param __U
+/// A 32-bit mask value specifying what is chosen for each element.
+/// A 1 means conversion of __A or __B. A 0 means element from __W.
+/// \returns A 512-bit vector of [32 x bfloat] whose lower 256 bits come from
+/// conversion of __B, and higher 256 bits come from conversion of __A.
+static __inline__ __m512bh __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtne2ps_pbh(__m512bh __W, __mmask32 __U, __m512 __A, __m512 __B) {
+ return (__m512bh)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_cvtne2ps_pbh(__A, __B),
+ (__v32hi)__W);
+}
+
+/// Convert Two Packed Single Data to One Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
+///
+/// \param __A
+/// A 512-bit vector of [16 x float].
+/// \param __B
+/// A 512-bit vector of [16 x float].
+/// \param __U
+/// A 32-bit mask value specifying what is chosen for each element.
+/// A 1 means conversion of __A or __B. A 0 means element is zero.
+/// \returns A 512-bit vector of [32 x bfloat] whose lower 256 bits come from
+/// conversion of __B, and higher 256 bits come from conversion of __A.
+static __inline__ __m512bh __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtne2ps_pbh(__mmask32 __U, __m512 __A, __m512 __B) {
+ return (__m512bh)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_cvtne2ps_pbh(__A, __B),
+ (__v32hi)_mm512_setzero_si512());
+}
+
+/// Convert Packed Single Data to Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
+///
+/// \param __A
+/// A 512-bit vector of [16 x float].
+/// \returns A 256-bit vector of [16 x bfloat] that comes from conversion of __A.
+static __inline__ __m256bh __DEFAULT_FN_ATTRS512
+_mm512_cvtneps_pbh(__m512 __A) {
+ return (__m256bh)__builtin_ia32_cvtneps2bf16_512_mask((__v16sf)__A,
+ (__v16hi)_mm256_undefined_si256(),
+ (__mmask16)-1);
+}
+
+/// Convert Packed Single Data to Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
+///
+/// \param __A
+/// A 512-bit vector of [16 x float].
+/// \param __W
+/// A 256-bit vector of [16 x bfloat].
+/// \param __U
+/// A 16-bit mask value specifying what is chosen for each element.
+/// A 1 means conversion of __A. A 0 means element from __W.
+/// \returns A 256-bit vector of [16 x bfloat] that comes from conversion of __A.
+static __inline__ __m256bh __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtneps_pbh(__m256bh __W, __mmask16 __U, __m512 __A) {
+ return (__m256bh)__builtin_ia32_cvtneps2bf16_512_mask((__v16sf)__A,
+ (__v16hi)__W,
+ (__mmask16)__U);
+}
+
+/// Convert Packed Single Data to Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
+///
+/// \param __A
+/// A 512-bit vector of [16 x float].
+/// \param __U
+/// A 16-bit mask value specifying what is chosen for each element.
+/// A 1 means conversion of __A. A 0 means element is zero.
+/// \returns A 256-bit vector of [16 x bfloat] that comes from conversion of __A.
+static __inline__ __m256bh __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtneps_pbh(__mmask16 __U, __m512 __A) {
+ return (__m256bh)__builtin_ia32_cvtneps2bf16_512_mask((__v16sf)__A,
+ (__v16hi)_mm256_setzero_si256(),
+ (__mmask16)__U);
+}
+
+/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
+///
+/// \param __A
+/// A 512-bit vector of [32 x bfloat].
+/// \param __B
+/// A 512-bit vector of [32 x bfloat].
+/// \param __D
+/// A 512-bit vector of [16 x float].
+/// \returns A 512-bit vector of [16 x float] that comes from the dot product
+///  of __A, __B and __D.
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
+_mm512_dpbf16_ps(__m512 __D, __m512bh __A, __m512bh __B) {
+ return (__m512)__builtin_ia32_dpbf16ps_512((__v16sf) __D,
+ (__v16si) __A,
+ (__v16si) __B);
+}
+
+/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
+///
+/// \param __A
+/// A 512-bit vector of [32 x bfloat].
+/// \param __B
+/// A 512-bit vector of [32 x bfloat].
+/// \param __D
+/// A 512-bit vector of [16 x float].
+/// \param __U
+/// A 16-bit mask value specifying what is chosen for each element.
+/// A 1 means __A and __B's dot product accumulated with __D. A 0 means __D.
+/// \returns A 512-bit vector of [16 x float] that comes from the dot product
+///  of __A, __B and __D.
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
+_mm512_mask_dpbf16_ps(__m512 __D, __mmask16 __U, __m512bh __A, __m512bh __B) {
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_dpbf16_ps(__D, __A, __B),
+ (__v16sf)__D);
+}
+
+/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
+///
+/// \param __A
+/// A 512-bit vector of [32 x bfloat].
+/// \param __B
+/// A 512-bit vector of [32 x bfloat].
+/// \param __D
+/// A 512-bit vector of [16 x float].
+/// \param __U
+/// A 16-bit mask value specifying what is chosen for each element.
+/// A 1 means __A and __B's dot product accumulated with __D. A 0 means 0.
+/// \returns A 512-bit vector of [16 x float] that comes from the dot product
+///  of __A, __B and __D.
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
+_mm512_maskz_dpbf16_ps(__mmask16 __U, __m512 __D, __m512bh __A, __m512bh __B) {
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_dpbf16_ps(__D, __A, __B),
+ (__v16sf)_mm512_setzero_si512());
+}
+
+/// Convert Packed BF16 Data to Packed float Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \param __A
+/// A 256-bit vector of [16 x bfloat].
+/// \returns A 512-bit vector of [16 x float] that comes from conversion of __A.
+static __inline__ __m512 __DEFAULT_FN_ATTRS512 _mm512_cvtpbh_ps(__m256bh __A) {
+ return _mm512_castsi512_ps((__m512i)_mm512_slli_epi32(
+ (__m512i)_mm512_cvtepi16_epi32((__m256i)__A), 16));
+}
+
+/// Convert Packed BF16 Data to Packed float Data using zeroing mask.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \param __U
+/// A 16-bit mask. Elements are zeroed out when the corresponding mask
+/// bit is not set.
+/// \param __A
+/// A 256-bit vector of [16 x bfloat].
+/// \returns A 512-bit vector of [16 x float] that comes from conversion of __A.
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtpbh_ps(__mmask16 __U, __m256bh __A) {
+ return _mm512_castsi512_ps((__m512i)_mm512_slli_epi32(
+ (__m512i)_mm512_maskz_cvtepi16_epi32((__mmask16)__U, (__m256i)__A), 16));
+}
+
+/// Convert Packed BF16 Data to Packed float Data using merging mask.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \param __S
+/// A 512-bit vector of [16 x float]. Elements are copied from __S when
+/// the corresponding mask bit is not set.
+/// \param __U
+/// A 16-bit mask.
+/// \param __A
+/// A 256-bit vector of [16 x bfloat].
+/// \returns A 512-bit vector of [16 x float] that comes from conversion of __A.
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtpbh_ps(__m512 __S, __mmask16 __U, __m256bh __A) {
+ return _mm512_castsi512_ps((__m512i)_mm512_mask_slli_epi32(
+ (__m512i)__S, (__mmask16)__U,
+ (__m512i)_mm512_cvtepi16_epi32((__m256i)__A), 16));
+}
+
+#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS512
+
+#endif
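
A short illustrative use of the new intrinsics, with a hypothetical helper name and assuming AVX512BF16 code generation is enabled (e.g. -mavx512bf16): pack two float vectors into one BF16 vector, then accumulate pairwise dot products.

#include <immintrin.h>

__m512 bf16_dot_step(__m512 acc, __m512 a_lo, __m512 a_hi,
                     __m512 b_lo, __m512 b_hi) {
  /* lower 256 bits of the result come from the second argument */
  __m512bh a = _mm512_cvtne2ps_pbh(a_hi, a_lo);
  __m512bh b = _mm512_cvtne2ps_pbh(b_hi, b_lo);
  /* acc[i] += a[2i]*b[2i] + a[2i+1]*b[2i+1] for each of the 16 float lanes */
  return _mm512_dpbf16_ps(acc, a, b);
}
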
diff --git a/lib/Headers/avx512bitalgintrin.h b/lib/Headers/avx512bitalgintrin.h
index 56046f8c4999..d4411d156ba5 100644
--- a/lib/Headers/avx512bitalgintrin.h
+++ b/lib/Headers/avx512bitalgintrin.h
@@ -1,23 +1,9 @@
/*===------------- avx512bitalgintrin.h - BITALG intrinsics ------------------===
*
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/avx512bwintrin.h b/lib/Headers/avx512bwintrin.h
index a90a255376c0..cb2e07619cb7 100644
--- a/lib/Headers/avx512bwintrin.h
+++ b/lib/Headers/avx512bwintrin.h
@@ -1,23 +1,9 @@
/*===------------- avx512bwintrin.h - AVX512BW intrinsics ------------------===
*
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -719,11 +705,7 @@ _mm512_maskz_adds_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_avg_epu8 (__m512i __A, __m512i __B)
{
- typedef unsigned short __v64hu __attribute__((__vector_size__(128)));
- return (__m512i)__builtin_convertvector(
- ((__builtin_convertvector((__v64qu) __A, __v64hu) +
- __builtin_convertvector((__v64qu) __B, __v64hu)) + 1)
- >> 1, __v64qu);
+ return (__m512i)__builtin_ia32_pavgb512((__v64qi)__A, (__v64qi)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -746,11 +728,7 @@ _mm512_maskz_avg_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_avg_epu16 (__m512i __A, __m512i __B)
{
- typedef unsigned int __v32su __attribute__((__vector_size__(128)));
- return (__m512i)__builtin_convertvector(
- ((__builtin_convertvector((__v32hu) __A, __v32su) +
- __builtin_convertvector((__v32hu) __B, __v32su)) + 1)
- >> 1, __v32hu);
+ return (__m512i)__builtin_ia32_pavgw512((__v32hi)__A, (__v32hi)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1733,14 +1711,14 @@ _mm512_maskz_set1_epi8 (__mmask64 __M, char __A)
(__v64qi) _mm512_setzero_si512());
}
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
_mm512_kunpackd (__mmask64 __A, __mmask64 __B)
{
return (__mmask64) __builtin_ia32_kunpckdi ((__mmask64) __A,
(__mmask64) __B);
}
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
_mm512_kunpackw (__mmask32 __A, __mmask32 __B)
{
return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A,
@@ -1751,7 +1729,7 @@ static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_loadu_epi16 (void const *__P)
{
struct __loadu_epi16 {
- __m512i __v;
+ __m512i_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_epi16*)__P)->__v;
}
@@ -1777,7 +1755,7 @@ static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_loadu_epi8 (void const *__P)
{
struct __loadu_epi8 {
- __m512i __v;
+ __m512i_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_epi8*)__P)->__v;
}
@@ -1803,7 +1781,7 @@ static __inline void __DEFAULT_FN_ATTRS512
_mm512_storeu_epi16 (void *__P, __m512i __A)
{
struct __storeu_epi16 {
- __m512i __v;
+ __m512i_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_epi16*)__P)->__v = __A;
}
@@ -1820,7 +1798,7 @@ static __inline void __DEFAULT_FN_ATTRS512
_mm512_storeu_epi8 (void *__P, __m512i __A)
{
struct __storeu_epi8 {
- __m512i __v;
+ __m512i_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_epi8*)__P)->__v = __A;
}
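
The switch from __m512i to __m512i_u inside the packed, may_alias wrapper structs is what keeps the unaligned load/store helpers honest: the alignment-1 vector type (added to avx512fintrin.h below) stops the compiler from assuming 64-byte alignment when it sees through the struct. The same pattern in isolation, with hypothetical names:

typedef long long v8di_u __attribute__((__vector_size__(64), __aligned__(1)));

static inline v8di_u loadu64(void const *p) {
  struct wrapper {
    v8di_u v;
  } __attribute__((__packed__, __may_alias__));
  return ((const struct wrapper *)p)->v; /* emits an unaligned 64-byte load */
}
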
diff --git a/lib/Headers/avx512cdintrin.h b/lib/Headers/avx512cdintrin.h
index e63902743c06..bfdba84aa28b 100644
--- a/lib/Headers/avx512cdintrin.h
+++ b/lib/Headers/avx512cdintrin.h
@@ -1,23 +1,9 @@
/*===------------- avx512cdintrin.h - AVX512CD intrinsics ------------------===
*
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -34,49 +20,45 @@
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_conflict_epi64 (__m512i __A)
{
- return (__m512i) __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A,
- (__v8di) _mm512_setzero_si512 (),
- (__mmask8) -1);
+ return (__m512i) __builtin_ia32_vpconflictdi_512 ((__v8di) __A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_conflict_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A,
- (__v8di) __W,
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_conflict_epi64(__A),
+ (__v8di)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_conflict_epi64 (__mmask8 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A,
- (__v8di) _mm512_setzero_si512 (),
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_conflict_epi64(__A),
+ (__v8di)_mm512_setzero_si512 ());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_conflict_epi32 (__m512i __A)
{
- return (__m512i) __builtin_ia32_vpconflictsi_512_mask ((__v16si) __A,
- (__v16si) _mm512_setzero_si512 (),
- (__mmask16) -1);
+ return (__m512i) __builtin_ia32_vpconflictsi_512 ((__v16si) __A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_conflict_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_vpconflictsi_512_mask ((__v16si) __A,
- (__v16si) __W,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_conflict_epi32(__A),
+ (__v16si)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_conflict_epi32 (__mmask16 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_vpconflictsi_512_mask ((__v16si) __A,
- (__v16si) _mm512_setzero_si512 (),
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_conflict_epi32(__A),
+ (__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
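
The conflict intrinsics now follow the common pattern of computing the unmasked result and blending with a select builtin: per destination element, the mask picks either the computed value or a fallback, __W for the mask_ form and zero for the maskz_ form. A scalar sketch (illustrative only) of that blend:

static inline long long blend_lane(unsigned char mask, int lane,
                                   long long computed, long long fallback) {
  /* fallback is __W for _mm512_mask_* and 0 for _mm512_maskz_* */
  return ((mask >> lane) & 1) ? computed : fallback;
}
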
diff --git a/lib/Headers/avx512dqintrin.h b/lib/Headers/avx512dqintrin.h
index 6e6c293af22e..337256c50f50 100644
--- a/lib/Headers/avx512dqintrin.h
+++ b/lib/Headers/avx512dqintrin.h
@@ -1,22 +1,8 @@
/*===---- avx512dqintrin.h - AVX512DQ intrinsics ---------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/avx512erintrin.h b/lib/Headers/avx512erintrin.h
index 6348275c8d31..857006169906 100644
--- a/lib/Headers/avx512erintrin.h
+++ b/lib/Headers/avx512erintrin.h
@@ -1,22 +1,8 @@
/*===---- avx512erintrin.h - AVX512ER intrinsics ---------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/avx512fintrin.h b/lib/Headers/avx512fintrin.h
index 1c19993ff1bb..132761f9ef5c 100644
--- a/lib/Headers/avx512fintrin.h
+++ b/lib/Headers/avx512fintrin.h
@@ -1,22 +1,8 @@
/*===---- avx512fintrin.h - AVX512F intrinsics -----------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -40,9 +26,13 @@ typedef unsigned short __v32hu __attribute__((__vector_size__(64)));
typedef unsigned long long __v8du __attribute__((__vector_size__(64)));
typedef unsigned int __v16su __attribute__((__vector_size__(64)));
-typedef float __m512 __attribute__((__vector_size__(64)));
-typedef double __m512d __attribute__((__vector_size__(64)));
-typedef long long __m512i __attribute__((__vector_size__(64)));
+typedef float __m512 __attribute__((__vector_size__(64), __aligned__(64)));
+typedef double __m512d __attribute__((__vector_size__(64), __aligned__(64)));
+typedef long long __m512i __attribute__((__vector_size__(64), __aligned__(64)));
+
+typedef float __m512_u __attribute__((__vector_size__(64), __aligned__(1)));
+typedef double __m512d_u __attribute__((__vector_size__(64), __aligned__(1)));
+typedef long long __m512i_u __attribute__((__vector_size__(64), __aligned__(1)));
typedef unsigned char __mmask8;
typedef unsigned short __mmask16;
@@ -1991,12 +1981,12 @@ _mm512_maskz_add_ps(__mmask16 __U, __m512 __A, __m512 __B) {
#define _mm512_mask_add_round_pd(W, U, A, B, R) \
(__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_add_round_pd((A), (B), (R)), \
- (__v8df)(__m512d)(W));
+ (__v8df)(__m512d)(W))
#define _mm512_maskz_add_round_pd(U, A, B, R) \
(__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_add_round_pd((A), (B), (R)), \
- (__v8df)_mm512_setzero_pd());
+ (__v8df)_mm512_setzero_pd())
#define _mm512_add_round_ps(A, B, R) \
(__m512)__builtin_ia32_addps512((__v16sf)(__m512)(A), \
@@ -2005,12 +1995,12 @@ _mm512_maskz_add_ps(__mmask16 __U, __m512 __A, __m512 __B) {
#define _mm512_mask_add_round_ps(W, U, A, B, R) \
(__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_add_round_ps((A), (B), (R)), \
- (__v16sf)(__m512)(W));
+ (__v16sf)(__m512)(W))
#define _mm512_maskz_add_round_ps(U, A, B, R) \
(__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_add_round_ps((A), (B), (R)), \
- (__v16sf)_mm512_setzero_ps());
+ (__v16sf)_mm512_setzero_ps())
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_sub_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
@@ -2106,12 +2096,12 @@ _mm512_maskz_sub_ps(__mmask16 __U, __m512 __A, __m512 __B) {
#define _mm512_mask_sub_round_pd(W, U, A, B, R) \
(__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_sub_round_pd((A), (B), (R)), \
- (__v8df)(__m512d)(W));
+ (__v8df)(__m512d)(W))
#define _mm512_maskz_sub_round_pd(U, A, B, R) \
(__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_sub_round_pd((A), (B), (R)), \
- (__v8df)_mm512_setzero_pd());
+ (__v8df)_mm512_setzero_pd())
#define _mm512_sub_round_ps(A, B, R) \
(__m512)__builtin_ia32_subps512((__v16sf)(__m512)(A), \
@@ -2120,12 +2110,12 @@ _mm512_maskz_sub_ps(__mmask16 __U, __m512 __A, __m512 __B) {
#define _mm512_mask_sub_round_ps(W, U, A, B, R) \
(__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
- (__v16sf)(__m512)(W));
+ (__v16sf)(__m512)(W))
#define _mm512_maskz_sub_round_ps(U, A, B, R) \
(__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
- (__v16sf)_mm512_setzero_ps());
+ (__v16sf)_mm512_setzero_ps())
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_mul_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
@@ -2221,12 +2211,12 @@ _mm512_maskz_mul_ps(__mmask16 __U, __m512 __A, __m512 __B) {
#define _mm512_mask_mul_round_pd(W, U, A, B, R) \
(__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_mul_round_pd((A), (B), (R)), \
- (__v8df)(__m512d)(W));
+ (__v8df)(__m512d)(W))
#define _mm512_maskz_mul_round_pd(U, A, B, R) \
(__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_mul_round_pd((A), (B), (R)), \
- (__v8df)_mm512_setzero_pd());
+ (__v8df)_mm512_setzero_pd())
#define _mm512_mul_round_ps(A, B, R) \
(__m512)__builtin_ia32_mulps512((__v16sf)(__m512)(A), \
@@ -2235,12 +2225,12 @@ _mm512_maskz_mul_ps(__mmask16 __U, __m512 __A, __m512 __B) {
#define _mm512_mask_mul_round_ps(W, U, A, B, R) \
(__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
- (__v16sf)(__m512)(W));
+ (__v16sf)(__m512)(W))
#define _mm512_maskz_mul_round_ps(U, A, B, R) \
(__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
- (__v16sf)_mm512_setzero_ps());
+ (__v16sf)_mm512_setzero_ps())
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_div_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
@@ -2349,12 +2339,12 @@ _mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) {
#define _mm512_mask_div_round_pd(W, U, A, B, R) \
(__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_div_round_pd((A), (B), (R)), \
- (__v8df)(__m512d)(W));
+ (__v8df)(__m512d)(W))
#define _mm512_maskz_div_round_pd(U, A, B, R) \
(__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_div_round_pd((A), (B), (R)), \
- (__v8df)_mm512_setzero_pd());
+ (__v8df)_mm512_setzero_pd())
#define _mm512_div_round_ps(A, B, R) \
(__m512)__builtin_ia32_divps512((__v16sf)(__m512)(A), \
@@ -2363,12 +2353,12 @@ _mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) {
#define _mm512_mask_div_round_ps(W, U, A, B, R) \
(__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_div_round_ps((A), (B), (R)), \
- (__v16sf)(__m512)(W));
+ (__v16sf)(__m512)(W))
#define _mm512_maskz_div_round_ps(U, A, B, R) \
(__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_div_round_ps((A), (B), (R)), \
- (__v16sf)_mm512_setzero_ps());
+ (__v16sf)_mm512_setzero_ps())
#define _mm512_roundscale_ps(A, B) \
(__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(B), \
@@ -3789,20 +3779,9 @@ _mm512_mask_cvtpd_pslo (__m512 __W, __mmask8 __U,__m512d __A)
(__v16hi)_mm256_setzero_si256(), \
(__mmask16)(W))
-#define _mm512_cvtps_ph(A, I) \
- (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
- (__v16hi)_mm256_setzero_si256(), \
- (__mmask16)-1)
-
-#define _mm512_mask_cvtps_ph(U, W, A, I) \
- (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
- (__v16hi)(__m256i)(U), \
- (__mmask16)(W))
-
-#define _mm512_maskz_cvtps_ph(W, A, I) \
- (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
- (__v16hi)_mm256_setzero_si256(), \
- (__mmask16)(W))
+#define _mm512_cvtps_ph _mm512_cvt_roundps_ph
+#define _mm512_mask_cvtps_ph _mm512_mask_cvt_roundps_ph
+#define _mm512_maskz_cvtps_ph _mm512_maskz_cvt_roundps_ph
#define _mm512_cvt_roundph_ps(A, R) \
(__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
@@ -4324,7 +4303,7 @@ static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_loadu_si512 (void const *__P)
{
struct __loadu_si512 {
- __m512i __v;
+ __m512i_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_si512*)__P)->__v;
}
@@ -4333,7 +4312,7 @@ static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_loadu_epi32 (void const *__P)
{
struct __loadu_epi32 {
- __m512i __v;
+ __m512i_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_epi32*)__P)->__v;
}
@@ -4360,7 +4339,7 @@ static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_loadu_epi64 (void const *__P)
{
struct __loadu_epi64 {
- __m512i __v;
+ __m512i_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_epi64*)__P)->__v;
}
@@ -4420,7 +4399,7 @@ static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_loadu_pd(void const *__p)
{
struct __loadu_pd {
- __m512d __v;
+ __m512d_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_pd*)__p)->__v;
}
@@ -4429,7 +4408,7 @@ static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_loadu_ps(void const *__p)
{
struct __loadu_ps {
- __m512 __v;
+ __m512_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_ps*)__p)->__v;
}
@@ -4504,7 +4483,7 @@ static __inline void __DEFAULT_FN_ATTRS512
_mm512_storeu_epi64 (void *__P, __m512i __A)
{
struct __storeu_epi64 {
- __m512i __v;
+ __m512i_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_epi64*)__P)->__v = __A;
}
@@ -4520,7 +4499,7 @@ static __inline void __DEFAULT_FN_ATTRS512
_mm512_storeu_si512 (void *__P, __m512i __A)
{
struct __storeu_si512 {
- __m512i __v;
+ __m512i_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_si512*)__P)->__v = __A;
}
@@ -4529,7 +4508,7 @@ static __inline void __DEFAULT_FN_ATTRS512
_mm512_storeu_epi32 (void *__P, __m512i __A)
{
struct __storeu_epi32 {
- __m512i __v;
+ __m512i_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_epi32*)__P)->__v = __A;
}
@@ -4551,7 +4530,7 @@ static __inline void __DEFAULT_FN_ATTRS512
_mm512_storeu_pd(void *__P, __m512d __A)
{
struct __storeu_pd {
- __m512d __v;
+ __m512d_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_pd*)__P)->__v = __A;
}
@@ -4567,7 +4546,7 @@ static __inline void __DEFAULT_FN_ATTRS512
_mm512_storeu_ps(void *__P, __m512 __A)
{
struct __storeu_ps {
- __m512 __v;
+ __m512_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_ps*)__P)->__v = __A;
}
@@ -9329,7 +9308,7 @@ _mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A)
__v2du __t6 = __t4 op __t5; \
__v2du __t7 = __builtin_shufflevector(__t6, __t6, 1, 0); \
__v2du __t8 = __t6 op __t7; \
- return __t8[0];
+ return __t8[0]
static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_add_epi64(__m512i __W) {
_mm512_mask_reduce_operator(+);
@@ -9381,7 +9360,7 @@ _mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W) {
__m128d __t6 = __t4 op __t5; \
__m128d __t7 = __builtin_shufflevector(__t6, __t6, 1, 0); \
__m128d __t8 = __t6 op __t7; \
- return __t8[0];
+ return __t8[0]
static __inline__ double __DEFAULT_FN_ATTRS512 _mm512_reduce_add_pd(__m512d __W) {
_mm512_mask_reduce_operator(+);
@@ -9415,7 +9394,7 @@ _mm512_mask_reduce_mul_pd(__mmask8 __M, __m512d __W) {
__v4su __t8 = __t6 op __t7; \
__v4su __t9 = __builtin_shufflevector(__t8, __t8, 1, 0, 3, 2); \
__v4su __t10 = __t8 op __t9; \
- return __t10[0];
+ return __t10[0]
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_add_epi32(__m512i __W) {
@@ -9473,7 +9452,7 @@ _mm512_mask_reduce_or_epi32(__mmask16 __M, __m512i __W) {
__m128 __t8 = __t6 op __t7; \
__m128 __t9 = __builtin_shufflevector(__t8, __t8, 1, 0, 3, 2); \
__m128 __t10 = __t8 op __t9; \
- return __t10[0];
+ return __t10[0]
static __inline__ float __DEFAULT_FN_ATTRS512
_mm512_reduce_add_ps(__m512 __W) {
@@ -9505,7 +9484,7 @@ _mm512_mask_reduce_mul_ps(__mmask16 __M, __m512 __W) {
__m512i __t4 = _mm512_##op(__t2, __t3); \
__m512i __t5 = (__m512i)__builtin_shufflevector((__v8di)__t4, (__v8di)__t4, 1, 0, 3, 2, 5, 4, 7, 6); \
__v8di __t6 = (__v8di)_mm512_##op(__t4, __t5); \
- return __t6[0];
+ return __t6[0]
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_reduce_max_epi64(__m512i __V) {
@@ -9563,7 +9542,7 @@ _mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) {
__m128i __t8 = _mm_##op(__t6, __t7); \
__m128i __t9 = (__m128i)__builtin_shufflevector((__v4si)__t8, (__v4si)__t8, 1, 0, 3, 2); \
__v4si __t10 = (__v4si)_mm_##op(__t8, __t9); \
- return __t10[0];
+ return __t10[0]
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_max_epi32(__m512i __V) {
@@ -9619,7 +9598,7 @@ _mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __V) {
__m128d __t6 = _mm_##op(__t4, __t5); \
__m128d __t7 = __builtin_shufflevector(__t6, __t6, 1, 0); \
__m128d __t8 = _mm_##op(__t6, __t7); \
- return __t8[0];
+ return __t8[0]
static __inline__ double __DEFAULT_FN_ATTRS512
_mm512_reduce_max_pd(__m512d __V) {
@@ -9655,7 +9634,7 @@ _mm512_mask_reduce_min_pd(__mmask8 __M, __m512d __V) {
__m128 __t8 = _mm_##op(__t6, __t7); \
__m128 __t9 = __builtin_shufflevector(__t8, __t8, 1, 0, 3, 2); \
__m128 __t10 = _mm_##op(__t8, __t9); \
- return __t10[0];
+ return __t10[0]
static __inline__ float __DEFAULT_FN_ATTRS512
_mm512_reduce_max_ps(__m512 __V) {
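Two behavioral changes in the avx512fintrin.h hunks above are worth illustrating: the __m512/__m512d/__m512i types now carry an explicit 64-byte alignment and gain 1-byte-aligned __m512_u/__m512d_u/__m512i_u counterparts that back the packed, may_alias load/store wrapper structs, and the *_round_* mask/maskz macros lose their trailing semicolons so they expand cleanly inside larger expressions. A minimal sketch of the unaligned load/store path, assuming a Clang built from this import and compilation with -mavx512f (the buffer name and values are illustrative only, not taken from the header):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      /* buf + 1 is deliberately not 64-byte aligned; the packed, may_alias
         __m512_u-based wrapper struct makes this load/store well defined. */
      float buf[17];
      for (int i = 0; i < 17; ++i)
        buf[i] = (float)i;

      __m512 v = _mm512_loadu_ps(buf + 1);        /* unaligned 512-bit load  */
      v = _mm512_add_ps(v, _mm512_set1_ps(1.0f)); /* element-wise add        */
      _mm512_storeu_ps(buf + 1, v);               /* unaligned 512-bit store */

      printf("%f\n", buf[1]);                     /* expected: 2.000000      */
      return 0;
    }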
diff --git a/lib/Headers/avx512ifmaintrin.h b/lib/Headers/avx512ifmaintrin.h
index 159713049c1a..5f7da52f1f73 100644
--- a/lib/Headers/avx512ifmaintrin.h
+++ b/lib/Headers/avx512ifmaintrin.h
@@ -1,23 +1,9 @@
/*===------------- avx512ifmaintrin.h - IFMA intrinsics ------------------===
*
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/avx512ifmavlintrin.h b/lib/Headers/avx512ifmavlintrin.h
index afdea888c55b..5889401d1055 100644
--- a/lib/Headers/avx512ifmavlintrin.h
+++ b/lib/Headers/avx512ifmavlintrin.h
@@ -1,23 +1,9 @@
/*===------------- avx512ifmavlintrin.h - IFMA intrinsics ------------------===
*
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/avx512pfintrin.h b/lib/Headers/avx512pfintrin.h
index 73b2234fb410..b8bcf49c6b24 100644
--- a/lib/Headers/avx512pfintrin.h
+++ b/lib/Headers/avx512pfintrin.h
@@ -1,23 +1,9 @@
/*===------------- avx512pfintrin.h - PF intrinsics ------------------------===
*
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/avx512vbmi2intrin.h b/lib/Headers/avx512vbmi2intrin.h
index 53242524293f..a23144616ce3 100644
--- a/lib/Headers/avx512vbmi2intrin.h
+++ b/lib/Headers/avx512vbmi2intrin.h
@@ -1,23 +1,9 @@
/*===------------- avx512vbmi2intrin.h - VBMI2 intrinsics ------------------===
*
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/avx512vbmiintrin.h b/lib/Headers/avx512vbmiintrin.h
index 5463d9015504..c0e0f94d48d4 100644
--- a/lib/Headers/avx512vbmiintrin.h
+++ b/lib/Headers/avx512vbmiintrin.h
@@ -1,23 +1,9 @@
/*===------------- avx512vbmiintrin.h - VBMI intrinsics ------------------===
*
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/avx512vbmivlintrin.h b/lib/Headers/avx512vbmivlintrin.h
index b5d5aa9af523..c5b96ae8ada7 100644
--- a/lib/Headers/avx512vbmivlintrin.h
+++ b/lib/Headers/avx512vbmivlintrin.h
@@ -1,23 +1,9 @@
/*===------------- avx512vbmivlintrin.h - VBMI intrinsics ------------------===
*
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/avx512vlbf16intrin.h b/lib/Headers/avx512vlbf16intrin.h
new file mode 100644
index 000000000000..1b1a744bcdbf
--- /dev/null
+++ b/lib/Headers/avx512vlbf16intrin.h
@@ -0,0 +1,474 @@
+/*===--------- avx512vlbf16intrin.h - AVX512_BF16 intrinsics ---------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vlbf16intrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512VLBF16INTRIN_H
+#define __AVX512VLBF16INTRIN_H
+
+typedef short __m128bh __attribute__((__vector_size__(16), __aligned__(16)));
+
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl, avx512bf16"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl, avx512bf16"), __min_vector_width__(256)))
+
+/// Convert Two Packed Single Data to One Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float].
+/// \param __B
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from
+/// conversion of __B, and higher 64 bits come from conversion of __A.
+static __inline__ __m128bh __DEFAULT_FN_ATTRS128
+_mm_cvtne2ps_pbh(__m128 __A, __m128 __B) {
+ return (__m128bh)__builtin_ia32_cvtne2ps2bf16_128((__v4sf) __A,
+ (__v4sf) __B);
+}
+
+/// Convert Two Packed Single Data to One Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float].
+/// \param __B
+/// A 128-bit vector of [4 x float].
+/// \param __W
+/// A 128-bit vector of [8 x bfloat].
+/// \param __U
+///    An 8-bit mask value specifying what is chosen for each element.
+/// A 1 means conversion of __A or __B. A 0 means element from __W.
+/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from
+/// conversion of __B, and higher 64 bits come from conversion of __A.
+static __inline__ __m128bh __DEFAULT_FN_ATTRS128
+_mm_mask_cvtne2ps_pbh(__m128bh __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128bh)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_cvtne2ps_pbh(__A, __B),
+ (__v8hi)__W);
+}
+
+/// Convert Two Packed Single Data to One Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float].
+/// \param __B
+/// A 128-bit vector of [4 x float].
+/// \param __U
+///    An 8-bit mask value specifying what is chosen for each element.
+/// A 1 means conversion of __A or __B. A 0 means element is zero.
+/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from
+/// conversion of __B, and higher 64 bits come from conversion of __A.
+static __inline__ __m128bh __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtne2ps_pbh(__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128bh)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_cvtne2ps_pbh(__A, __B),
+ (__v8hi)_mm_setzero_si128());
+}
+
+/// Convert Two Packed Single Data to One Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
+///
+/// \param __A
+/// A 256-bit vector of [8 x float].
+/// \param __B
+/// A 256-bit vector of [8 x float].
+/// \returns A 256-bit vector of [16 x bfloat] whose lower 128 bits come from
+/// conversion of __B, and higher 128 bits come from conversion of __A.
+static __inline__ __m256bh __DEFAULT_FN_ATTRS256
+_mm256_cvtne2ps_pbh(__m256 __A, __m256 __B) {
+ return (__m256bh)__builtin_ia32_cvtne2ps2bf16_256((__v8sf) __A,
+ (__v8sf) __B);
+}
+
+/// Convert Two Packed Single Data to One Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
+///
+/// \param __A
+/// A 256-bit vector of [8 x float].
+/// \param __B
+/// A 256-bit vector of [8 x float].
+/// \param __W
+/// A 256-bit vector of [16 x bfloat].
+/// \param __U
+/// A 16-bit mask value specifying what is chosen for each element.
+/// A 1 means conversion of __A or __B. A 0 means element from __W.
+/// \returns A 256-bit vector of [16 x bfloat] whose lower 128 bits come from
+/// conversion of __B, and higher 128 bits come from conversion of __A.
+static __inline__ __m256bh __DEFAULT_FN_ATTRS256
+_mm256_mask_cvtne2ps_pbh(__m256bh __W, __mmask16 __U, __m256 __A, __m256 __B) {
+ return (__m256bh)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_cvtne2ps_pbh(__A, __B),
+ (__v16hi)__W);
+}
+
+/// Convert Two Packed Single Data to One Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
+///
+/// \param __A
+/// A 256-bit vector of [8 x float].
+/// \param __B
+/// A 256-bit vector of [8 x float].
+/// \param __U
+/// A 16-bit mask value specifying what is chosen for each element.
+/// A 1 means conversion of __A or __B. A 0 means element is zero.
+/// \returns A 256-bit vector of [16 x bfloat] whose lower 128 bits come from
+/// conversion of __B, and higher 128 bits come from conversion of __A.
+static __inline__ __m256bh __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvtne2ps_pbh(__mmask16 __U, __m256 __A, __m256 __B) {
+ return (__m256bh)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_cvtne2ps_pbh(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
+}
+
+/// Convert Packed Single Data to Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from
+/// conversion of __A, and higher 64 bits are 0.
+static __inline__ __m128bh __DEFAULT_FN_ATTRS128
+_mm_cvtneps_pbh(__m128 __A) {
+ return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A,
+ (__v8hi)_mm_undefined_si128(),
+ (__mmask8)-1);
+}
+
+/// Convert Packed Single Data to Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float].
+/// \param __W
+/// A 128-bit vector of [8 x bfloat].
+/// \param __U
+/// A 4-bit mask value specifying what is chosen for each element.
+/// A 1 means conversion of __A. A 0 means element from __W.
+/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from
+/// conversion of __A, and higher 64 bits are 0.
+static __inline__ __m128bh __DEFAULT_FN_ATTRS128
+_mm_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m128 __A) {
+ return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A,
+ (__v8hi)__W,
+ (__mmask8)__U);
+}
+
+/// Convert Packed Single Data to Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float].
+/// \param __U
+/// A 4-bit mask value specifying what is chosen for each element.
+/// A 1 means conversion of __A. A 0 means element is zero.
+/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from
+/// conversion of __A, and higher 64 bits are 0.
+static __inline__ __m128bh __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtneps_pbh(__mmask8 __U, __m128 __A) {
+ return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A,
+ (__v8hi)_mm_setzero_si128(),
+ (__mmask8)__U);
+}
+
+/// Convert Packed Single Data to Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
+///
+/// \param __A
+/// A 256-bit vector of [8 x float].
+/// \returns A 128-bit vector of [8 x bfloat] resulting from conversion of __A.
+static __inline__ __m128bh __DEFAULT_FN_ATTRS256
+_mm256_cvtneps_pbh(__m256 __A) {
+ return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A,
+ (__v8hi)_mm_undefined_si128(),
+ (__mmask8)-1);
+}
+
+/// Convert Packed Single Data to Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
+///
+/// \param __A
+/// A 256-bit vector of [8 x float].
+/// \param __W
+/// A 256-bit vector of [8 x bfloat].
+/// \param __U
+///    An 8-bit mask value specifying what is chosen for each element.
+///    A 1 means conversion of __A. A 0 means element from __W.
+/// \returns A 128-bit vector of [8 x bfloat] resulting from conversion of __A.
+static __inline__ __m128bh __DEFAULT_FN_ATTRS256
+_mm256_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m256 __A) {
+ return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A,
+ (__v8hi)__W,
+ (__mmask8)__U);
+}
+
+/// Convert Packed Single Data to Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
+///
+/// \param __A
+/// A 256-bit vector of [8 x float].
+/// \param __U
+///    An 8-bit mask value specifying what is chosen for each element.
+///    A 1 means conversion of __A. A 0 means element is zero.
+/// \returns A 128-bit vector of [8 x bfloat] resulting from conversion of __A.
+static __inline__ __m128bh __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvtneps_pbh(__mmask8 __U, __m256 __A) {
+ return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A,
+ (__v8hi)_mm_setzero_si128(),
+ (__mmask8)__U);
+}
+
+/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
+///
+/// \param __A
+/// A 128-bit vector of [8 x bfloat].
+/// \param __B
+/// A 128-bit vector of [8 x bfloat].
+/// \param __D
+/// A 128-bit vector of [4 x float].
+/// \returns A 128-bit vector of [4 x float] resulting from the dot product of
+///    __A, __B and __D.
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_dpbf16_ps(__m128 __D, __m128bh __A, __m128bh __B) {
+ return (__m128)__builtin_ia32_dpbf16ps_128((__v4sf)__D,
+ (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
+///
+/// \param __A
+/// A 128-bit vector of [8 x bfloat].
+/// \param __B
+/// A 128-bit vector of [8 x bfloat].
+/// \param __D
+/// A 128-bit vector of [4 x float].
+/// \param __U
+///    An 8-bit mask value specifying what is chosen for each element.
+///    A 1 means __A and __B's dot product accumulated with __D. A 0 means __D.
+/// \returns A 128-bit vector of [4 x float] resulting from the dot product of
+///    __A, __B and __D.
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_mask_dpbf16_ps(__m128 __D, __mmask8 __U, __m128bh __A, __m128bh __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_dpbf16_ps(__D, __A, __B),
+ (__v4sf)__D);
+}
+
+/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
+///
+/// \param __A
+/// A 128-bit vector of [8 x bfloat].
+/// \param __B
+/// A 128-bit vector of [8 x bfloat].
+/// \param __D
+/// A 128-bit vector of [4 x float].
+/// \param __U
+///    An 8-bit mask value specifying what is chosen for each element.
+///    A 1 means __A and __B's dot product accumulated with __D. A 0 means 0.
+/// \returns A 128-bit vector of [4 x float] resulting from the dot product of
+///    __A, __B and __D.
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_maskz_dpbf16_ps(__mmask8 __U, __m128 __D, __m128bh __A, __m128bh __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_dpbf16_ps(__D, __A, __B),
+ (__v4sf)_mm_setzero_si128());
+}
+
+/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
+///
+/// \param __A
+/// A 256-bit vector of [16 x bfloat].
+/// \param __B
+/// A 256-bit vector of [16 x bfloat].
+/// \param __D
+/// A 256-bit vector of [8 x float].
+/// \returns A 256-bit vector of [8 x float] resulting from the dot product of
+///    __A, __B and __D.
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_dpbf16_ps(__m256 __D, __m256bh __A, __m256bh __B) {
+ return (__m256)__builtin_ia32_dpbf16ps_256((__v8sf)__D,
+ (__v8si)__A,
+ (__v8si)__B);
+}
+
+/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
+///
+/// \param __A
+/// A 256-bit vector of [16 x bfloat].
+/// \param __B
+/// A 256-bit vector of [16 x bfloat].
+/// \param __D
+/// A 256-bit vector of [8 x float].
+/// \param __U
+///    An 8-bit mask value specifying what is chosen for each element.
+///    A 1 means __A and __B's dot product accumulated with __D. A 0 means __D.
+/// \returns A 256-bit vector of [8 x float] resulting from the dot product of
+///    __A, __B and __D.
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_mask_dpbf16_ps(__m256 __D, __mmask8 __U, __m256bh __A, __m256bh __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_dpbf16_ps(__D, __A, __B),
+ (__v8sf)__D);
+}
+
+/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
+///
+/// \param __A
+/// A 256-bit vector of [16 x bfloat].
+/// \param __B
+/// A 256-bit vector of [16 x bfloat].
+/// \param __D
+/// A 256-bit vector of [8 x float].
+/// \param __U
+///    An 8-bit mask value specifying what is chosen for each element.
+///    A 1 means __A and __B's dot product accumulated with __D. A 0 means 0.
+/// \returns A 256-bit vector of [8 x float] resulting from the dot product of
+///    __A, __B and __D.
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_maskz_dpbf16_ps(__mmask8 __U, __m256 __D, __m256bh __A, __m256bh __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_dpbf16_ps(__D, __A, __B),
+ (__v8sf)_mm256_setzero_si256());
+}
+
+/// Convert One Single float Data to One BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
+///
+/// \param __A
+///    A float value.
+/// \returns A bf16 value whose sign and exponent fields are unchanged, and
+///    whose fraction field is truncated to 7 bits.
+static __inline__ __bfloat16 __DEFAULT_FN_ATTRS128 _mm_cvtness_sbh(float __A) {
+ __v4sf __V = {__A, 0, 0, 0};
+ __v8hi __R = __builtin_ia32_cvtneps2bf16_128_mask(
+ (__v4sf)__V, (__v8hi)_mm_undefined_si128(), (__mmask8)-1);
+ return __R[0];
+}
+
+/// Convert Packed BF16 Data to Packed float Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \param __A
+/// A 128-bit vector of [8 x bfloat].
+/// \returns A 256-bit vector of [8 x float] resulting from conversion of __A.
+static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_cvtpbh_ps(__m128bh __A) {
+ return _mm256_castsi256_ps((__m256i)_mm256_slli_epi32(
+ (__m256i)_mm256_cvtepi16_epi32((__m128i)__A), 16));
+}
+
+/// Convert Packed BF16 Data to Packed float Data using zeroing mask.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \param __U
+///    An 8-bit mask. Elements are zeroed out when the corresponding mask
+/// bit is not set.
+/// \param __A
+/// A 128-bit vector of [8 x bfloat].
+/// \returns A 256-bit vector of [8 x float] resulting from conversion of __A.
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvtpbh_ps(__mmask8 __U, __m128bh __A) {
+ return _mm256_castsi256_ps((__m256i)_mm256_slli_epi32(
+ (__m256i)_mm256_maskz_cvtepi16_epi32((__mmask8)__U, (__m128i)__A), 16));
+}
+
+/// Convert Packed BF16 Data to Packed float Data using merging mask.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \param __S
+/// A 256-bit vector of [8 x float]. Elements are copied from __S when
+/// the corresponding mask bit is not set.
+/// \param __U
+///    An 8-bit mask. Elements are copied from __S when the corresponding mask
+///    bit is not set.
+/// \param __A
+/// A 128-bit vector of [8 x bfloat].
+/// \returns A 256-bit vector of [8 x float] resulting from conversion of __A.
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_mask_cvtpbh_ps(__m256 __S, __mmask8 __U, __m128bh __A) {
+ return _mm256_castsi256_ps((__m256i)_mm256_mask_slli_epi32(
+ (__m256i)__S, (__mmask8)__U, (__m256i)_mm256_cvtepi16_epi32((__m128i)__A),
+ 16));
+}
+
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
+
+#endif
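For orientation, a usage sketch of the intrinsics defined in the new avx512vlbf16intrin.h above (this is not part of the imported header; it assumes a toolchain that accepts -mavx512bf16 -mavx512vl and uses only functions declared in this file or in the standard SSE headers):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
      __m128 b = _mm_set_ps(8.0f, 7.0f, 6.0f, 5.0f);

      /* Pack the eight floats from a and b into one vector of 8 bf16 values. */
      __m128bh ab = _mm_cvtne2ps_pbh(a, b);

      /* Accumulate bf16 pair dot products into a zeroed float accumulator. */
      __m128 acc = _mm_dpbf16_ps(_mm_setzero_ps(), ab, ab);

      /* Scalar float -> bf16 conversion; the fraction is truncated to 7 bits. */
      __bfloat16 one = _mm_cvtness_sbh(1.0f);

      printf("lane0 = %f, bf16 bits = 0x%x\n", _mm_cvtss_f32(acc), (int)one);
      return 0;
    }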
diff --git a/lib/Headers/avx512vlbitalgintrin.h b/lib/Headers/avx512vlbitalgintrin.h
index 64860b29254f..5154eae14cbb 100644
--- a/lib/Headers/avx512vlbitalgintrin.h
+++ b/lib/Headers/avx512vlbitalgintrin.h
@@ -1,23 +1,9 @@
/*===---- avx512vlbitalgintrin.h - BITALG intrinsics -----------------------===
*
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/avx512vlbwintrin.h b/lib/Headers/avx512vlbwintrin.h
index 87e0023e8b74..ead09466bc2c 100644
--- a/lib/Headers/avx512vlbwintrin.h
+++ b/lib/Headers/avx512vlbwintrin.h
@@ -1,22 +1,8 @@
/*===---- avx512vlbwintrin.h - AVX512VL and AVX512BW intrinsics ------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -2301,7 +2287,7 @@ static __inline __m128i __DEFAULT_FN_ATTRS128
_mm_loadu_epi16 (void const *__P)
{
struct __loadu_epi16 {
- __m128i __v;
+ __m128i_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_epi16*)__P)->__v;
}
@@ -2327,7 +2313,7 @@ static __inline __m256i __DEFAULT_FN_ATTRS256
_mm256_loadu_epi16 (void const *__P)
{
struct __loadu_epi16 {
- __m256i __v;
+ __m256i_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_epi16*)__P)->__v;
}
@@ -2353,7 +2339,7 @@ static __inline __m128i __DEFAULT_FN_ATTRS128
_mm_loadu_epi8 (void const *__P)
{
struct __loadu_epi8 {
- __m128i __v;
+ __m128i_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_epi8*)__P)->__v;
}
@@ -2379,7 +2365,7 @@ static __inline __m256i __DEFAULT_FN_ATTRS256
_mm256_loadu_epi8 (void const *__P)
{
struct __loadu_epi8 {
- __m256i __v;
+ __m256i_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_epi8*)__P)->__v;
}
@@ -2405,7 +2391,7 @@ static __inline void __DEFAULT_FN_ATTRS128
_mm_storeu_epi16 (void *__P, __m128i __A)
{
struct __storeu_epi16 {
- __m128i __v;
+ __m128i_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_epi16*)__P)->__v = __A;
}
@@ -2422,7 +2408,7 @@ static __inline void __DEFAULT_FN_ATTRS256
_mm256_storeu_epi16 (void *__P, __m256i __A)
{
struct __storeu_epi16 {
- __m256i __v;
+ __m256i_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_epi16*)__P)->__v = __A;
}
@@ -2439,7 +2425,7 @@ static __inline void __DEFAULT_FN_ATTRS128
_mm_storeu_epi8 (void *__P, __m128i __A)
{
struct __storeu_epi8 {
- __m128i __v;
+ __m128i_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_epi8*)__P)->__v = __A;
}
@@ -2456,7 +2442,7 @@ static __inline void __DEFAULT_FN_ATTRS256
_mm256_storeu_epi8 (void *__P, __m256i __A)
{
struct __storeu_epi8 {
- __m256i __v;
+ __m256i_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_epi8*)__P)->__v = __A;
}
diff --git a/lib/Headers/avx512vlcdintrin.h b/lib/Headers/avx512vlcdintrin.h
index 903a7c25493f..cc8b72528d01 100644
--- a/lib/Headers/avx512vlcdintrin.h
+++ b/lib/Headers/avx512vlcdintrin.h
@@ -1,22 +1,8 @@
/*===---- avx512vlcdintrin.h - AVX512VL and AVX512CD intrinsics ------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -60,99 +46,89 @@ _mm256_broadcastmw_epi32 (__mmask16 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_conflict_epi64 (__m128i __A)
{
- return (__m128i) __builtin_ia32_vpconflictdi_128_mask ((__v2di) __A,
- (__v2di) _mm_undefined_si128 (),
- (__mmask8) -1);
+ return (__m128i) __builtin_ia32_vpconflictdi_128 ((__v2di) __A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_conflict_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_vpconflictdi_128_mask ((__v2di) __A,
- (__v2di) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_conflict_epi64(__A),
+ (__v2di)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_conflict_epi64 (__mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_vpconflictdi_128_mask ((__v2di) __A,
- (__v2di)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_conflict_epi64(__A),
+ (__v2di)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_conflict_epi64 (__m256i __A)
{
- return (__m256i) __builtin_ia32_vpconflictdi_256_mask ((__v4di) __A,
- (__v4di) _mm256_undefined_si256 (),
- (__mmask8) -1);
+ return (__m256i) __builtin_ia32_vpconflictdi_256 ((__v4di) __A);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_conflict_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
{
- return (__m256i) __builtin_ia32_vpconflictdi_256_mask ((__v4di) __A,
- (__v4di) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_conflict_epi64(__A),
+ (__v4di)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_conflict_epi64 (__mmask8 __U, __m256i __A)
{
- return (__m256i) __builtin_ia32_vpconflictdi_256_mask ((__v4di) __A,
- (__v4di) _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_conflict_epi64(__A),
+ (__v4di)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_conflict_epi32 (__m128i __A)
{
- return (__m128i) __builtin_ia32_vpconflictsi_128_mask ((__v4si) __A,
- (__v4si) _mm_undefined_si128 (),
- (__mmask8) -1);
+ return (__m128i) __builtin_ia32_vpconflictsi_128 ((__v4si) __A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_conflict_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_vpconflictsi_128_mask ((__v4si) __A,
- (__v4si) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_conflict_epi32(__A),
+ (__v4si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_conflict_epi32 (__mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_vpconflictsi_128_mask ((__v4si) __A,
- (__v4si) _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_conflict_epi32(__A),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_conflict_epi32 (__m256i __A)
{
- return (__m256i) __builtin_ia32_vpconflictsi_256_mask ((__v8si) __A,
- (__v8si) _mm256_undefined_si256 (),
- (__mmask8) -1);
+ return (__m256i) __builtin_ia32_vpconflictsi_256 ((__v8si) __A);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_conflict_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
{
- return (__m256i) __builtin_ia32_vpconflictsi_256_mask ((__v8si) __A,
- (__v8si) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_conflict_epi32(__A),
+ (__v8si)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_conflict_epi32 (__mmask8 __U, __m256i __A)
{
- return (__m256i) __builtin_ia32_vpconflictsi_256_mask ((__v8si) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_conflict_epi32(__A),
+ (__v8si)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
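The avx512vlcdintrin.h rewrite above replaces the dedicated masked conflict builtins with the unmasked __builtin_ia32_vpconflict* builtins composed with the generic per-element select builtins; the conflict operation itself is unchanged. As a point of reference, a scalar sketch of what the zero-masking 32-bit form computes (an illustrative helper, not part of the header):

    #include <stdint.h>

    /* For each lane i, VPCONFLICTD produces a bitmask whose bit j (j < i) is
       set when lane j holds the same value as lane i; zero-masking then clears
       lanes whose mask bit is 0, which is what the new select-based wrappers
       express. */
    static void conflict_epi32_maskz_ref(uint32_t dst[4], uint8_t mask,
                                         const uint32_t src[4]) {
      for (int i = 0; i < 4; ++i) {
        uint32_t bits = 0;
        for (int j = 0; j < i; ++j)
          if (src[j] == src[i])
            bits |= 1u << j;
        dst[i] = ((mask >> i) & 1) ? bits : 0;
      }
    }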
diff --git a/lib/Headers/avx512vldqintrin.h b/lib/Headers/avx512vldqintrin.h
index 9d13846e8964..95ba574ea821 100644
--- a/lib/Headers/avx512vldqintrin.h
+++ b/lib/Headers/avx512vldqintrin.h
@@ -1,22 +1,8 @@
/*===---- avx512vldqintrin.h - AVX512VL and AVX512DQ intrinsics ------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -523,23 +509,21 @@ _mm_maskz_cvtepi64_ps (__mmask8 __U, __m128i __A) {
static __inline__ __m128 __DEFAULT_FN_ATTRS256
_mm256_cvtepi64_ps (__m256i __A) {
- return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A,
- (__v4sf) _mm_setzero_ps(),
- (__mmask8) -1);
+ return (__m128)__builtin_convertvector((__v4di)__A, __v4sf);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m256i __A) {
- return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A,
- (__v4sf) __W,
- (__mmask8) __U);
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm256_cvtepi64_ps(__A),
+ (__v4sf)__W);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtepi64_ps (__mmask8 __U, __m256i __A) {
- return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A,
- (__v4sf) _mm_setzero_ps(),
- (__mmask8) __U);
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm256_cvtepi64_ps(__A),
+ (__v4sf)_mm_setzero_ps());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -771,23 +755,21 @@ _mm_maskz_cvtepu64_ps (__mmask8 __U, __m128i __A) {
static __inline__ __m128 __DEFAULT_FN_ATTRS256
_mm256_cvtepu64_ps (__m256i __A) {
- return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A,
- (__v4sf) _mm_setzero_ps(),
- (__mmask8) -1);
+ return (__m128)__builtin_convertvector((__v4du)__A, __v4sf);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m256i __A) {
- return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A,
- (__v4sf) __W,
- (__mmask8) __U);
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm256_cvtepu64_ps(__A),
+ (__v4sf)__W);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtepu64_ps (__mmask8 __U, __m256i __A) {
- return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A,
- (__v4sf) _mm_setzero_ps(),
- (__mmask8) __U);
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm256_cvtepu64_ps(__A),
+ (__v4sf)_mm_setzero_ps());
}
#define _mm_range_pd(A, B, C) \
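The avx512vldqintrin.h hunks above likewise drop the masked cvtqq2ps/cvtuqq2ps builtins: the unmasked conversions now use Clang's generic __builtin_convertvector, which converts each element of one vector type to the element type of another vector type with the same lane count, and the masked forms are layered on with the select builtins. A standalone illustration of the builtin (the demo type and function names are not from the header):

    typedef long long __v4di_demo __attribute__((__vector_size__(32)));
    typedef float     __v4sf_demo __attribute__((__vector_size__(16)));

    /* Converts each signed 64-bit lane to float, like the unmasked intrinsic. */
    static __v4sf_demo cvt_epi64_ps_demo(__v4di_demo x) {
      return __builtin_convertvector(x, __v4sf_demo);
    }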
diff --git a/lib/Headers/avx512vlintrin.h b/lib/Headers/avx512vlintrin.h
index a2cdc0a96e59..9494fc8a6e59 100644
--- a/lib/Headers/avx512vlintrin.h
+++ b/lib/Headers/avx512vlintrin.h
@@ -1,22 +1,8 @@
/*===---- avx512vlintrin.h - AVX512VL intrinsics ---------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -5513,7 +5499,7 @@ static __inline __m128i __DEFAULT_FN_ATTRS128
_mm_loadu_epi64 (void const *__P)
{
struct __loadu_epi64 {
- __m128i __v;
+ __m128i_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_epi64*)__P)->__v;
}
@@ -5539,7 +5525,7 @@ static __inline __m256i __DEFAULT_FN_ATTRS256
_mm256_loadu_epi64 (void const *__P)
{
struct __loadu_epi64 {
- __m256i __v;
+ __m256i_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_epi64*)__P)->__v;
}
@@ -5565,7 +5551,7 @@ static __inline __m128i __DEFAULT_FN_ATTRS128
_mm_loadu_epi32 (void const *__P)
{
struct __loadu_epi32 {
- __m128i __v;
+ __m128i_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_epi32*)__P)->__v;
}
@@ -5591,7 +5577,7 @@ static __inline __m256i __DEFAULT_FN_ATTRS256
_mm256_loadu_epi32 (void const *__P)
{
struct __loadu_epi32 {
- __m256i __v;
+ __m256i_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_epi32*)__P)->__v;
}
@@ -5717,7 +5703,7 @@ static __inline void __DEFAULT_FN_ATTRS128
_mm_storeu_epi64 (void *__P, __m128i __A)
{
struct __storeu_epi64 {
- __m128i __v;
+ __m128i_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_epi64*)__P)->__v = __A;
}
@@ -5734,7 +5720,7 @@ static __inline void __DEFAULT_FN_ATTRS256
_mm256_storeu_epi64 (void *__P, __m256i __A)
{
struct __storeu_epi64 {
- __m256i __v;
+ __m256i_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_epi64*)__P)->__v = __A;
}
@@ -5751,7 +5737,7 @@ static __inline void __DEFAULT_FN_ATTRS128
_mm_storeu_epi32 (void *__P, __m128i __A)
{
struct __storeu_epi32 {
- __m128i __v;
+ __m128i_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_epi32*)__P)->__v = __A;
}
@@ -5768,7 +5754,7 @@ static __inline void __DEFAULT_FN_ATTRS256
_mm256_storeu_epi32 (void *__P, __m256i __A)
{
struct __storeu_epi32 {
- __m256i __v;
+ __m256i_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_epi32*)__P)->__v = __A;
}
@@ -7000,7 +6986,7 @@ _mm_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
__builtin_ia32_pmovsdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtsepi32_epi8 (__m256i __A)
{
return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
@@ -7023,7 +7009,7 @@ _mm256_maskz_cvtsepi32_epi8 (__mmask8 __M, __m256i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS128
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
{
__builtin_ia32_pmovsdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
@@ -7581,7 +7567,7 @@ _mm_maskz_cvtepi32_epi8 (__mmask8 __M, __m128i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS256
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
{
__builtin_ia32_pmovdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
@@ -8425,22 +8411,6 @@ _mm256_maskz_cvtph_ps (__mmask8 __U, __m128i __A)
(__mmask8) __U);
}
-static __inline __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtps_ph (__m128i __W, __mmask8 __U, __m128 __A)
-{
- return (__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf) __A, _MM_FROUND_CUR_DIRECTION,
- (__v8hi) __W,
- (__mmask8) __U);
-}
-
-static __inline __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtps_ph (__mmask8 __U, __m128 __A)
-{
- return (__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf) __A, _MM_FROUND_CUR_DIRECTION,
- (__v8hi) _mm_setzero_si128 (),
- (__mmask8) __U);
-}
-
#define _mm_mask_cvt_roundps_ph(W, U, A, I) \
(__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
(__v8hi)(__m128i)(W), \
@@ -8451,21 +8421,9 @@ _mm_maskz_cvtps_ph (__mmask8 __U, __m128 __A)
(__v8hi)_mm_setzero_si128(), \
(__mmask8)(U))
-static __inline __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtps_ph (__m128i __W, __mmask8 __U, __m256 __A)
-{
- return (__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf) __A, _MM_FROUND_CUR_DIRECTION,
- (__v8hi) __W,
- (__mmask8) __U);
-}
+#define _mm_mask_cvtps_ph _mm_mask_cvt_roundps_ph
+#define _mm_maskz_cvtps_ph _mm_maskz_cvt_roundps_ph
-static __inline __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtps_ph ( __mmask8 __U, __m256 __A)
-{
- return (__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf) __A, _MM_FROUND_CUR_DIRECTION,
- (__v8hi) _mm_setzero_si128(),
- (__mmask8) __U);
-}
#define _mm256_mask_cvt_roundps_ph(W, U, A, I) \
(__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
(__v8hi)(__m128i)(W), \
@@ -8476,6 +8434,9 @@ _mm256_maskz_cvtps_ph ( __mmask8 __U, __m256 __A)
(__v8hi)_mm_setzero_si128(), \
(__mmask8)(U))
+#define _mm256_mask_cvtps_ph _mm256_mask_cvt_roundps_ph
+#define _mm256_maskz_cvtps_ph _mm256_maskz_cvt_roundps_ph
+
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
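
Note on the hunk above: Intel's intrinsics guide documents _mm_mask_cvtps_ph, _mm_maskz_cvtps_ph and their 256-bit counterparts with an explicit rounding-control argument, which appears to be why the three-argument inline wrappers are dropped in favour of plain aliases of the *_cvt_roundps_ph macros. A minimal sketch of the resulting call, not part of the import (function name is illustrative; assumes a translation unit built with -mavx512vl):

#include <immintrin.h>

__m128i cvt_low_ps_to_ph(__m128i passthru, __mmask8 k, __m128 x) {
  /* _mm_mask_cvtps_ph now expands to _mm_mask_cvt_roundps_ph, so the
   * rounding-control immediate is passed as an explicit, compile-time
   * constant fourth argument; _MM_FROUND_CUR_DIRECTION recovers the old
   * behaviour. */
  return _mm_mask_cvtps_ph(passthru, k, x, _MM_FROUND_CUR_DIRECTION);
}
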
diff --git a/lib/Headers/avx512vlvbmi2intrin.h b/lib/Headers/avx512vlvbmi2intrin.h
index 632d14fb55aa..a40f926de75a 100644
--- a/lib/Headers/avx512vlvbmi2intrin.h
+++ b/lib/Headers/avx512vlvbmi2intrin.h
@@ -1,23 +1,9 @@
/*===------------- avx512vlvbmi2intrin.h - VBMI2 intrinsics -----------------===
*
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/avx512vlvnniintrin.h b/lib/Headers/avx512vlvnniintrin.h
index 62382268ec9f..b7c8fa08c653 100644
--- a/lib/Headers/avx512vlvnniintrin.h
+++ b/lib/Headers/avx512vlvnniintrin.h
@@ -1,23 +1,9 @@
/*===------------- avx512vlvnniintrin.h - VNNI intrinsics ------------------===
*
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/avx512vlvp2intersectintrin.h b/lib/Headers/avx512vlvp2intersectintrin.h
new file mode 100644
index 000000000000..3e0815e5d46f
--- /dev/null
+++ b/lib/Headers/avx512vlvp2intersectintrin.h
@@ -0,0 +1,121 @@
+/*===------ avx512vlvp2intersectintrin.h - VL VP2INTERSECT intrinsics ------===
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vlvp2intersectintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef _AVX512VLVP2INTERSECT_H
+#define _AVX512VLVP2INTERSECT_H
+
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vp2intersect"), \
+ __min_vector_width__(128)))
+
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vp2intersect"), \
+ __min_vector_width__(256)))
+/// Store, in an even/odd pair of mask registers, the indicators of the
+/// locations of value matches between dwords in operands __a and __b.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VP2INTERSECTD </c> instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32].
+/// \param __b
+/// A 256-bit vector of [8 x i32].
+/// \param __m0
+/// A pointer to an 8-bit mask.
+/// \param __m1
+/// A pointer to an 8-bit mask.
+static __inline__ void __DEFAULT_FN_ATTRS256
+_mm256_2intersect_epi32(__m256i __a, __m256i __b, __mmask8 *__m0, __mmask8 *__m1) {
+ __builtin_ia32_vp2intersect_d_256((__v8si)__a, (__v8si)__b, __m0, __m1);
+}
+
+/// Store, in an even/odd pair of mask registers, the indicators of the
+/// locations of value matches between quadwords in operands __a and __b.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VP2INTERSECTQ </c> instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64].
+/// \param __b
+/// A 256-bit vector of [4 x i64].
+/// \param __m0
+/// A pointer to an 8-bit mask.
+/// \param __m1
+/// A pointer to an 8-bit mask.
+static __inline__ void __DEFAULT_FN_ATTRS256
+_mm256_2intersect_epi64(__m256i __a, __m256i __b, __mmask8 *__m0, __mmask8 *__m1) {
+ __builtin_ia32_vp2intersect_q_256((__v4di)__a, (__v4di)__b, __m0, __m1);
+}
+
+/// Store, in an even/odd pair of mask registers, the indicators of the
+/// locations of value matches between dwords in operands __a and __b.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VP2INTERSECTD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x i32].
+/// \param __b
+/// A 128-bit vector of [4 x i32].
+/// \param __m0
+/// A pointer to an 8-bit mask.
+/// \param __m1
+/// A pointer to an 8-bit mask.
+static __inline__ void __DEFAULT_FN_ATTRS128
+_mm_2intersect_epi32(__m128i __a, __m128i __b, __mmask8 *__m0, __mmask8 *__m1) {
+ __builtin_ia32_vp2intersect_d_128((__v4si)__a, (__v4si)__b, __m0, __m1);
+}
+
+/// Store, in an even/odd pair of mask registers, the indicators of the
+/// locations of value matches between quadwords in operands __a and __b.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VP2INTERSECTQ </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x i64].
+/// \param __b
+/// A 128-bit vector of [2 x i64].
+/// \param __m0
+/// A pointer to an 8-bit mask.
+/// \param __m1
+/// A pointer to an 8-bit mask.
+static __inline__ void __DEFAULT_FN_ATTRS128
+_mm_2intersect_epi64(__m128i __a, __m128i __b, __mmask8 *__m0, __mmask8 *__m1) {
+ __builtin_ia32_vp2intersect_q_128((__v2di)__a, (__v2di)__b, __m0, __m1);
+}
+
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
+
+#endif
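
For readers unfamiliar with VP2INTERSECT, an illustrative use of the new 256-bit dword intrinsic follows. It is not part of the import and assumes hardware and flags that provide AVX512VP2INTERSECT and AVX512VL (e.g. clang -mavx512vp2intersect -mavx512vl):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m256i a = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8);
  __m256i b = _mm256_setr_epi32(8, 7, 0, 0, 0, 0, 0, 1);
  __mmask8 ma, mb;

  /* ma flags lanes of a whose value occurs somewhere in b; mb does the
   * converse for b, giving the even/odd mask pair VP2INTERSECTD writes. */
  _mm256_2intersect_epi32(a, b, &ma, &mb);

  printf("a-mask=0x%02x b-mask=0x%02x\n", (unsigned)ma, (unsigned)mb);
  return 0;
}
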
diff --git a/lib/Headers/avx512vnniintrin.h b/lib/Headers/avx512vnniintrin.h
index 620ef5a78959..9935a119aaca 100644
--- a/lib/Headers/avx512vnniintrin.h
+++ b/lib/Headers/avx512vnniintrin.h
@@ -1,23 +1,9 @@
/*===------------- avx512vnniintrin.h - VNNI intrinsics ------------------===
*
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/avx512vp2intersectintrin.h b/lib/Headers/avx512vp2intersectintrin.h
new file mode 100644
index 000000000000..5d3cb48cfd20
--- /dev/null
+++ b/lib/Headers/avx512vp2intersectintrin.h
@@ -0,0 +1,77 @@
+/*===------- avx512vp2intersectintrin.h - VP2INTERSECT intrinsics -----------===
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vp2intersect.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef _AVX512VP2INTERSECT_H
+#define _AVX512VP2INTERSECT_H
+
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("avx512vp2intersect"), \
+ __min_vector_width__(512)))
+
+/// Store, in an even/odd pair of mask registers, the indicators of the
+/// locations of value matches between dwords in operands __a and __b.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VP2INTERSECTD </c> instruction.
+///
+/// \param __a
+/// A 512-bit vector of [16 x i32].
+/// \param __b
+/// A 512-bit vector of [16 x i32].
+/// \param __m0
+/// A pointer to a 16-bit mask.
+/// \param __m1
+/// A pointer to a 16-bit mask.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_2intersect_epi32(__m512i __a, __m512i __b, __mmask16 *__m0, __mmask16 *__m1) {
+ __builtin_ia32_vp2intersect_d_512((__v16si)__a, (__v16si)__b, __m0, __m1);
+}
+
+/// Store, in an even/odd pair of mask registers, the indicators of the
+/// locations of value matches between quadwords in operands __a and __b.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VP2INTERSECTQ </c> instruction.
+///
+/// \param __a
+/// A 512-bit vector of [8 x i64].
+/// \param __b
+/// A 512-bit vector of [8 x i64].
+/// \param __m0
+/// A pointer to an 8-bit mask.
+/// \param __m1
+/// A pointer to an 8-bit mask.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_2intersect_epi64(__m512i __a, __m512i __b, __mmask8 *__m0, __mmask8 *__m1) {
+ __builtin_ia32_vp2intersect_q_512((__v8di)__a, (__v8di)__b, __m0, __m1);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
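
An illustrative use of the 512-bit quadword form, not part of the import; it assumes -mavx512vp2intersect -mavx512f and matching hardware:

#include <immintrin.h>

void intersect_epi64(const long long x[8], const long long y[8],
                     __mmask8 *mx, __mmask8 *my) {
  __m512i a = _mm512_loadu_si512(x);   /* 8 x i64, no alignment required */
  __m512i b = _mm512_loadu_si512(y);
  /* Writes the even/odd pair of result masks through mx and my. */
  _mm512_2intersect_epi64(a, b, mx, my);
}
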
diff --git a/lib/Headers/avx512vpopcntdqintrin.h b/lib/Headers/avx512vpopcntdqintrin.h
index c99f5945699e..bb435e623330 100644
--- a/lib/Headers/avx512vpopcntdqintrin.h
+++ b/lib/Headers/avx512vpopcntdqintrin.h
@@ -1,23 +1,9 @@
/*===----- avx512vpopcntdqintrin.h - AVX512VPOPCNTDQ intrinsics-------------===
*
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/avx512vpopcntdqvlintrin.h b/lib/Headers/avx512vpopcntdqvlintrin.h
index 681a75fa07cd..a3cb9b6bccb3 100644
--- a/lib/Headers/avx512vpopcntdqvlintrin.h
+++ b/lib/Headers/avx512vpopcntdqvlintrin.h
@@ -1,23 +1,9 @@
/*===---- avx512vpopcntdqintrin.h - AVX512VPOPCNTDQ intrinsics -------------===
*
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/avxintrin.h b/lib/Headers/avxintrin.h
index cb15396b3faf..a01240b9d157 100644
--- a/lib/Headers/avxintrin.h
+++ b/lib/Headers/avxintrin.h
@@ -1,22 +1,8 @@
/*===---- avxintrin.h - AVX intrinsics -------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -45,9 +31,13 @@ typedef unsigned char __v32qu __attribute__ ((__vector_size__ (32)));
* appear in the interface though. */
typedef signed char __v32qs __attribute__((__vector_size__(32)));
-typedef float __m256 __attribute__ ((__vector_size__ (32)));
-typedef double __m256d __attribute__((__vector_size__(32)));
-typedef long long __m256i __attribute__((__vector_size__(32)));
+typedef float __m256 __attribute__ ((__vector_size__ (32), __aligned__(32)));
+typedef double __m256d __attribute__((__vector_size__(32), __aligned__(32)));
+typedef long long __m256i __attribute__((__vector_size__(32), __aligned__(32)));
+
+typedef float __m256_u __attribute__ ((__vector_size__ (32), __aligned__(1)));
+typedef double __m256d_u __attribute__((__vector_size__(32), __aligned__(1)));
+typedef long long __m256i_u __attribute__((__vector_size__(32), __aligned__(1)));
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx"), __min_vector_width__(256)))
@@ -3113,7 +3103,7 @@ static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_loadu_pd(double const *__p)
{
struct __loadu_pd {
- __m256d __v;
+ __m256d_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_pd*)__p)->__v;
}
@@ -3133,7 +3123,7 @@ static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_loadu_ps(float const *__p)
{
struct __loadu_ps {
- __m256 __v;
+ __m256_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_ps*)__p)->__v;
}
@@ -3166,10 +3156,10 @@ _mm256_load_si256(__m256i const *__p)
/// A pointer to a 256-bit integer vector containing integer values.
/// \returns A 256-bit integer vector containing the moved values.
static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_loadu_si256(__m256i const *__p)
+_mm256_loadu_si256(__m256i_u const *__p)
{
struct __loadu_si256 {
- __m256i __v;
+ __m256i_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_si256*)__p)->__v;
}
@@ -3246,7 +3236,7 @@ static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu_pd(double *__p, __m256d __a)
{
struct __storeu_pd {
- __m256d __v;
+ __m256d_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_pd*)__p)->__v = __a;
}
@@ -3266,7 +3256,7 @@ static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu_ps(float *__p, __m256 __a)
{
struct __storeu_ps {
- __m256 __v;
+ __m256_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_ps*)__p)->__v = __a;
}
@@ -3301,10 +3291,10 @@ _mm256_store_si256(__m256i *__p, __m256i __a)
/// \param __a
/// A 256-bit integer vector containing the values to be moved.
static __inline void __DEFAULT_FN_ATTRS
-_mm256_storeu_si256(__m256i *__p, __m256i __a)
+_mm256_storeu_si256(__m256i_u *__p, __m256i __a)
{
struct __storeu_si256 {
- __m256i __v;
+ __m256i_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_si256*)__p)->__v = __a;
}
@@ -4834,7 +4824,7 @@ _mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
/// address of the memory location does not have to be aligned.
/// \returns A 256-bit integer vector containing the concatenated result.
static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_loadu2_m128i(__m128i const *__addr_hi, __m128i const *__addr_lo)
+_mm256_loadu2_m128i(__m128i_u const *__addr_hi, __m128i_u const *__addr_lo)
{
__m256i __v256 = _mm256_castsi128_si256(_mm_loadu_si128(__addr_lo));
return _mm256_insertf128_si256(__v256, _mm_loadu_si128(__addr_hi), 1);
@@ -4918,7 +4908,7 @@ _mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a)
/// \param __a
/// A 256-bit integer vector.
static __inline void __DEFAULT_FN_ATTRS
-_mm256_storeu2_m128i(__m128i *__addr_hi, __m128i *__addr_lo, __m256i __a)
+_mm256_storeu2_m128i(__m128i_u *__addr_hi, __m128i_u *__addr_lo, __m256i __a)
{
__m128i __v128;
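
The new __m256_u/__m256d_u/__m256i_u typedefs carry __aligned__(1), so the unaligned load/store intrinsics now state their no-alignment contract directly in their pointer parameters rather than only through the packed, may_alias wrapper structs. A short sketch, not part of the import (assumes -mavx):

#include <immintrin.h>

/* Copies 32 bytes between arbitrarily aligned buffers; __m256i_u is the
 * alignment-1 vector type the unaligned intrinsics now take in their
 * pointer parameters. */
void copy32_unaligned(void *dst, const void *src) {
  __m256i v = _mm256_loadu_si256((const __m256i_u *)src);
  _mm256_storeu_si256((__m256i_u *)dst, v);
}
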
diff --git a/lib/Headers/bmi2intrin.h b/lib/Headers/bmi2intrin.h
index fdae82cf2ba7..0b56aed5f4cb 100644
--- a/lib/Headers/bmi2intrin.h
+++ b/lib/Headers/bmi2intrin.h
@@ -1,22 +1,8 @@
/*===---- bmi2intrin.h - BMI2 intrinsics -----------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/bmiintrin.h b/lib/Headers/bmiintrin.h
index 56c20b78d340..b7af62f609ae 100644
--- a/lib/Headers/bmiintrin.h
+++ b/lib/Headers/bmiintrin.h
@@ -1,22 +1,8 @@
/*===---- bmiintrin.h - BMI intrinsics -------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/cetintrin.h b/lib/Headers/cetintrin.h
index 120c95424da1..4290e9d7355b 100644
--- a/lib/Headers/cetintrin.h
+++ b/lib/Headers/cetintrin.h
@@ -1,22 +1,8 @@
/*===---- cetintrin.h - CET intrinsic --------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/cldemoteintrin.h b/lib/Headers/cldemoteintrin.h
index fa78148ebf02..2413e7dea7a1 100644
--- a/lib/Headers/cldemoteintrin.h
+++ b/lib/Headers/cldemoteintrin.h
@@ -1,22 +1,8 @@
/*===---- cldemoteintrin.h - CLDEMOTE intrinsic ----------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/clflushoptintrin.h b/lib/Headers/clflushoptintrin.h
index 79bb4589fc75..060eb36f30f9 100644
--- a/lib/Headers/clflushoptintrin.h
+++ b/lib/Headers/clflushoptintrin.h
@@ -1,22 +1,8 @@
/*===---- clflushoptintrin.h - CLFLUSHOPT intrinsic ------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/clwbintrin.h b/lib/Headers/clwbintrin.h
index c09286ba6748..3360d203f7d7 100644
--- a/lib/Headers/clwbintrin.h
+++ b/lib/Headers/clwbintrin.h
@@ -1,22 +1,8 @@
/*===---- clwbintrin.h - CLWB intrinsic ------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/clzerointrin.h b/lib/Headers/clzerointrin.h
index f4e920839bee..a180984a3f28 100644
--- a/lib/Headers/clzerointrin.h
+++ b/lib/Headers/clzerointrin.h
@@ -1,22 +1,8 @@
/*===----------------------- clzerointrin.h - CLZERO ----------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/cpuid.h b/lib/Headers/cpuid.h
index fce6af52dd3f..02ffac26c0b3 100644
--- a/lib/Headers/cpuid.h
+++ b/lib/Headers/cpuid.h
@@ -1,22 +1,8 @@
/*===---- cpuid.h - X86 cpu model detection --------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -191,6 +177,7 @@
#define bit_CLDEMOTE 0x02000000
#define bit_MOVDIRI 0x08000000
#define bit_MOVDIR64B 0x10000000
+#define bit_ENQCMD 0x20000000
/* Features in %edx for leaf 7 sub-leaf 0 */
#define bit_AVX5124VNNIW 0x00000004
@@ -198,6 +185,9 @@
#define bit_PCONFIG 0x00040000
#define bit_IBT 0x00100000
+/* Features in %eax for leaf 7 sub-leaf 1 */
+#define bit_AVX512BF16 0x00000020
+
/* Features in %eax for leaf 13 sub-leaf 1 */
#define bit_XSAVEOPT 0x00000001
#define bit_XSAVEC 0x00000002
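
The new feature bits line up with CPUID leaf 7: ENQCMD is reported in %ecx of sub-leaf 0 and AVX512BF16 in %eax of sub-leaf 1. A probe sketch, not part of the import (x86 only; assumes the __get_cpuid_count helper this header provides):

#include <cpuid.h>
#include <stdio.h>

int main(void) {
  unsigned eax, ebx, ecx, edx;

  if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
    printf("ENQCMD     : %s\n", (ecx & bit_ENQCMD) ? "yes" : "no");

  if (__get_cpuid_count(7, 1, &eax, &ebx, &ecx, &edx))
    printf("AVX512BF16 : %s\n", (eax & bit_AVX512BF16) ? "yes" : "no");

  return 0;
}
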
diff --git a/lib/Headers/emmintrin.h b/lib/Headers/emmintrin.h
index 6d61f9719944..3d55f5f2710f 100644
--- a/lib/Headers/emmintrin.h
+++ b/lib/Headers/emmintrin.h
@@ -1,22 +1,8 @@
/*===---- emmintrin.h - SSE2 intrinsics ------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -26,8 +12,11 @@
#include <xmmintrin.h>
-typedef double __m128d __attribute__((__vector_size__(16)));
-typedef long long __m128i __attribute__((__vector_size__(16)));
+typedef double __m128d __attribute__((__vector_size__(16), __aligned__(16)));
+typedef long long __m128i __attribute__((__vector_size__(16), __aligned__(16)));
+
+typedef double __m128d_u __attribute__((__vector_size__(16), __aligned__(1)));
+typedef long long __m128i_u __attribute__((__vector_size__(16), __aligned__(1)));
/* Type defines. */
typedef double __v2df __attribute__ ((__vector_size__ (16)));
@@ -1652,7 +1641,7 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_loadu_pd(double const *__dp)
{
struct __loadu_pd {
- __m128d __v;
+ __m128d_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_pd*)__dp)->__v;
}
@@ -2042,7 +2031,7 @@ static __inline__ void __DEFAULT_FN_ATTRS
_mm_storeu_pd(double *__dp, __m128d __a)
{
struct __storeu_pd {
- __m128d __v;
+ __m128d_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_pd*)__dp)->__v = __a;
}
@@ -2316,11 +2305,7 @@ _mm_adds_epu16(__m128i __a, __m128i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_avg_epu8(__m128i __a, __m128i __b)
{
- typedef unsigned short __v16hu __attribute__ ((__vector_size__ (32)));
- return (__m128i)__builtin_convertvector(
- ((__builtin_convertvector((__v16qu)__a, __v16hu) +
- __builtin_convertvector((__v16qu)__b, __v16hu)) + 1)
- >> 1, __v16qu);
+ return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b);
}
/// Computes the rounded averages of corresponding elements of two
@@ -2340,11 +2325,7 @@ _mm_avg_epu8(__m128i __a, __m128i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_avg_epu16(__m128i __a, __m128i __b)
{
- typedef unsigned int __v8su __attribute__ ((__vector_size__ (32)));
- return (__m128i)__builtin_convertvector(
- ((__builtin_convertvector((__v8hu)__a, __v8su) +
- __builtin_convertvector((__v8hu)__b, __v8su)) + 1)
- >> 1, __v8hu);
+ return (__m128i)__builtin_ia32_pavgw128((__v8hi)__a, (__v8hi)__b);
}
/// Multiplies the corresponding elements of two 128-bit signed [8 x i16]
@@ -3564,10 +3545,10 @@ _mm_load_si128(__m128i const *__p)
/// A pointer to a memory location containing integer values.
/// \returns A 128-bit integer vector containing the moved values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si128(__m128i const *__p)
+_mm_loadu_si128(__m128i_u const *__p)
{
struct __loadu_si128 {
- __m128i __v;
+ __m128i_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_si128*)__p)->__v;
}
@@ -3585,7 +3566,7 @@ _mm_loadu_si128(__m128i const *__p)
/// \returns A 128-bit vector of [2 x i64]. The lower order bits contain the
/// moved value. The higher order bits are cleared.
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadl_epi64(__m128i const *__p)
+_mm_loadl_epi64(__m128i_u const *__p)
{
struct __mm_loadl_epi64_struct {
long long __u;
@@ -4027,10 +4008,10 @@ _mm_store_si128(__m128i *__p, __m128i __b)
/// \param __b
/// A 128-bit integer vector containing the values to be moved.
static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si128(__m128i *__p, __m128i __b)
+_mm_storeu_si128(__m128i_u *__p, __m128i __b)
{
struct __storeu_si128 {
- __m128i __v;
+ __m128i_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_si128*)__p)->__v = __b;
}
@@ -4139,7 +4120,7 @@ _mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p)
/// A 128-bit integer vector of [2 x i64]. The lower 64 bits contain the
/// value to be stored.
static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storel_epi64(__m128i *__p, __m128i __a)
+_mm_storel_epi64(__m128i_u *__p, __m128i __a)
{
struct __mm_storel_epi64_struct {
long long __u;
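
Besides the unaligned __m128i_u/__m128d_u pointer types, _mm_avg_epu8 and _mm_avg_epu16 return to the dedicated PAVG builtins instead of the widen/add/shift emulation, presumably because the generic sequence was not reliably matched back to a single instruction. The per-lane semantics are unchanged: an unsigned average rounded up. A reference sketch, not part of the import (assumes an SSE2 target, the default on x86-64):

#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_set1_epi8((char)250);
  __m128i b = _mm_set1_epi8((char)7);
  uint8_t out[16];

  /* Each byte lane computes (250 + 7 + 1) >> 1 == 129; note the new
   * __m128i_u pointer type in the unaligned store. */
  _mm_storeu_si128((__m128i_u *)out, _mm_avg_epu8(a, b));
  printf("%u\n", out[0]);
  return 0;
}
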
diff --git a/lib/Headers/enqcmdintrin.h b/lib/Headers/enqcmdintrin.h
new file mode 100644
index 000000000000..30af67f6b460
--- /dev/null
+++ b/lib/Headers/enqcmdintrin.h
@@ -0,0 +1,63 @@
+/*===------------------ enqcmdintrin.h - enqcmd intrinsics -----------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <enqcmdintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __ENQCMDINTRIN_H
+#define __ENQCMDINTRIN_H
+
+/* Define the default attributes for the functions in this file */
+#define _DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("enqcmd")))
+
+/// Reads the 64-byte command pointed to by \a __src, formats 64-byte enqueue
+/// store data, and performs a 64-byte enqueue store to the memory pointed to
+/// by \a __dst. This intrinsic may only be used in User mode.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> ENQCMD </c> instruction.
+///
+/// \param __dst
+/// Pointer to the destination of the enqueue store.
+/// \param __src
+/// Pointer to 64-byte command data.
+/// \returns If the command data is successfully written to \a __dst then 0 is
+/// returned. Otherwise 1 is returned.
+static __inline__ int _DEFAULT_FN_ATTRS
+_enqcmd (void *__dst, const void *__src)
+{
+ return __builtin_ia32_enqcmd(__dst, __src);
+}
+
+/// Reads the 64-byte command pointed to by \a __src, formats 64-byte enqueue
+/// store data, and performs a 64-byte enqueue store to the memory pointed to
+/// by \a __dst. This intrinsic may only be used in Privileged mode.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> ENQCMDS </c> instruction.
+///
+/// \param __dst
+/// Pointer to the destination of the enqueue store.
+/// \param __src
+/// Pointer to 64-byte command data.
+/// \returns If the command data is successfully written to \a __dst then 0 is
+/// returned. Otherwise 1 is returned.
+static __inline__ int _DEFAULT_FN_ATTRS
+_enqcmds (void *__dst, const void *__src)
+{
+ return __builtin_ia32_enqcmds(__dst, __src);
+}
+
+#undef _DEFAULT_FN_ATTRS
+
+#endif /* __ENQCMDINTRIN_H */
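
A usage sketch for the new header, not part of the import: ENQCMD needs supporting hardware and a device MMIO portal, so the pointers and the function name below are illustrative only; compile with -menqcmd.

#include <immintrin.h>

/* Returns 0 if the device accepted the 64-byte descriptor, 1 otherwise,
 * matching the documentation above; `portal` and `desc` are assumed names. */
int submit_descriptor(void *portal, const void *desc /* 64 bytes */) {
  return _enqcmd(portal, desc);
}
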
diff --git a/lib/Headers/f16cintrin.h b/lib/Headers/f16cintrin.h
index 3d35f28eb356..109b604adae3 100644
--- a/lib/Headers/f16cintrin.h
+++ b/lib/Headers/f16cintrin.h
@@ -1,22 +1,8 @@
/*===---- f16cintrin.h - F16C intrinsics -----------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -52,9 +38,9 @@
static __inline float __DEFAULT_FN_ATTRS128
_cvtsh_ss(unsigned short __a)
{
- __v8hi v = {(short)__a, 0, 0, 0, 0, 0, 0, 0};
- __v4sf r = __builtin_ia32_vcvtph2ps(v);
- return r[0];
+ __v8hi __v = {(short)__a, 0, 0, 0, 0, 0, 0, 0};
+ __v4sf __r = __builtin_ia32_vcvtph2ps(__v);
+ return __r[0];
}
/// Converts a 32-bit single-precision float value to a 16-bit
diff --git a/lib/Headers/float.h b/lib/Headers/float.h
index 56215cd624d7..ed610b24aa10 100644
--- a/lib/Headers/float.h
+++ b/lib/Headers/float.h
@@ -1,22 +1,8 @@
/*===---- float.h - Characteristics of floating point types ----------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -51,7 +37,7 @@
# undef FLT_MANT_DIG
# undef DBL_MANT_DIG
# undef LDBL_MANT_DIG
-# if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__)
+# if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || __cplusplus >= 201103L
# undef DECIMAL_DIG
# endif
# undef FLT_DIG
@@ -78,7 +64,7 @@
# undef FLT_MIN
# undef DBL_MIN
# undef LDBL_MIN
-# if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__)
+# if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || __cplusplus >= 201703L
# undef FLT_TRUE_MIN
# undef DBL_TRUE_MIN
# undef LDBL_TRUE_MIN
@@ -101,7 +87,7 @@
#define DBL_MANT_DIG __DBL_MANT_DIG__
#define LDBL_MANT_DIG __LDBL_MANT_DIG__
-#if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__)
+#if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || __cplusplus >= 201103L
# define DECIMAL_DIG __DECIMAL_DIG__
#endif
@@ -137,7 +123,7 @@
#define DBL_MIN __DBL_MIN__
#define LDBL_MIN __LDBL_MIN__
-#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__)
+#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || __cplusplus >= 201703L
# define FLT_TRUE_MIN __FLT_DENORM_MIN__
# define DBL_TRUE_MIN __DBL_DENORM_MIN__
# define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
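
The guard changes above make DECIMAL_DIG visible from C++11 and FLT_TRUE_MIN/DBL_TRUE_MIN/LDBL_TRUE_MIN from C++17 onward even in strict-ANSI C++ modes, where previously they required C99/C11 or -U__STRICT_ANSI__. A small sketch, not part of the import, printing the affected macros (shown in C; the same names now also appear under -std=c++17):

#include <float.h>
#include <stdio.h>

int main(void) {
  printf("DECIMAL_DIG  = %d\n", DECIMAL_DIG);
  printf("FLT_TRUE_MIN = %a\n", (double)FLT_TRUE_MIN);
  return 0;
}
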
diff --git a/lib/Headers/fma4intrin.h b/lib/Headers/fma4intrin.h
index 7bae2f4a3155..694801b3e8dd 100644
--- a/lib/Headers/fma4intrin.h
+++ b/lib/Headers/fma4intrin.h
@@ -1,22 +1,8 @@
/*===---- fma4intrin.h - FMA4 intrinsics -----------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/fmaintrin.h b/lib/Headers/fmaintrin.h
index 094d13afea09..d889b7c5e270 100644
--- a/lib/Headers/fmaintrin.h
+++ b/lib/Headers/fmaintrin.h
@@ -1,22 +1,8 @@
/*===---- fmaintrin.h - FMA intrinsics -------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/fxsrintrin.h b/lib/Headers/fxsrintrin.h
index 704b5ad60aa5..afee6aa97674 100644
--- a/lib/Headers/fxsrintrin.h
+++ b/lib/Headers/fxsrintrin.h
@@ -1,22 +1,8 @@
/*===---- fxsrintrin.h - FXSR intrinsic ------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/gfniintrin.h b/lib/Headers/gfniintrin.h
index 804d4f3d068c..9bff0fcb603e 100644
--- a/lib/Headers/gfniintrin.h
+++ b/lib/Headers/gfniintrin.h
@@ -1,23 +1,9 @@
/*===----------------- gfniintrin.h - GFNI intrinsics ----------------------===
*
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/htmintrin.h b/lib/Headers/htmintrin.h
index 69c8d7bb57f0..49c2b9860742 100644
--- a/lib/Headers/htmintrin.h
+++ b/lib/Headers/htmintrin.h
@@ -1,22 +1,8 @@
/*===---- htmintrin.h - Standard header for PowerPC HTM ---------------===*\
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
\*===----------------------------------------------------------------------===*/
diff --git a/lib/Headers/htmxlintrin.h b/lib/Headers/htmxlintrin.h
index 049dbd61df75..6ef6f4b34279 100644
--- a/lib/Headers/htmxlintrin.h
+++ b/lib/Headers/htmxlintrin.h
@@ -1,22 +1,8 @@
/*===---- htmxlintrin.h - XL compiler HTM execution intrinsics-------------===*\
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
\*===----------------------------------------------------------------------===*/
diff --git a/lib/Headers/ia32intrin.h b/lib/Headers/ia32intrin.h
index f8972e3053a3..8e38df73187d 100644
--- a/lib/Headers/ia32intrin.h
+++ b/lib/Headers/ia32intrin.h
@@ -1,22 +1,8 @@
/* ===-------- ia32intrin.h ---------------------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -28,6 +14,160 @@
#ifndef __IA32INTRIN_H
#define __IA32INTRIN_H
+/** Find the first set bit starting from the lsb. Result is undefined if
+ * input is 0.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the <c> BSF </c> instruction or the
+ * <c> TZCNT </c> instruction.
+ *
+ * \param __A
+ * A 32-bit integer operand.
+ * \returns A 32-bit integer containing the bit number.
+ */
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+__bsfd(int __A) {
+ return __builtin_ctz(__A);
+}
+
+/** Find the first set bit starting from the msb. Result is undefined if
+ * input is 0.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the <c> BSR </c> instruction or the
+ * <c> LZCNT </c> instruction and an <c> XOR </c>.
+ *
+ * \param __A
+ * A 32-bit integer operand.
+ * \returns A 32-bit integer containing the bit number.
+ */
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+__bsrd(int __A) {
+ return 31 - __builtin_clz(__A);
+}
+
+/** Swaps the bytes in the input, converting little endian to big endian or
+ * vice versa.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the <c> BSWAP </c> instruction.
+ *
+ * \param __A
+ * A 32-bit integer operand.
+ * \returns A 32-bit integer containing the swapped bytes.
+ */
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+__bswapd(int __A) {
+ return __builtin_bswap32(__A);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_bswap(int __A) {
+ return __builtin_bswap32(__A);
+}
+
+#define _bit_scan_forward(A) __bsfd((A))
+#define _bit_scan_reverse(A) __bsrd((A))
+
+#ifdef __x86_64__
+/** Find the first set bit starting from the lsb. Result is undefined if
+ * input is 0.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the <c> BSF </c> instruction or the
+ * <c> TZCNT </c> instruction.
+ *
+ * \param __A
+ * A 64-bit integer operand.
+ * \returns A 32-bit integer containing the bit number.
+ */
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+__bsfq(long long __A) {
+ return __builtin_ctzll(__A);
+}
+
+/** Find the first set bit starting from the msb. Result is undefined if
+ * input is 0.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the <c> BSR </c> instruction or the
+ * <c> LZCNT </c> instruction and an <c> XOR </c>.
+ *
+ * \param __A
+ * A 64-bit integer operand.
+ * \returns A 32-bit integer containing the bit number.
+ */
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+__bsrq(long long __A) {
+ return 63 - __builtin_clzll(__A);
+}
+
+/** Swaps the bytes in the input, converting little endian to big endian or
+ * vice versa.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the <c> BSWAP </c> instruction.
+ *
+ * \param __A
+ * A 64-bit integer operand.
+ * \returns A 64-bit integer containing the swapped bytes.
+ */
+static __inline__ long long __attribute__((__always_inline__, __nodebug__))
+__bswapq(long long __A) {
+ return __builtin_bswap64(__A);
+}
+
+#define _bswap64(A) __bswapq((A))
+#endif
+
+/** Counts the number of bits in the source operand having a value of 1.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the <c> POPCNT </c> instruction or a
+ * sequence of arithmetic and logic ops to calculate it.
+ *
+ * \param __A
+ * An unsigned 32-bit integer operand.
+ * \returns A 32-bit integer containing the number of bits with value 1 in the
+ * source operand.
+ */
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+__popcntd(unsigned int __A)
+{
+ return __builtin_popcount(__A);
+}
+
+#define _popcnt32(A) __popcntd((A))
+
+#ifdef __x86_64__
+/** Counts the number of bits in the source operand having a value of 1.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the <c> POPCNT </c> instruction or a
+ * sequence of arithmetic and logic ops to calculate it.
+ *
+ * \param __A
+ * An unsigned 64-bit integer operand.
+ * \returns A 64-bit integer containing the number of bits with value 1 in the
+ * source operand.
+ */
+static __inline__ long long __attribute__((__always_inline__, __nodebug__))
+__popcntq(unsigned long long __A)
+{
+ return __builtin_popcountll(__A);
+}
+
+#define _popcnt64(A) __popcntq((A))
+#endif /* __x86_64__ */
+
#ifdef __x86_64__
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
__readeflags(void)
@@ -55,6 +195,92 @@ __writeeflags(unsigned int __f)
}
#endif /* !__x86_64__ */
+/** Adds the unsigned integer operand to the CRC-32C checksum of the
+ * unsigned char operand.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the <c> CRC32B </c> instruction.
+ *
+ * \param __C
+ * An unsigned integer operand to add to the CRC-32C checksum of operand
+ * \a __D.
+ * \param __D
+ * An unsigned 8-bit integer operand used to compute the CRC-32C checksum.
+ * \returns The result of adding operand \a __C to the CRC-32C checksum of
+ * operand \a __D.
+ */
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+__crc32b(unsigned int __C, unsigned char __D)
+{
+ return __builtin_ia32_crc32qi(__C, __D);
+}
+
+/** Adds the unsigned integer operand to the CRC-32C checksum of the
+ * unsigned short operand.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the <c> CRC32W </c> instruction.
+ *
+ * \param __C
+ * An unsigned integer operand to add to the CRC-32C checksum of operand
+ * \a __D.
+ * \param __D
+ * An unsigned 16-bit integer operand used to compute the CRC-32C checksum.
+ * \returns The result of adding operand \a __C to the CRC-32C checksum of
+ * operand \a __D.
+ */
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+__crc32w(unsigned int __C, unsigned short __D)
+{
+ return __builtin_ia32_crc32hi(__C, __D);
+}
+
+/** Adds the unsigned integer operand to the CRC-32C checksum of the
+ * second unsigned integer operand.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the <c> CRC32D </c> instruction.
+ *
+ * \param __C
+ * An unsigned integer operand to add to the CRC-32C checksum of operand
+ * \a __D.
+ * \param __D
+ * An unsigned 32-bit integer operand used to compute the CRC-32C checksum.
+ * \returns The result of adding operand \a __C to the CRC-32C checksum of
+ * operand \a __D.
+ */
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+__crc32d(unsigned int __C, unsigned int __D)
+{
+ return __builtin_ia32_crc32si(__C, __D);
+}
+
+#ifdef __x86_64__
+/** Adds the unsigned integer operand to the CRC-32C checksum of the
+ * unsigned 64-bit integer operand.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the <c> CRC32Q </c> instruction.
+ *
+ * \param __C
+ * An unsigned integer operand to add to the CRC-32C checksum of operand
+ * \a __D.
+ * \param __D
+ * An unsigned 64-bit integer operand used to compute the CRC-32C checksum.
+ * \returns The result of adding operand \a __C to the CRC-32C checksum of
+ * operand \a __D.
+ */
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+__crc32q(unsigned long long __C, unsigned long long __D)
+{
+ return __builtin_ia32_crc32di(__C, __D);
+}
+#endif /* __x86_64__ */
+
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
__rdpmc(int __A) {
return __builtin_ia32_rdpmc(__A);
@@ -75,4 +301,64 @@ _wbinvd(void) {
__builtin_ia32_wbinvd();
}
+static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+__rolb(unsigned char __X, int __C) {
+ return __builtin_rotateleft8(__X, __C);
+}
+
+static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+__rorb(unsigned char __X, int __C) {
+ return __builtin_rotateright8(__X, __C);
+}
+
+static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
+__rolw(unsigned short __X, int __C) {
+ return __builtin_rotateleft16(__X, __C);
+}
+
+static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
+__rorw(unsigned short __X, int __C) {
+ return __builtin_rotateright16(__X, __C);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__rold(unsigned int __X, int __C) {
+ return __builtin_rotateleft32(__X, __C);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__rord(unsigned int __X, int __C) {
+ return __builtin_rotateright32(__X, __C);
+}
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__rolq(unsigned long long __X, int __C) {
+ return __builtin_rotateleft64(__X, __C);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__rorq(unsigned long long __X, int __C) {
+ return __builtin_rotateright64(__X, __C);
+}
+#endif /* __x86_64__ */
+
+#ifndef _MSC_VER
+/* These are already provided as builtins for MSVC. */
+/* Select the correct function based on the size of long. */
+#ifdef __LP64__
+#define _lrotl(a,b) __rolq((a), (b))
+#define _lrotr(a,b) __rorq((a), (b))
+#else
+#define _lrotl(a,b) __rold((a), (b))
+#define _lrotr(a,b) __rord((a), (b))
+#endif
+#define _rotl(a,b) __rold((a), (b))
+#define _rotr(a,b) __rord((a), (b))
+#endif // _MSC_VER
+
+/* These are not builtins so need to be provided in all modes. */
+#define _rotwl(a,b) __rolw((a), (b))
+#define _rotwr(a,b) __rorw((a), (b))
+
#endif /* __IA32INTRIN_H */
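[Editor's note: a minimal usage sketch of the intrinsics consolidated into ia32intrin.h above (bit scan, popcount, byte swap, rotate). It is an illustration, not part of the patch, and assumes an x86 clang target where <x86intrin.h> pulls in ia32intrin.h.]

#include <stdio.h>
#include <x86intrin.h>

int main(void) {
  unsigned int v = 0x00f0u;                                   /* bits 4..7 set */
  printf("lowest set bit : %d\n", __bsfd((int)v));            /* BSF/TZCNT -> 4 */
  printf("highest set bit: %d\n", __bsrd((int)v));            /* BSR       -> 7 */
  printf("popcount       : %d\n", __popcntd(v));              /* POPCNT    -> 4 */
  printf("bswap32        : 0x%08x\n",
         (unsigned)__bswapd(0x11223344));                     /* -> 0x44332211  */
  printf("rotl8(0x81, 1) : 0x%02x\n",
         (unsigned)__rolb(0x81, 1));                          /* -> 0x03        */
  return 0;
}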
diff --git a/lib/Headers/immintrin.h b/lib/Headers/immintrin.h
index 7d0722ec7652..7555ad82fac7 100644
--- a/lib/Headers/immintrin.h
+++ b/lib/Headers/immintrin.h
@@ -1,22 +1,8 @@
/*===---- immintrin.h - Intel intrinsics -----------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -195,6 +181,15 @@
#include <avx512pfintrin.h>
#endif
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BF16__)
+#include <avx512bf16intrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || \
+ (defined(__AVX512VL__) && defined(__AVX512BF16__))
+#include <avx512vlbf16intrin.h>
+#endif
+
#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PKU__)
#include <pkuintrin.h>
#endif
@@ -241,18 +236,6 @@ _rdrand64_step(unsigned long long *__p)
#endif
#endif /* __RDRND__ */
-/* __bit_scan_forward */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
-_bit_scan_forward(int __A) {
- return __builtin_ctz(__A);
-}
-
-/* __bit_scan_reverse */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
-_bit_scan_reverse(int __A) {
- return 31 - __builtin_clz(__A);
-}
-
#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FSGSBASE__)
#ifdef __x86_64__
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
@@ -378,9 +361,8 @@ _storebe_i64(void * __P, long long __D) {
#include <fxsrintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVE__)
+/* No feature check desired due to internal MSC_VER checks */
#include <xsaveintrin.h>
-#endif
#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVEOPT__)
#include <xsaveoptintrin.h>
@@ -439,7 +421,21 @@ _storebe_i64(void * __P, long long __D) {
#include <invpcidintrin.h>
#endif
-#ifdef _MSC_VER
+#if !defined(_MSC_VER) || __has_feature(modules) || \
+ defined(__AVX512VP2INTERSECT__)
+#include <avx512vp2intersectintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || \
+ (defined(__AVX512VL__) && defined(__AVX512VP2INTERSECT__))
+#include <avx512vlvp2intersectintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__ENQCMD__)
+#include <enqcmdintrin.h>
+#endif
+
+#if defined(_MSC_VER) && __has_extension(gnu_asm)
/* Define the default attributes for these intrinsics */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
#ifdef __cplusplus
@@ -521,6 +517,6 @@ _InterlockedCompareExchange64_HLERelease(__int64 volatile *_Destination,
#undef __DEFAULT_FN_ATTRS
-#endif /* _MSC_VER */
+#endif /* defined(_MSC_VER) && __has_extension(gnu_asm) */
#endif /* __IMMINTRIN_H */
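[Editor's note: a small sketch, not part of the patch, showing that _bit_scan_forward and _bit_scan_reverse remain usable through <immintrin.h> after this change; they are now macros from ia32intrin.h expanding to __bsfd/__bsrd rather than the inline functions removed above. Assumes an x86 clang target.]

#include <immintrin.h>

/* Returns the index of the least-significant set bit of x and stores the index
 * of the most-significant set bit in *hi. Undefined for x == 0, matching the
 * underlying BSF/BSR semantics. */
static int bit_extents(int x, int *hi) {
  *hi = _bit_scan_reverse(x);
  return _bit_scan_forward(x);
}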
diff --git a/lib/Headers/intrin.h b/lib/Headers/intrin.h
index c86f41faeb88..9786ba147fca 100644
--- a/lib/Headers/intrin.h
+++ b/lib/Headers/intrin.h
@@ -1,22 +1,8 @@
/* ===-------- intrin.h ---------------------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -200,10 +186,6 @@ __attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
_WriteBarrier(void);
unsigned __int32 xbegin(void);
void _xend(void);
-static __inline__
-#define _XCR_XFEATURE_ENABLED_MASK 0
-unsigned __int64 __cdecl _xgetbv(unsigned int);
-void __cdecl _xsetbv(unsigned int, unsigned __int64);
/* These additional intrinsics are turned on in x64/amd64/x86_64 mode. */
#ifdef __x86_64__
@@ -539,12 +521,6 @@ __cpuidex(int __info[4], int __level, int __ecx) {
__asm__ ("cpuid" : "=a"(__info[0]), "=b" (__info[1]), "=c"(__info[2]), "=d"(__info[3])
: "a"(__level), "c"(__ecx));
}
-static __inline__ unsigned __int64 __cdecl __DEFAULT_FN_ATTRS
-_xgetbv(unsigned int __xcr_no) {
- unsigned int __eax, __edx;
- __asm__ ("xgetbv" : "=a" (__eax), "=d" (__edx) : "c" (__xcr_no));
- return ((unsigned __int64)__edx << 32) | __eax;
-}
static __inline__ void __DEFAULT_FN_ATTRS
__halt(void) {
__asm__ volatile ("hlt");
@@ -564,18 +540,12 @@ __nop(void) {
#if defined(__aarch64__)
unsigned __int64 __getReg(int);
long _InterlockedAdd(long volatile *Addend, long Value);
-int _ReadStatusReg(int);
-void _WriteStatusReg(int, int);
+__int64 _ReadStatusReg(int);
+void _WriteStatusReg(int, __int64);
-static inline unsigned short _byteswap_ushort (unsigned short val) {
- return __builtin_bswap16(val);
-}
-static inline unsigned long _byteswap_ulong (unsigned long val) {
- return __builtin_bswap32(val);
-}
-static inline unsigned __int64 _byteswap_uint64 (unsigned __int64 val) {
- return __builtin_bswap64(val);
-}
+unsigned short __cdecl _byteswap_ushort(unsigned short val);
+unsigned long __cdecl _byteswap_ulong (unsigned long val);
+unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64 val);
#endif
/*----------------------------------------------------------------------------*\
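[Editor's note: the inline definitions removed above simply wrapped clang's byte-swap builtins. A stand-alone equivalent, shown only for context and independent of the MSVC-compatibility declarations that replace them:]

#include <stdint.h>

/* Equivalent of the dropped inline helpers, expressed directly in terms of the
 * builtins they used; works on any clang target without -fms-extensions. */
static inline uint16_t swap16(uint16_t v) { return __builtin_bswap16(v); }
static inline uint32_t swap32(uint32_t v) { return __builtin_bswap32(v); }
static inline uint64_t swap64(uint64_t v) { return __builtin_bswap64(v); }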
diff --git a/lib/Headers/inttypes.h b/lib/Headers/inttypes.h
index 1d8eabab0f8e..1c894c4aca49 100644
--- a/lib/Headers/inttypes.h
+++ b/lib/Headers/inttypes.h
@@ -1,27 +1,18 @@
/*===---- inttypes.h - Standard header for integer printf macros ----------===*\
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
\*===----------------------------------------------------------------------===*/
#ifndef __CLANG_INTTYPES_H
+// AIX system headers need inttypes.h to be re-enterable while _STD_TYPES_T
+// is defined until an inclusion of it without _STD_TYPES_T occurs, in which
+// case the header guard macro is defined.
+#if !defined(_AIX) || !defined(_STD_TYPES_T)
#define __CLANG_INTTYPES_H
+#endif
#if defined(_MSC_VER) && _MSC_VER < 1800
#error MSVC does not have inttypes.h prior to Visual Studio 2013
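[Editor's note: the AIX change above makes the header re-enterable: when it is first seen with _STD_TYPES_T defined, the guard macro is deliberately left undefined, so a later inclusion without _STD_TYPES_T still provides the full contents. A minimal sketch of the same pattern, using a hypothetical header name:]

/* example.h (hypothetical) */
#ifndef EXAMPLE_H
#if !defined(_AIX) || !defined(_STD_TYPES_T)
#define EXAMPLE_H          /* only latch the guard on a "full" inclusion */
#endif

/* ... declarations that must be visible on every qualifying pass ... */

#endif /* EXAMPLE_H */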
diff --git a/lib/Headers/invpcidintrin.h b/lib/Headers/invpcidintrin.h
index c30a19fa3d22..48dae0a86f92 100644
--- a/lib/Headers/invpcidintrin.h
+++ b/lib/Headers/invpcidintrin.h
@@ -1,22 +1,8 @@
/*===------------- invpcidintrin.h - INVPCID intrinsic ---------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/iso646.h b/lib/Headers/iso646.h
index dca13c5babe5..e0a20c6f1891 100644
--- a/lib/Headers/iso646.h
+++ b/lib/Headers/iso646.h
@@ -1,24 +1,8 @@
/*===---- iso646.h - Standard header for alternate spellings of operators---===
*
- * Copyright (c) 2008 Eli Friedman
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/limits.h b/lib/Headers/limits.h
index f04187ced26d..c653580bac4e 100644
--- a/lib/Headers/limits.h
+++ b/lib/Headers/limits.h
@@ -1,24 +1,8 @@
/*===---- limits.h - Standard header for integer sizes --------------------===*\
*
- * Copyright (c) 2009 Chris Lattner
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
\*===----------------------------------------------------------------------===*/
diff --git a/lib/Headers/lwpintrin.h b/lib/Headers/lwpintrin.h
index 0b28d7358295..d8ab0db03768 100644
--- a/lib/Headers/lwpintrin.h
+++ b/lib/Headers/lwpintrin.h
@@ -1,22 +1,8 @@
/*===---- lwpintrin.h - LWP intrinsics -------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/lzcntintrin.h b/lib/Headers/lzcntintrin.h
index 35c1651cc4a8..f4ddce9d0e68 100644
--- a/lib/Headers/lzcntintrin.h
+++ b/lib/Headers/lzcntintrin.h
@@ -1,22 +1,8 @@
/*===---- lzcntintrin.h - LZCNT intrinsics ---------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/mm3dnow.h b/lib/Headers/mm3dnow.h
index b0288757a396..22ab13aa3340 100644
--- a/lib/Headers/mm3dnow.h
+++ b/lib/Headers/mm3dnow.h
@@ -1,22 +1,8 @@
/*===---- mm3dnow.h - 3DNow! intrinsics ------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/mm_malloc.h b/lib/Headers/mm_malloc.h
index 305afd31adda..0ea32517aea8 100644
--- a/lib/Headers/mm_malloc.h
+++ b/lib/Headers/mm_malloc.h
@@ -1,22 +1,8 @@
/*===---- mm_malloc.h - Allocating and Freeing Aligned Memory Blocks -------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/mmintrin.h b/lib/Headers/mmintrin.h
index a73539942a92..79a8b55016b1 100644
--- a/lib/Headers/mmintrin.h
+++ b/lib/Headers/mmintrin.h
@@ -1,22 +1,8 @@
/*===---- mmintrin.h - MMX intrinsics --------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -24,7 +10,7 @@
#ifndef __MMINTRIN_H
#define __MMINTRIN_H
-typedef long long __m64 __attribute__((__vector_size__(8)));
+typedef long long __m64 __attribute__((__vector_size__(8), __aligned__(8)));
typedef long long __v1di __attribute__((__vector_size__(8)));
typedef int __v2si __attribute__((__vector_size__(8)));
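[Editor's note: an illustrative one-line check, assuming a C11 x86 build, of the effect of adding __aligned__(8) to the __m64 typedef above:]

#include <mmintrin.h>

_Static_assert(_Alignof(__m64) == 8, "__m64 is 8-byte aligned");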
diff --git a/lib/Headers/module.modulemap b/lib/Headers/module.modulemap
index 1d1af57fd030..7954a77a4125 100644
--- a/lib/Headers/module.modulemap
+++ b/lib/Headers/module.modulemap
@@ -1,22 +1,8 @@
/*===---- module.modulemap - intrinsics module map -------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -168,4 +154,5 @@ module _Builtin_stddef_max_align_t [system] [extern_c] {
module opencl_c {
requires opencl
header "opencl-c.h"
+ header "opencl-c-base.h"
}
diff --git a/lib/Headers/movdirintrin.h b/lib/Headers/movdirintrin.h
index ec20c53709bc..30c4d02c832d 100644
--- a/lib/Headers/movdirintrin.h
+++ b/lib/Headers/movdirintrin.h
@@ -1,22 +1,8 @@
/*===------------------------- movdirintrin.h ------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/msa.h b/lib/Headers/msa.h
index da680f5ca9ee..19ea6071aa93 100644
--- a/lib/Headers/msa.h
+++ b/lib/Headers/msa.h
@@ -1,22 +1,8 @@
/*===---- msa.h - MIPS MSA intrinsics --------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/mwaitxintrin.h b/lib/Headers/mwaitxintrin.h
index 2921eadfa540..bca395b0e0d1 100644
--- a/lib/Headers/mwaitxintrin.h
+++ b/lib/Headers/mwaitxintrin.h
@@ -1,22 +1,8 @@
/*===---- mwaitxintrin.h - MONITORX/MWAITX intrinsics ----------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/nmmintrin.h b/lib/Headers/nmmintrin.h
index 348fb8c7c18f..672aea496681 100644
--- a/lib/Headers/nmmintrin.h
+++ b/lib/Headers/nmmintrin.h
@@ -1,22 +1,8 @@
/*===---- nmmintrin.h - SSE4 intrinsics ------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/opencl-c-base.h b/lib/Headers/opencl-c-base.h
new file mode 100644
index 000000000000..a82954ddd326
--- /dev/null
+++ b/lib/Headers/opencl-c-base.h
@@ -0,0 +1,578 @@
+//===----- opencl-c-base.h - OpenCL C language base definitions -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _OPENCL_BASE_H_
+#define _OPENCL_BASE_H_
+
+// built-in scalar data types:
+
+/**
+ * An unsigned 8-bit integer.
+ */
+typedef unsigned char uchar;
+
+/**
+ * An unsigned 16-bit integer.
+ */
+typedef unsigned short ushort;
+
+/**
+ * An unsigned 32-bit integer.
+ */
+typedef unsigned int uint;
+
+/**
+ * An unsigned 64-bit integer.
+ */
+typedef unsigned long ulong;
+
+/**
+ * The unsigned integer type of the result of the sizeof operator. This
+ * is a 32-bit unsigned integer if CL_DEVICE_ADDRESS_BITS
+ * defined in table 4.3 is 32-bits and is a 64-bit unsigned integer if
+ * CL_DEVICE_ADDRESS_BITS is 64-bits.
+ */
+typedef __SIZE_TYPE__ size_t;
+
+/**
+ * A signed integer type that is the result of subtracting two pointers.
+ * This is a 32-bit signed integer if CL_DEVICE_ADDRESS_BITS
+ * defined in table 4.3 is 32-bits and is a 64-bit signed integer if
+ * CL_DEVICE_ADDRESS_BITS is 64-bits.
+ */
+typedef __PTRDIFF_TYPE__ ptrdiff_t;
+
+/**
+ * A signed integer type with the property that any valid pointer to
+ * void can be converted to this type, then converted back to pointer
+ * to void, and the result will compare equal to the original pointer.
+ */
+typedef __INTPTR_TYPE__ intptr_t;
+
+/**
+ * An unsigned integer type with the property that any valid pointer to
+ * void can be converted to this type, then converted back to pointer
+ * to void, and the result will compare equal to the original pointer.
+ */
+typedef __UINTPTR_TYPE__ uintptr_t;
+
+// built-in vector data types:
+typedef char char2 __attribute__((ext_vector_type(2)));
+typedef char char3 __attribute__((ext_vector_type(3)));
+typedef char char4 __attribute__((ext_vector_type(4)));
+typedef char char8 __attribute__((ext_vector_type(8)));
+typedef char char16 __attribute__((ext_vector_type(16)));
+typedef uchar uchar2 __attribute__((ext_vector_type(2)));
+typedef uchar uchar3 __attribute__((ext_vector_type(3)));
+typedef uchar uchar4 __attribute__((ext_vector_type(4)));
+typedef uchar uchar8 __attribute__((ext_vector_type(8)));
+typedef uchar uchar16 __attribute__((ext_vector_type(16)));
+typedef short short2 __attribute__((ext_vector_type(2)));
+typedef short short3 __attribute__((ext_vector_type(3)));
+typedef short short4 __attribute__((ext_vector_type(4)));
+typedef short short8 __attribute__((ext_vector_type(8)));
+typedef short short16 __attribute__((ext_vector_type(16)));
+typedef ushort ushort2 __attribute__((ext_vector_type(2)));
+typedef ushort ushort3 __attribute__((ext_vector_type(3)));
+typedef ushort ushort4 __attribute__((ext_vector_type(4)));
+typedef ushort ushort8 __attribute__((ext_vector_type(8)));
+typedef ushort ushort16 __attribute__((ext_vector_type(16)));
+typedef int int2 __attribute__((ext_vector_type(2)));
+typedef int int3 __attribute__((ext_vector_type(3)));
+typedef int int4 __attribute__((ext_vector_type(4)));
+typedef int int8 __attribute__((ext_vector_type(8)));
+typedef int int16 __attribute__((ext_vector_type(16)));
+typedef uint uint2 __attribute__((ext_vector_type(2)));
+typedef uint uint3 __attribute__((ext_vector_type(3)));
+typedef uint uint4 __attribute__((ext_vector_type(4)));
+typedef uint uint8 __attribute__((ext_vector_type(8)));
+typedef uint uint16 __attribute__((ext_vector_type(16)));
+typedef long long2 __attribute__((ext_vector_type(2)));
+typedef long long3 __attribute__((ext_vector_type(3)));
+typedef long long4 __attribute__((ext_vector_type(4)));
+typedef long long8 __attribute__((ext_vector_type(8)));
+typedef long long16 __attribute__((ext_vector_type(16)));
+typedef ulong ulong2 __attribute__((ext_vector_type(2)));
+typedef ulong ulong3 __attribute__((ext_vector_type(3)));
+typedef ulong ulong4 __attribute__((ext_vector_type(4)));
+typedef ulong ulong8 __attribute__((ext_vector_type(8)));
+typedef ulong ulong16 __attribute__((ext_vector_type(16)));
+typedef float float2 __attribute__((ext_vector_type(2)));
+typedef float float3 __attribute__((ext_vector_type(3)));
+typedef float float4 __attribute__((ext_vector_type(4)));
+typedef float float8 __attribute__((ext_vector_type(8)));
+typedef float float16 __attribute__((ext_vector_type(16)));
+#ifdef cl_khr_fp16
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+typedef half half2 __attribute__((ext_vector_type(2)));
+typedef half half3 __attribute__((ext_vector_type(3)));
+typedef half half4 __attribute__((ext_vector_type(4)));
+typedef half half8 __attribute__((ext_vector_type(8)));
+typedef half half16 __attribute__((ext_vector_type(16)));
+#endif
+#ifdef cl_khr_fp64
+#if __OPENCL_C_VERSION__ < CL_VERSION_1_2
+#pragma OPENCL EXTENSION cl_khr_fp64 : enable
+#endif
+typedef double double2 __attribute__((ext_vector_type(2)));
+typedef double double3 __attribute__((ext_vector_type(3)));
+typedef double double4 __attribute__((ext_vector_type(4)));
+typedef double double8 __attribute__((ext_vector_type(8)));
+typedef double double16 __attribute__((ext_vector_type(16)));
+#endif
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#define NULL ((void*)0)
+#endif
+
+/**
+ * Value of maximum non-infinite single-precision floating-point
+ * number.
+ */
+#define MAXFLOAT 0x1.fffffep127f
+
+/**
+ * A positive float constant expression. HUGE_VALF evaluates
+ * to +infinity. Used as an error value returned by the built-in
+ * math functions.
+ */
+#define HUGE_VALF (__builtin_huge_valf())
+
+/**
+ * A positive double constant expression. HUGE_VAL evaluates
+ * to +infinity. Used as an error value returned by the built-in
+ * math functions.
+ */
+#define HUGE_VAL (__builtin_huge_val())
+
+/**
+ * A constant expression of type float representing positive or
+ * unsigned infinity.
+ */
+#define INFINITY (__builtin_inff())
+
+/**
+ * A constant expression of type float representing a quiet NaN.
+ */
+#define NAN as_float(INT_MAX)
+
+#define FP_ILOGB0 INT_MIN
+#define FP_ILOGBNAN INT_MAX
+
+#define FLT_DIG 6
+#define FLT_MANT_DIG 24
+#define FLT_MAX_10_EXP +38
+#define FLT_MAX_EXP +128
+#define FLT_MIN_10_EXP -37
+#define FLT_MIN_EXP -125
+#define FLT_RADIX 2
+#define FLT_MAX 0x1.fffffep127f
+#define FLT_MIN 0x1.0p-126f
+#define FLT_EPSILON 0x1.0p-23f
+
+#define M_E_F 2.71828182845904523536028747135266250f
+#define M_LOG2E_F 1.44269504088896340735992468100189214f
+#define M_LOG10E_F 0.434294481903251827651128918916605082f
+#define M_LN2_F 0.693147180559945309417232121458176568f
+#define M_LN10_F 2.30258509299404568401799145468436421f
+#define M_PI_F 3.14159265358979323846264338327950288f
+#define M_PI_2_F 1.57079632679489661923132169163975144f
+#define M_PI_4_F 0.785398163397448309615660845819875721f
+#define M_1_PI_F 0.318309886183790671537767526745028724f
+#define M_2_PI_F 0.636619772367581343075535053490057448f
+#define M_2_SQRTPI_F 1.12837916709551257389615890312154517f
+#define M_SQRT2_F 1.41421356237309504880168872420969808f
+#define M_SQRT1_2_F 0.707106781186547524400844362104849039f
+
+#define DBL_DIG 15
+#define DBL_MANT_DIG 53
+#define DBL_MAX_10_EXP +308
+#define DBL_MAX_EXP +1024
+#define DBL_MIN_10_EXP -307
+#define DBL_MIN_EXP -1021
+#define DBL_RADIX 2
+#define DBL_MAX 0x1.fffffffffffffp1023
+#define DBL_MIN 0x1.0p-1022
+#define DBL_EPSILON 0x1.0p-52
+
+#define M_E 0x1.5bf0a8b145769p+1
+#define M_LOG2E 0x1.71547652b82fep+0
+#define M_LOG10E 0x1.bcb7b1526e50ep-2
+#define M_LN2 0x1.62e42fefa39efp-1
+#define M_LN10 0x1.26bb1bbb55516p+1
+#define M_PI 0x1.921fb54442d18p+1
+#define M_PI_2 0x1.921fb54442d18p+0
+#define M_PI_4 0x1.921fb54442d18p-1
+#define M_1_PI 0x1.45f306dc9c883p-2
+#define M_2_PI 0x1.45f306dc9c883p-1
+#define M_2_SQRTPI 0x1.20dd750429b6dp+0
+#define M_SQRT2 0x1.6a09e667f3bcdp+0
+#define M_SQRT1_2 0x1.6a09e667f3bcdp-1
+
+#ifdef cl_khr_fp16
+
+#define HALF_DIG 3
+#define HALF_MANT_DIG 11
+#define HALF_MAX_10_EXP +4
+#define HALF_MAX_EXP +16
+#define HALF_MIN_10_EXP -4
+#define HALF_MIN_EXP -13
+#define HALF_RADIX 2
+#define HALF_MAX ((0x1.ffcp15h))
+#define HALF_MIN ((0x1.0p-14h))
+#define HALF_EPSILON ((0x1.0p-10h))
+
+#define M_E_H 2.71828182845904523536028747135266250h
+#define M_LOG2E_H 1.44269504088896340735992468100189214h
+#define M_LOG10E_H 0.434294481903251827651128918916605082h
+#define M_LN2_H 0.693147180559945309417232121458176568h
+#define M_LN10_H 2.30258509299404568401799145468436421h
+#define M_PI_H 3.14159265358979323846264338327950288h
+#define M_PI_2_H 1.57079632679489661923132169163975144h
+#define M_PI_4_H 0.785398163397448309615660845819875721h
+#define M_1_PI_H 0.318309886183790671537767526745028724h
+#define M_2_PI_H 0.636619772367581343075535053490057448h
+#define M_2_SQRTPI_H 1.12837916709551257389615890312154517h
+#define M_SQRT2_H 1.41421356237309504880168872420969808h
+#define M_SQRT1_2_H 0.707106781186547524400844362104849039h
+
+#endif //cl_khr_fp16
+
+#define CHAR_BIT 8
+#define SCHAR_MAX 127
+#define SCHAR_MIN (-128)
+#define UCHAR_MAX 255
+#define CHAR_MAX SCHAR_MAX
+#define CHAR_MIN SCHAR_MIN
+#define USHRT_MAX 65535
+#define SHRT_MAX 32767
+#define SHRT_MIN (-32768)
+#define UINT_MAX 0xffffffff
+#define INT_MAX 2147483647
+#define INT_MIN (-2147483647-1)
+#define ULONG_MAX 0xffffffffffffffffUL
+#define LONG_MAX 0x7fffffffffffffffL
+#define LONG_MIN (-0x7fffffffffffffffL-1)
+
+// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions
+
+// Flag type and values for barrier, mem_fence, read_mem_fence, write_mem_fence
+typedef uint cl_mem_fence_flags;
+
+/**
+ * Queue a memory fence to ensure correct
+ * ordering of memory operations to local memory
+ */
+#define CLK_LOCAL_MEM_FENCE 0x01
+
+/**
+ * Queue a memory fence to ensure correct
+ * ordering of memory operations to global memory
+ */
+#define CLK_GLOBAL_MEM_FENCE 0x02
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+typedef enum memory_scope {
+ memory_scope_work_item = __OPENCL_MEMORY_SCOPE_WORK_ITEM,
+ memory_scope_work_group = __OPENCL_MEMORY_SCOPE_WORK_GROUP,
+ memory_scope_device = __OPENCL_MEMORY_SCOPE_DEVICE,
+ memory_scope_all_svm_devices = __OPENCL_MEMORY_SCOPE_ALL_SVM_DEVICES,
+#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups)
+ memory_scope_sub_group = __OPENCL_MEMORY_SCOPE_SUB_GROUP
+#endif
+} memory_scope;
+
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+/**
+ * Queue a memory fence to ensure correct ordering of memory
+ * operations between work-items of a work-group to
+ * image memory.
+ */
+#define CLK_IMAGE_MEM_FENCE 0x04
+
+#ifndef ATOMIC_VAR_INIT
+#define ATOMIC_VAR_INIT(x) (x)
+#endif //ATOMIC_VAR_INIT
+#define ATOMIC_FLAG_INIT 0
+
+// enum values aligned with what clang uses in EmitAtomicExpr()
+typedef enum memory_order
+{
+ memory_order_relaxed = __ATOMIC_RELAXED,
+ memory_order_acquire = __ATOMIC_ACQUIRE,
+ memory_order_release = __ATOMIC_RELEASE,
+ memory_order_acq_rel = __ATOMIC_ACQ_REL,
+ memory_order_seq_cst = __ATOMIC_SEQ_CST
+} memory_order;
+
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions
+
+// These values need to match the runtime equivalent
+//
+// Addressing Mode.
+//
+#define CLK_ADDRESS_NONE 0
+#define CLK_ADDRESS_CLAMP_TO_EDGE 2
+#define CLK_ADDRESS_CLAMP 4
+#define CLK_ADDRESS_REPEAT 6
+#define CLK_ADDRESS_MIRRORED_REPEAT 8
+
+//
+// Coordination Normalization
+//
+#define CLK_NORMALIZED_COORDS_FALSE 0
+#define CLK_NORMALIZED_COORDS_TRUE 1
+
+//
+// Filtering Mode.
+//
+#define CLK_FILTER_NEAREST 0x10
+#define CLK_FILTER_LINEAR 0x20
+
+#ifdef cl_khr_gl_msaa_sharing
+#pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable
+#endif //cl_khr_gl_msaa_sharing
+
+//
+// Channel Datatype.
+//
+#define CLK_SNORM_INT8 0x10D0
+#define CLK_SNORM_INT16 0x10D1
+#define CLK_UNORM_INT8 0x10D2
+#define CLK_UNORM_INT16 0x10D3
+#define CLK_UNORM_SHORT_565 0x10D4
+#define CLK_UNORM_SHORT_555 0x10D5
+#define CLK_UNORM_INT_101010 0x10D6
+#define CLK_SIGNED_INT8 0x10D7
+#define CLK_SIGNED_INT16 0x10D8
+#define CLK_SIGNED_INT32 0x10D9
+#define CLK_UNSIGNED_INT8 0x10DA
+#define CLK_UNSIGNED_INT16 0x10DB
+#define CLK_UNSIGNED_INT32 0x10DC
+#define CLK_HALF_FLOAT 0x10DD
+#define CLK_FLOAT 0x10DE
+#define CLK_UNORM_INT24 0x10DF
+
+// Channel order, numbering must be aligned with cl_channel_order in cl.h
+//
+#define CLK_R 0x10B0
+#define CLK_A 0x10B1
+#define CLK_RG 0x10B2
+#define CLK_RA 0x10B3
+#define CLK_RGB 0x10B4
+#define CLK_RGBA 0x10B5
+#define CLK_BGRA 0x10B6
+#define CLK_ARGB 0x10B7
+#define CLK_INTENSITY 0x10B8
+#define CLK_LUMINANCE 0x10B9
+#define CLK_Rx 0x10BA
+#define CLK_RGx 0x10BB
+#define CLK_RGBx 0x10BC
+#define CLK_DEPTH 0x10BD
+#define CLK_DEPTH_STENCIL 0x10BE
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#define CLK_sRGB 0x10BF
+#define CLK_sRGBx 0x10C0
+#define CLK_sRGBA 0x10C1
+#define CLK_sBGRA 0x10C2
+#define CLK_ABGR 0x10C3
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+// OpenCL v2.0 s6.13.16 - Pipe Functions
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#define CLK_NULL_RESERVE_ID (__builtin_astype(((void*)(__SIZE_MAX__)), reserve_id_t))
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+
+// OpenCL v2.0 s6.13.17 - Enqueue Kernels
+#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+#define CL_COMPLETE 0x0
+#define CL_RUNNING 0x1
+#define CL_SUBMITTED 0x2
+#define CL_QUEUED 0x3
+
+#define CLK_SUCCESS 0
+#define CLK_ENQUEUE_FAILURE -101
+#define CLK_INVALID_QUEUE -102
+#define CLK_INVALID_NDRANGE -160
+#define CLK_INVALID_EVENT_WAIT_LIST -57
+#define CLK_DEVICE_QUEUE_FULL -161
+#define CLK_INVALID_ARG_SIZE -51
+#define CLK_EVENT_ALLOCATION_FAILURE -100
+#define CLK_OUT_OF_RESOURCES -5
+
+#define CLK_NULL_QUEUE 0
+#define CLK_NULL_EVENT (__builtin_astype(((void*)(__SIZE_MAX__)), clk_event_t))
+
+// execution model related definitions
+#define CLK_ENQUEUE_FLAGS_NO_WAIT 0x0
+#define CLK_ENQUEUE_FLAGS_WAIT_KERNEL 0x1
+#define CLK_ENQUEUE_FLAGS_WAIT_WORK_GROUP 0x2
+
+typedef int kernel_enqueue_flags_t;
+typedef int clk_profiling_info;
+
+// Profiling info name (see capture_event_profiling_info)
+#define CLK_PROFILING_COMMAND_EXEC_TIME 0x1
+
+#define MAX_WORK_DIM 3
+
+typedef struct {
+ unsigned int workDimension;
+ size_t globalWorkOffset[MAX_WORK_DIM];
+ size_t globalWorkSize[MAX_WORK_DIM];
+ size_t localWorkSize[MAX_WORK_DIM];
+} ndrange_t;
+
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+
+#ifdef cl_intel_device_side_avc_motion_estimation
+#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : begin
+
+#define CLK_AVC_ME_MAJOR_16x16_INTEL 0x0
+#define CLK_AVC_ME_MAJOR_16x8_INTEL 0x1
+#define CLK_AVC_ME_MAJOR_8x16_INTEL 0x2
+#define CLK_AVC_ME_MAJOR_8x8_INTEL 0x3
+
+#define CLK_AVC_ME_MINOR_8x8_INTEL 0x0
+#define CLK_AVC_ME_MINOR_8x4_INTEL 0x1
+#define CLK_AVC_ME_MINOR_4x8_INTEL 0x2
+#define CLK_AVC_ME_MINOR_4x4_INTEL 0x3
+
+#define CLK_AVC_ME_MAJOR_FORWARD_INTEL 0x0
+#define CLK_AVC_ME_MAJOR_BACKWARD_INTEL 0x1
+#define CLK_AVC_ME_MAJOR_BIDIRECTIONAL_INTEL 0x2
+
+#define CLK_AVC_ME_PARTITION_MASK_ALL_INTEL 0x0
+#define CLK_AVC_ME_PARTITION_MASK_16x16_INTEL 0x7E
+#define CLK_AVC_ME_PARTITION_MASK_16x8_INTEL 0x7D
+#define CLK_AVC_ME_PARTITION_MASK_8x16_INTEL 0x7B
+#define CLK_AVC_ME_PARTITION_MASK_8x8_INTEL 0x77
+#define CLK_AVC_ME_PARTITION_MASK_8x4_INTEL 0x6F
+#define CLK_AVC_ME_PARTITION_MASK_4x8_INTEL 0x5F
+#define CLK_AVC_ME_PARTITION_MASK_4x4_INTEL 0x3F
+
+#define CLK_AVC_ME_SLICE_TYPE_PRED_INTEL 0x0
+#define CLK_AVC_ME_SLICE_TYPE_BPRED_INTEL 0x1
+#define CLK_AVC_ME_SLICE_TYPE_INTRA_INTEL 0x2
+
+#define CLK_AVC_ME_SEARCH_WINDOW_EXHAUSTIVE_INTEL 0x0
+#define CLK_AVC_ME_SEARCH_WINDOW_SMALL_INTEL 0x1
+#define CLK_AVC_ME_SEARCH_WINDOW_TINY_INTEL 0x2
+#define CLK_AVC_ME_SEARCH_WINDOW_EXTRA_TINY_INTEL 0x3
+#define CLK_AVC_ME_SEARCH_WINDOW_DIAMOND_INTEL 0x4
+#define CLK_AVC_ME_SEARCH_WINDOW_LARGE_DIAMOND_INTEL 0x5
+#define CLK_AVC_ME_SEARCH_WINDOW_RESERVED0_INTEL 0x6
+#define CLK_AVC_ME_SEARCH_WINDOW_RESERVED1_INTEL 0x7
+#define CLK_AVC_ME_SEARCH_WINDOW_CUSTOM_INTEL 0x8
+
+#define CLK_AVC_ME_SAD_ADJUST_MODE_NONE_INTEL 0x0
+#define CLK_AVC_ME_SAD_ADJUST_MODE_HAAR_INTEL 0x2
+
+#define CLK_AVC_ME_SUBPIXEL_MODE_INTEGER_INTEL 0x0
+#define CLK_AVC_ME_SUBPIXEL_MODE_HPEL_INTEL 0x1
+#define CLK_AVC_ME_SUBPIXEL_MODE_QPEL_INTEL 0x3
+
+#define CLK_AVC_ME_COST_PRECISION_QPEL_INTEL 0x0
+#define CLK_AVC_ME_COST_PRECISION_HPEL_INTEL 0x1
+#define CLK_AVC_ME_COST_PRECISION_PEL_INTEL 0x2
+#define CLK_AVC_ME_COST_PRECISION_DPEL_INTEL 0x3
+
+#define CLK_AVC_ME_BIDIR_WEIGHT_QUARTER_INTEL 0x10
+#define CLK_AVC_ME_BIDIR_WEIGHT_THIRD_INTEL 0x15
+#define CLK_AVC_ME_BIDIR_WEIGHT_HALF_INTEL 0x20
+#define CLK_AVC_ME_BIDIR_WEIGHT_TWO_THIRD_INTEL 0x2B
+#define CLK_AVC_ME_BIDIR_WEIGHT_THREE_QUARTER_INTEL 0x30
+
+#define CLK_AVC_ME_BORDER_REACHED_LEFT_INTEL 0x0
+#define CLK_AVC_ME_BORDER_REACHED_RIGHT_INTEL 0x2
+#define CLK_AVC_ME_BORDER_REACHED_TOP_INTEL 0x4
+#define CLK_AVC_ME_BORDER_REACHED_BOTTOM_INTEL 0x8
+
+#define CLK_AVC_ME_INTRA_16x16_INTEL 0x0
+#define CLK_AVC_ME_INTRA_8x8_INTEL 0x1
+#define CLK_AVC_ME_INTRA_4x4_INTEL 0x2
+
+#define CLK_AVC_ME_SKIP_BLOCK_PARTITION_16x16_INTEL 0x0
+#define CLK_AVC_ME_SKIP_BLOCK_PARTITION_8x8_INTEL 0x4000
+
+#define CLK_AVC_ME_SKIP_BLOCK_16x16_FORWARD_ENABLE_INTEL (0x1 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_16x16_BACKWARD_ENABLE_INTEL (0x2 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_16x16_DUAL_ENABLE_INTEL (0x3 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_FORWARD_ENABLE_INTEL (0x55 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_BACKWARD_ENABLE_INTEL (0xAA << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_DUAL_ENABLE_INTEL (0xFF << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_0_FORWARD_ENABLE_INTEL (0x1 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_0_BACKWARD_ENABLE_INTEL (0x2 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_1_FORWARD_ENABLE_INTEL (0x1 << 26)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_1_BACKWARD_ENABLE_INTEL (0x2 << 26)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_2_FORWARD_ENABLE_INTEL (0x1 << 28)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_2_BACKWARD_ENABLE_INTEL (0x2 << 28)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_3_FORWARD_ENABLE_INTEL (0x1 << 30)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_3_BACKWARD_ENABLE_INTEL (0x2 << 30)
+
+#define CLK_AVC_ME_BLOCK_BASED_SKIP_4x4_INTEL 0x00
+#define CLK_AVC_ME_BLOCK_BASED_SKIP_8x8_INTEL 0x80
+
+#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_ALL_INTEL 0x0
+#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_16x16_INTEL 0x6
+#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_8x8_INTEL 0x5
+#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_4x4_INTEL 0x3
+
+#define CLK_AVC_ME_INTRA_NEIGHBOR_LEFT_MASK_ENABLE_INTEL 0x60
+#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_MASK_ENABLE_INTEL 0x10
+#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_RIGHT_MASK_ENABLE_INTEL 0x8
+#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_LEFT_MASK_ENABLE_INTEL 0x4
+
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_INTEL 0x0
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DC_INTEL 0x2
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_LEFT_INTEL 0x3
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_RIGHT_INTEL 0x4
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_PLANE_INTEL 0x4
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_RIGHT_INTEL 0x5
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_DOWN_INTEL 0x6
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_LEFT_INTEL 0x7
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_UP_INTEL 0x8
+#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_DC_INTEL 0x0
+#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1
+#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_VERTICAL_INTEL 0x2
+#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_PLANE_INTEL 0x3
+
+#define CLK_AVC_ME_FRAME_FORWARD_INTEL 0x1
+#define CLK_AVC_ME_FRAME_BACKWARD_INTEL 0x2
+#define CLK_AVC_ME_FRAME_DUAL_INTEL 0x3
+
+#define CLK_AVC_ME_INTERLACED_SCAN_TOP_FIELD_INTEL 0x0
+#define CLK_AVC_ME_INTERLACED_SCAN_BOTTOM_FIELD_INTEL 0x1
+
+#define CLK_AVC_ME_INITIALIZE_INTEL 0x0
+
+#define CLK_AVC_IME_PAYLOAD_INITIALIZE_INTEL 0x0
+#define CLK_AVC_REF_PAYLOAD_INITIALIZE_INTEL 0x0
+#define CLK_AVC_SIC_PAYLOAD_INITIALIZE_INTEL 0x0
+
+#define CLK_AVC_IME_RESULT_INITIALIZE_INTEL 0x0
+#define CLK_AVC_REF_RESULT_INITIALIZE_INTEL 0x0
+#define CLK_AVC_SIC_RESULT_INITIALIZE_INTEL 0x0
+
+#define CLK_AVC_IME_RESULT_SINGLE_REFERENCE_STREAMOUT_INITIALIZE_INTEL 0x0
+#define CLK_AVC_IME_RESULT_SINGLE_REFERENCE_STREAMIN_INITIALIZE_INTEL 0x0
+#define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMOUT_INITIALIZE_INTEL 0x0
+#define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMIN_INITIALIZE_INTEL 0x0
+
+#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : end
+#endif // cl_intel_device_side_avc_motion_estimation
+
+#endif //_OPENCL_BASE_H_
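A minimal OpenCL C sketch (not part of the diff) of how the synchronization macros defined in the new opencl-c-base.h above are typically used together with the barrier() built-in still declared in opencl-c.h; the kernel name, argument names, and the reduction itself are hypothetical and only illustrate CLK_LOCAL_MEM_FENCE.

    __kernel void partial_sum(__global const int *in,
                              __global int *out,
                              __local int *scratch) {
      size_t lid = get_local_id(0);
      size_t gid = get_global_id(0);
      scratch[lid] = in[gid];

      // Every work-item in the work-group must reach this point before any
      // of them read the values written to local memory above.
      barrier(CLK_LOCAL_MEM_FENCE);

      if (lid == 0) {
        int sum = 0;
        for (size_t i = 0; i < get_local_size(0); ++i)
          sum += scratch[i];
        out[get_group_id(0)] = sum;
      }
    }
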
diff --git a/lib/Headers/opencl-c.h b/lib/Headers/opencl-c.h
index 160bae807174..4207c53ccedb 100644
--- a/lib/Headers/opencl-c.h
+++ b/lib/Headers/opencl-c.h
@@ -1,15 +1,16 @@
//===--- opencl-c.h - OpenCL C language builtin function header -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef _OPENCL_H_
#define _OPENCL_H_
+#include "opencl-c-base.h"
+
#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
#ifndef cl_khr_depth_images
#define cl_khr_depth_images
@@ -23,9 +24,6 @@
#endif //__OPENCL_C_VERSION__ < CL_VERSION_2_0
#if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
-#ifndef cl_intel_planar_yuv
-#define cl_intel_planar_yuv
-#endif // cl_intel_planar_yuv
#pragma OPENCL EXTENSION cl_intel_planar_yuv : begin
#pragma OPENCL EXTENSION cl_intel_planar_yuv : end
#endif // __OPENCL_C_VERSION__ >= CL_VERSION_1_2
@@ -37,255 +35,6 @@
#define __purefn __attribute__((pure))
#define __cnfn __attribute__((const))
-// built-in scalar data types:
-
-/**
- * An unsigned 8-bit integer.
- */
-typedef unsigned char uchar;
-
-/**
- * An unsigned 16-bit integer.
- */
-typedef unsigned short ushort;
-
-/**
- * An unsigned 32-bit integer.
- */
-typedef unsigned int uint;
-
-/**
- * An unsigned 64-bit integer.
- */
-typedef unsigned long ulong;
-
-/**
- * The unsigned integer type of the result of the sizeof operator. This
- * is a 32-bit unsigned integer if CL_DEVICE_ADDRESS_BITS
- * defined in table 4.3 is 32-bits and is a 64-bit unsigned integer if
- * CL_DEVICE_ADDRESS_BITS is 64-bits.
- */
-typedef __SIZE_TYPE__ size_t;
-
-/**
- * A signed integer type that is the result of subtracting two pointers.
- * This is a 32-bit signed integer if CL_DEVICE_ADDRESS_BITS
- * defined in table 4.3 is 32-bits and is a 64-bit signed integer if
- * CL_DEVICE_ADDRESS_BITS is 64-bits.
- */
-typedef __PTRDIFF_TYPE__ ptrdiff_t;
-
-/**
-* A signed integer type with the property that any valid pointer to
-* void can be converted to this type, then converted back to pointer
-* to void, and the result will compare equal to the original pointer.
-*/
-typedef __INTPTR_TYPE__ intptr_t;
-
-/**
-* An unsigned integer type with the property that any valid pointer to
-* void can be converted to this type, then converted back to pointer
-* to void, and the result will compare equal to the original pointer.
-*/
-typedef __UINTPTR_TYPE__ uintptr_t;
-
-// built-in vector data types:
-typedef char char2 __attribute__((ext_vector_type(2)));
-typedef char char3 __attribute__((ext_vector_type(3)));
-typedef char char4 __attribute__((ext_vector_type(4)));
-typedef char char8 __attribute__((ext_vector_type(8)));
-typedef char char16 __attribute__((ext_vector_type(16)));
-typedef uchar uchar2 __attribute__((ext_vector_type(2)));
-typedef uchar uchar3 __attribute__((ext_vector_type(3)));
-typedef uchar uchar4 __attribute__((ext_vector_type(4)));
-typedef uchar uchar8 __attribute__((ext_vector_type(8)));
-typedef uchar uchar16 __attribute__((ext_vector_type(16)));
-typedef short short2 __attribute__((ext_vector_type(2)));
-typedef short short3 __attribute__((ext_vector_type(3)));
-typedef short short4 __attribute__((ext_vector_type(4)));
-typedef short short8 __attribute__((ext_vector_type(8)));
-typedef short short16 __attribute__((ext_vector_type(16)));
-typedef ushort ushort2 __attribute__((ext_vector_type(2)));
-typedef ushort ushort3 __attribute__((ext_vector_type(3)));
-typedef ushort ushort4 __attribute__((ext_vector_type(4)));
-typedef ushort ushort8 __attribute__((ext_vector_type(8)));
-typedef ushort ushort16 __attribute__((ext_vector_type(16)));
-typedef int int2 __attribute__((ext_vector_type(2)));
-typedef int int3 __attribute__((ext_vector_type(3)));
-typedef int int4 __attribute__((ext_vector_type(4)));
-typedef int int8 __attribute__((ext_vector_type(8)));
-typedef int int16 __attribute__((ext_vector_type(16)));
-typedef uint uint2 __attribute__((ext_vector_type(2)));
-typedef uint uint3 __attribute__((ext_vector_type(3)));
-typedef uint uint4 __attribute__((ext_vector_type(4)));
-typedef uint uint8 __attribute__((ext_vector_type(8)));
-typedef uint uint16 __attribute__((ext_vector_type(16)));
-typedef long long2 __attribute__((ext_vector_type(2)));
-typedef long long3 __attribute__((ext_vector_type(3)));
-typedef long long4 __attribute__((ext_vector_type(4)));
-typedef long long8 __attribute__((ext_vector_type(8)));
-typedef long long16 __attribute__((ext_vector_type(16)));
-typedef ulong ulong2 __attribute__((ext_vector_type(2)));
-typedef ulong ulong3 __attribute__((ext_vector_type(3)));
-typedef ulong ulong4 __attribute__((ext_vector_type(4)));
-typedef ulong ulong8 __attribute__((ext_vector_type(8)));
-typedef ulong ulong16 __attribute__((ext_vector_type(16)));
-typedef float float2 __attribute__((ext_vector_type(2)));
-typedef float float3 __attribute__((ext_vector_type(3)));
-typedef float float4 __attribute__((ext_vector_type(4)));
-typedef float float8 __attribute__((ext_vector_type(8)));
-typedef float float16 __attribute__((ext_vector_type(16)));
-#ifdef cl_khr_fp16
-#pragma OPENCL EXTENSION cl_khr_fp16 : enable
-typedef half half2 __attribute__((ext_vector_type(2)));
-typedef half half3 __attribute__((ext_vector_type(3)));
-typedef half half4 __attribute__((ext_vector_type(4)));
-typedef half half8 __attribute__((ext_vector_type(8)));
-typedef half half16 __attribute__((ext_vector_type(16)));
-#endif
-#ifdef cl_khr_fp64
-#if __OPENCL_C_VERSION__ < CL_VERSION_1_2
-#pragma OPENCL EXTENSION cl_khr_fp64 : enable
-#endif
-typedef double double2 __attribute__((ext_vector_type(2)));
-typedef double double3 __attribute__((ext_vector_type(3)));
-typedef double double4 __attribute__((ext_vector_type(4)));
-typedef double double8 __attribute__((ext_vector_type(8)));
-typedef double double16 __attribute__((ext_vector_type(16)));
-#endif
-
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
-#define NULL ((void*)0)
-#endif
-
-/**
- * Value of maximum non-infinite single-precision floating-point
- * number.
- */
-#define MAXFLOAT 0x1.fffffep127f
-
-/**
- * A positive float constant expression. HUGE_VALF evaluates
- * to +infinity. Used as an error value returned by the built-in
- * math functions.
- */
-#define HUGE_VALF (__builtin_huge_valf())
-
-/**
- * A positive double constant expression. HUGE_VAL evaluates
- * to +infinity. Used as an error value returned by the built-in
- * math functions.
- */
-#define HUGE_VAL (__builtin_huge_val())
-
-/**
- * A constant expression of type float representing positive or
- * unsigned infinity.
- */
-#define INFINITY (__builtin_inff())
-
-/**
- * A constant expression of type float representing a quiet NaN.
- */
-#define NAN as_float(INT_MAX)
-
-#define FP_ILOGB0 INT_MIN
-#define FP_ILOGBNAN INT_MAX
-
-#define FLT_DIG 6
-#define FLT_MANT_DIG 24
-#define FLT_MAX_10_EXP +38
-#define FLT_MAX_EXP +128
-#define FLT_MIN_10_EXP -37
-#define FLT_MIN_EXP -125
-#define FLT_RADIX 2
-#define FLT_MAX 0x1.fffffep127f
-#define FLT_MIN 0x1.0p-126f
-#define FLT_EPSILON 0x1.0p-23f
-
-#define M_E_F 2.71828182845904523536028747135266250f
-#define M_LOG2E_F 1.44269504088896340735992468100189214f
-#define M_LOG10E_F 0.434294481903251827651128918916605082f
-#define M_LN2_F 0.693147180559945309417232121458176568f
-#define M_LN10_F 2.30258509299404568401799145468436421f
-#define M_PI_F 3.14159265358979323846264338327950288f
-#define M_PI_2_F 1.57079632679489661923132169163975144f
-#define M_PI_4_F 0.785398163397448309615660845819875721f
-#define M_1_PI_F 0.318309886183790671537767526745028724f
-#define M_2_PI_F 0.636619772367581343075535053490057448f
-#define M_2_SQRTPI_F 1.12837916709551257389615890312154517f
-#define M_SQRT2_F 1.41421356237309504880168872420969808f
-#define M_SQRT1_2_F 0.707106781186547524400844362104849039f
-
-#define DBL_DIG 15
-#define DBL_MANT_DIG 53
-#define DBL_MAX_10_EXP +308
-#define DBL_MAX_EXP +1024
-#define DBL_MIN_10_EXP -307
-#define DBL_MIN_EXP -1021
-#define DBL_RADIX 2
-#define DBL_MAX 0x1.fffffffffffffp1023
-#define DBL_MIN 0x1.0p-1022
-#define DBL_EPSILON 0x1.0p-52
-
-#define M_E 0x1.5bf0a8b145769p+1
-#define M_LOG2E 0x1.71547652b82fep+0
-#define M_LOG10E 0x1.bcb7b1526e50ep-2
-#define M_LN2 0x1.62e42fefa39efp-1
-#define M_LN10 0x1.26bb1bbb55516p+1
-#define M_PI 0x1.921fb54442d18p+1
-#define M_PI_2 0x1.921fb54442d18p+0
-#define M_PI_4 0x1.921fb54442d18p-1
-#define M_1_PI 0x1.45f306dc9c883p-2
-#define M_2_PI 0x1.45f306dc9c883p-1
-#define M_2_SQRTPI 0x1.20dd750429b6dp+0
-#define M_SQRT2 0x1.6a09e667f3bcdp+0
-#define M_SQRT1_2 0x1.6a09e667f3bcdp-1
-
-#ifdef cl_khr_fp16
-
-#define HALF_DIG 3
-#define HALF_MANT_DIG 11
-#define HALF_MAX_10_EXP +4
-#define HALF_MAX_EXP +16
-#define HALF_MIN_10_EXP -4
-#define HALF_MIN_EXP -13
-#define HALF_RADIX 2
-#define HALF_MAX ((0x1.ffcp15h))
-#define HALF_MIN ((0x1.0p-14h))
-#define HALF_EPSILON ((0x1.0p-10h))
-
-#define M_E_H 2.71828182845904523536028747135266250h
-#define M_LOG2E_H 1.44269504088896340735992468100189214h
-#define M_LOG10E_H 0.434294481903251827651128918916605082h
-#define M_LN2_H 0.693147180559945309417232121458176568h
-#define M_LN10_H 2.30258509299404568401799145468436421h
-#define M_PI_H 3.14159265358979323846264338327950288h
-#define M_PI_2_H 1.57079632679489661923132169163975144h
-#define M_PI_4_H 0.785398163397448309615660845819875721h
-#define M_1_PI_H 0.318309886183790671537767526745028724h
-#define M_2_PI_H 0.636619772367581343075535053490057448h
-#define M_2_SQRTPI_H 1.12837916709551257389615890312154517h
-#define M_SQRT2_H 1.41421356237309504880168872420969808h
-#define M_SQRT1_2_H 0.707106781186547524400844362104849039h
-
-#endif //cl_khr_fp16
-
-#define CHAR_BIT 8
-#define SCHAR_MAX 127
-#define SCHAR_MIN (-128)
-#define UCHAR_MAX 255
-#define CHAR_MAX SCHAR_MAX
-#define CHAR_MIN SCHAR_MIN
-#define USHRT_MAX 65535
-#define SHRT_MAX 32767
-#define SHRT_MIN (-32768)
-#define UINT_MAX 0xffffffff
-#define INT_MAX 2147483647
-#define INT_MIN (-2147483647-1)
-#define ULONG_MAX 0xffffffffffffffffUL
-#define LONG_MAX 0x7fffffffffffffffL
-#define LONG_MIN (-0x7fffffffffffffffL-1)
// OpenCL v1.1/1.2/2.0 s6.2.3 - Explicit conversions
@@ -9598,8 +9347,6 @@ long8 __ovld __cnfn clamp(long8 x, long8 minval, long8 maxval);
ulong8 __ovld __cnfn clamp(ulong8 x, ulong8 minval, ulong8 maxval);
long16 __ovld __cnfn clamp(long16 x, long16 minval, long16 maxval);
ulong16 __ovld __cnfn clamp(ulong16 x, ulong16 minval, ulong16 maxval);
-char __ovld __cnfn clamp(char x, char minval, char maxval);
-uchar __ovld __cnfn clamp(uchar x, uchar minval, uchar maxval);
char2 __ovld __cnfn clamp(char2 x, char minval, char maxval);
uchar2 __ovld __cnfn clamp(uchar2 x, uchar minval, uchar maxval);
char3 __ovld __cnfn clamp(char3 x, char minval, char maxval);
@@ -9610,8 +9357,6 @@ char8 __ovld __cnfn clamp(char8 x, char minval, char maxval);
uchar8 __ovld __cnfn clamp(uchar8 x, uchar minval, uchar maxval);
char16 __ovld __cnfn clamp(char16 x, char minval, char maxval);
uchar16 __ovld __cnfn clamp(uchar16 x, uchar minval, uchar maxval);
-short __ovld __cnfn clamp(short x, short minval, short maxval);
-ushort __ovld __cnfn clamp(ushort x, ushort minval, ushort maxval);
short2 __ovld __cnfn clamp(short2 x, short minval, short maxval);
ushort2 __ovld __cnfn clamp(ushort2 x, ushort minval, ushort maxval);
short3 __ovld __cnfn clamp(short3 x, short minval, short maxval);
@@ -9622,8 +9367,6 @@ short8 __ovld __cnfn clamp(short8 x, short minval, short maxval);
ushort8 __ovld __cnfn clamp(ushort8 x, ushort minval, ushort maxval);
short16 __ovld __cnfn clamp(short16 x, short minval, short maxval);
ushort16 __ovld __cnfn clamp(ushort16 x, ushort minval, ushort maxval);
-int __ovld __cnfn clamp(int x, int minval, int maxval);
-uint __ovld __cnfn clamp(uint x, uint minval, uint maxval);
int2 __ovld __cnfn clamp(int2 x, int minval, int maxval);
uint2 __ovld __cnfn clamp(uint2 x, uint minval, uint maxval);
int3 __ovld __cnfn clamp(int3 x, int minval, int maxval);
@@ -9634,8 +9377,6 @@ int8 __ovld __cnfn clamp(int8 x, int minval, int maxval);
uint8 __ovld __cnfn clamp(uint8 x, uint minval, uint maxval);
int16 __ovld __cnfn clamp(int16 x, int minval, int maxval);
uint16 __ovld __cnfn clamp(uint16 x, uint minval, uint maxval);
-long __ovld __cnfn clamp(long x, long minval, long maxval);
-ulong __ovld __cnfn clamp(ulong x, ulong minval, ulong maxval);
long2 __ovld __cnfn clamp(long2 x, long minval, long maxval);
ulong2 __ovld __cnfn clamp(ulong2 x, ulong minval, ulong maxval);
long3 __ovld __cnfn clamp(long3 x, long minval, long maxval);
@@ -9911,8 +9652,6 @@ long8 __ovld __cnfn max(long8 x, long8 y);
ulong8 __ovld __cnfn max(ulong8 x, ulong8 y);
long16 __ovld __cnfn max(long16 x, long16 y);
ulong16 __ovld __cnfn max(ulong16 x, ulong16 y);
-char __ovld __cnfn max(char x, char y);
-uchar __ovld __cnfn max(uchar x, uchar y);
char2 __ovld __cnfn max(char2 x, char y);
uchar2 __ovld __cnfn max(uchar2 x, uchar y);
char3 __ovld __cnfn max(char3 x, char y);
@@ -9923,8 +9662,6 @@ char8 __ovld __cnfn max(char8 x, char y);
uchar8 __ovld __cnfn max(uchar8 x, uchar y);
char16 __ovld __cnfn max(char16 x, char y);
uchar16 __ovld __cnfn max(uchar16 x, uchar y);
-short __ovld __cnfn max(short x, short y);
-ushort __ovld __cnfn max(ushort x, ushort y);
short2 __ovld __cnfn max(short2 x, short y);
ushort2 __ovld __cnfn max(ushort2 x, ushort y);
short3 __ovld __cnfn max(short3 x, short y);
@@ -9935,8 +9672,6 @@ short8 __ovld __cnfn max(short8 x, short y);
ushort8 __ovld __cnfn max(ushort8 x, ushort y);
short16 __ovld __cnfn max(short16 x, short y);
ushort16 __ovld __cnfn max(ushort16 x, ushort y);
-int __ovld __cnfn max(int x, int y);
-uint __ovld __cnfn max(uint x, uint y);
int2 __ovld __cnfn max(int2 x, int y);
uint2 __ovld __cnfn max(uint2 x, uint y);
int3 __ovld __cnfn max(int3 x, int y);
@@ -9947,8 +9682,6 @@ int8 __ovld __cnfn max(int8 x, int y);
uint8 __ovld __cnfn max(uint8 x, uint y);
int16 __ovld __cnfn max(int16 x, int y);
uint16 __ovld __cnfn max(uint16 x, uint y);
-long __ovld __cnfn max(long x, long y);
-ulong __ovld __cnfn max(ulong x, ulong y);
long2 __ovld __cnfn max(long2 x, long y);
ulong2 __ovld __cnfn max(ulong2 x, ulong y);
long3 __ovld __cnfn max(long3 x, long y);
@@ -10011,8 +9744,6 @@ long8 __ovld __cnfn min(long8 x, long8 y);
ulong8 __ovld __cnfn min(ulong8 x, ulong8 y);
long16 __ovld __cnfn min(long16 x, long16 y);
ulong16 __ovld __cnfn min(ulong16 x, ulong16 y);
-char __ovld __cnfn min(char x, char y);
-uchar __ovld __cnfn min(uchar x, uchar y);
char2 __ovld __cnfn min(char2 x, char y);
uchar2 __ovld __cnfn min(uchar2 x, uchar y);
char3 __ovld __cnfn min(char3 x, char y);
@@ -10023,8 +9754,6 @@ char8 __ovld __cnfn min(char8 x, char y);
uchar8 __ovld __cnfn min(uchar8 x, uchar y);
char16 __ovld __cnfn min(char16 x, char y);
uchar16 __ovld __cnfn min(uchar16 x, uchar y);
-short __ovld __cnfn min(short x, short y);
-ushort __ovld __cnfn min(ushort x, ushort y);
short2 __ovld __cnfn min(short2 x, short y);
ushort2 __ovld __cnfn min(ushort2 x, ushort y);
short3 __ovld __cnfn min(short3 x, short y);
@@ -10035,8 +9764,6 @@ short8 __ovld __cnfn min(short8 x, short y);
ushort8 __ovld __cnfn min(ushort8 x, ushort y);
short16 __ovld __cnfn min(short16 x, short y);
ushort16 __ovld __cnfn min(ushort16 x, ushort y);
-int __ovld __cnfn min(int x, int y);
-uint __ovld __cnfn min(uint x, uint y);
int2 __ovld __cnfn min(int2 x, int y);
uint2 __ovld __cnfn min(uint2 x, uint y);
int3 __ovld __cnfn min(int3 x, int y);
@@ -10047,8 +9774,6 @@ int8 __ovld __cnfn min(int8 x, int y);
uint8 __ovld __cnfn min(uint8 x, uint y);
int16 __ovld __cnfn min(int16 x, int y);
uint16 __ovld __cnfn min(uint16 x, uint y);
-long __ovld __cnfn min(long x, long y);
-ulong __ovld __cnfn min(ulong x, ulong y);
long2 __ovld __cnfn min(long2 x, long y);
ulong2 __ovld __cnfn min(ulong2 x, ulong y);
long3 __ovld __cnfn min(long3 x, long y);
@@ -10627,7 +10352,6 @@ half3 __ovld __cnfn step(half3 edge, half3 x);
half4 __ovld __cnfn step(half4 edge, half4 x);
half8 __ovld __cnfn step(half8 edge, half8 x);
half16 __ovld __cnfn step(half16 edge, half16 x);
-half __ovld __cnfn step(half edge, half x);
half2 __ovld __cnfn step(half edge, half2 x);
half3 __ovld __cnfn step(half edge, half3 x);
half4 __ovld __cnfn step(half edge, half4 x);
@@ -10679,7 +10403,6 @@ half3 __ovld __cnfn smoothstep(half3 edge0, half3 edge1, half3 x);
half4 __ovld __cnfn smoothstep(half4 edge0, half4 edge1, half4 x);
half8 __ovld __cnfn smoothstep(half8 edge0, half8 edge1, half8 x);
half16 __ovld __cnfn smoothstep(half16 edge0, half16 edge1, half16 x);
-half __ovld __cnfn smoothstep(half edge0, half edge1, half x);
half2 __ovld __cnfn smoothstep(half edge0, half edge1, half2 x);
half3 __ovld __cnfn smoothstep(half edge0, half edge1, half3 x);
half4 __ovld __cnfn smoothstep(half edge0, half edge1, half4 x);
@@ -12777,30 +12500,6 @@ void __ovld vstorea_half16_rtn(double16 data,size_t offset, __private half *p);
// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions
-// Flag type and values for barrier, mem_fence, read_mem_fence, write_mem_fence
-typedef uint cl_mem_fence_flags;
-
-/**
- * Queue a memory fence to ensure correct
- * ordering of memory operations to local memory
- */
-#define CLK_LOCAL_MEM_FENCE 0x01
-
-/**
- * Queue a memory fence to ensure correct
- * ordering of memory operations to global memory
- */
-#define CLK_GLOBAL_MEM_FENCE 0x02
-
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
-/**
- * Queue a memory fence to ensure correct ordering of memory
- * operations between work-items of a work-group to
- * image memory.
- */
-#define CLK_IMAGE_MEM_FENCE 0x04
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
-
/**
* All work-items in a work-group executing the kernel
* on a processor must execute this function before any
@@ -12834,17 +12533,6 @@ typedef uint cl_mem_fence_flags;
void __ovld __conv barrier(cl_mem_fence_flags flags);
#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
-
-typedef enum memory_scope {
- memory_scope_work_item = __OPENCL_MEMORY_SCOPE_WORK_ITEM,
- memory_scope_work_group = __OPENCL_MEMORY_SCOPE_WORK_GROUP,
- memory_scope_device = __OPENCL_MEMORY_SCOPE_DEVICE,
- memory_scope_all_svm_devices = __OPENCL_MEMORY_SCOPE_ALL_SVM_DEVICES,
-#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups)
- memory_scope_sub_group = __OPENCL_MEMORY_SCOPE_SUB_GROUP
-#endif
-} memory_scope;
-
void __ovld __conv work_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
void __ovld __conv work_group_barrier(cl_mem_fence_flags flags);
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
@@ -13341,6 +13029,10 @@ int __ovld atomic_add(volatile __global int *p, int val);
unsigned int __ovld atomic_add(volatile __global unsigned int *p, unsigned int val);
int __ovld atomic_add(volatile __local int *p, int val);
unsigned int __ovld atomic_add(volatile __local unsigned int *p, unsigned int val);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_add(volatile int *p, int val);
+unsigned int __ovld atomic_add(volatile unsigned int *p, unsigned int val);
+#endif
#if defined(cl_khr_global_int32_base_atomics)
int __ovld atom_add(volatile __global int *p, int val);
@@ -13367,6 +13059,10 @@ int __ovld atomic_sub(volatile __global int *p, int val);
unsigned int __ovld atomic_sub(volatile __global unsigned int *p, unsigned int val);
int __ovld atomic_sub(volatile __local int *p, int val);
unsigned int __ovld atomic_sub(volatile __local unsigned int *p, unsigned int val);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_sub(volatile int *p, int val);
+unsigned int __ovld atomic_sub(volatile unsigned int *p, unsigned int val);
+#endif
#if defined(cl_khr_global_int32_base_atomics)
int __ovld atom_sub(volatile __global int *p, int val);
@@ -13395,6 +13091,11 @@ int __ovld atomic_xchg(volatile __local int *p, int val);
unsigned int __ovld atomic_xchg(volatile __local unsigned int *p, unsigned int val);
float __ovld atomic_xchg(volatile __global float *p, float val);
float __ovld atomic_xchg(volatile __local float *p, float val);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_xchg(volatile int *p, int val);
+unsigned int __ovld atomic_xchg(volatile unsigned int *p, unsigned int val);
+float __ovld atomic_xchg(volatile float *p, float val);
+#endif
#if defined(cl_khr_global_int32_base_atomics)
int __ovld atom_xchg(volatile __global int *p, int val);
@@ -13422,6 +13123,10 @@ int __ovld atomic_inc(volatile __global int *p);
unsigned int __ovld atomic_inc(volatile __global unsigned int *p);
int __ovld atomic_inc(volatile __local int *p);
unsigned int __ovld atomic_inc(volatile __local unsigned int *p);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_inc(volatile int *p);
+unsigned int __ovld atomic_inc(volatile unsigned int *p);
+#endif
#if defined(cl_khr_global_int32_base_atomics)
int __ovld atom_inc(volatile __global int *p);
@@ -13449,6 +13154,10 @@ int __ovld atomic_dec(volatile __global int *p);
unsigned int __ovld atomic_dec(volatile __global unsigned int *p);
int __ovld atomic_dec(volatile __local int *p);
unsigned int __ovld atomic_dec(volatile __local unsigned int *p);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_dec(volatile int *p);
+unsigned int __ovld atomic_dec(volatile unsigned int *p);
+#endif
#if defined(cl_khr_global_int32_base_atomics)
int __ovld atom_dec(volatile __global int *p);
@@ -13477,6 +13186,10 @@ int __ovld atomic_cmpxchg(volatile __global int *p, int cmp, int val);
unsigned int __ovld atomic_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val);
int __ovld atomic_cmpxchg(volatile __local int *p, int cmp, int val);
unsigned int __ovld atomic_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_cmpxchg(volatile int *p, int cmp, int val);
+unsigned int __ovld atomic_cmpxchg(volatile unsigned int *p, unsigned int cmp, unsigned int val);
+#endif
#if defined(cl_khr_global_int32_base_atomics)
int __ovld atom_cmpxchg(volatile __global int *p, int cmp, int val);
@@ -13505,6 +13218,10 @@ int __ovld atomic_min(volatile __global int *p, int val);
unsigned int __ovld atomic_min(volatile __global unsigned int *p, unsigned int val);
int __ovld atomic_min(volatile __local int *p, int val);
unsigned int __ovld atomic_min(volatile __local unsigned int *p, unsigned int val);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_min(volatile int *p, int val);
+unsigned int __ovld atomic_min(volatile unsigned int *p, unsigned int val);
+#endif
#if defined(cl_khr_global_int32_extended_atomics)
int __ovld atom_min(volatile __global int *p, int val);
@@ -13533,6 +13250,10 @@ int __ovld atomic_max(volatile __global int *p, int val);
unsigned int __ovld atomic_max(volatile __global unsigned int *p, unsigned int val);
int __ovld atomic_max(volatile __local int *p, int val);
unsigned int __ovld atomic_max(volatile __local unsigned int *p, unsigned int val);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_max(volatile int *p, int val);
+unsigned int __ovld atomic_max(volatile unsigned int *p, unsigned int val);
+#endif
#if defined(cl_khr_global_int32_extended_atomics)
int __ovld atom_max(volatile __global int *p, int val);
@@ -13560,6 +13281,10 @@ int __ovld atomic_and(volatile __global int *p, int val);
unsigned int __ovld atomic_and(volatile __global unsigned int *p, unsigned int val);
int __ovld atomic_and(volatile __local int *p, int val);
unsigned int __ovld atomic_and(volatile __local unsigned int *p, unsigned int val);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_and(volatile int *p, int val);
+unsigned int __ovld atomic_and(volatile unsigned int *p, unsigned int val);
+#endif
#if defined(cl_khr_global_int32_extended_atomics)
int __ovld atom_and(volatile __global int *p, int val);
@@ -13587,6 +13312,10 @@ int __ovld atomic_or(volatile __global int *p, int val);
unsigned int __ovld atomic_or(volatile __global unsigned int *p, unsigned int val);
int __ovld atomic_or(volatile __local int *p, int val);
unsigned int __ovld atomic_or(volatile __local unsigned int *p, unsigned int val);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_or(volatile int *p, int val);
+unsigned int __ovld atomic_or(volatile unsigned int *p, unsigned int val);
+#endif
#if defined(cl_khr_global_int32_extended_atomics)
int __ovld atom_or(volatile __global int *p, int val);
@@ -13614,6 +13343,10 @@ int __ovld atomic_xor(volatile __global int *p, int val);
unsigned int __ovld atomic_xor(volatile __global unsigned int *p, unsigned int val);
int __ovld atomic_xor(volatile __local int *p, int val);
unsigned int __ovld atomic_xor(volatile __local unsigned int *p, unsigned int val);
+#ifdef __OPENCL_CPP_VERSION__
+int __ovld atomic_xor(volatile int *p, int val);
+unsigned int __ovld atomic_xor(volatile unsigned int *p, unsigned int val);
+#endif
#if defined(cl_khr_global_int32_extended_atomics)
int __ovld atom_xor(volatile __global int *p, int val);
@@ -13639,20 +13372,6 @@ unsigned long __ovld atom_xor(volatile __local unsigned long *p, unsigned long v
// OpenCL v2.0 s6.13.11 - Atomics Functions
#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
-#ifndef ATOMIC_VAR_INIT
-#define ATOMIC_VAR_INIT(x) (x)
-#endif //ATOMIC_VAR_INIT
-#define ATOMIC_FLAG_INIT 0
-
-// enum values aligned with what clang uses in EmitAtomicExpr()
-typedef enum memory_order
-{
- memory_order_relaxed = __ATOMIC_RELAXED,
- memory_order_acquire = __ATOMIC_ACQUIRE,
- memory_order_release = __ATOMIC_RELEASE,
- memory_order_acq_rel = __ATOMIC_ACQ_REL,
- memory_order_seq_cst = __ATOMIC_SEQ_CST
-} memory_order;
// double atomics support requires extensions cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
@@ -14475,28 +14194,6 @@ int printf(__constant const char* st, ...) __attribute__((format(printf, 1, 2)))
// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions
-// These values need to match the runtime equivalent
-//
-// Addressing Mode.
-//
-#define CLK_ADDRESS_NONE 0
-#define CLK_ADDRESS_CLAMP_TO_EDGE 2
-#define CLK_ADDRESS_CLAMP 4
-#define CLK_ADDRESS_REPEAT 6
-#define CLK_ADDRESS_MIRRORED_REPEAT 8
-
-//
-// Coordination Normalization
-//
-#define CLK_NORMALIZED_COORDS_FALSE 0
-#define CLK_NORMALIZED_COORDS_TRUE 1
-
-//
-// Filtering Mode.
-//
-#define CLK_FILTER_NEAREST 0x10
-#define CLK_FILTER_LINEAR 0x20
-
#ifdef cl_khr_gl_msaa_sharing
#pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable
#endif //cl_khr_gl_msaa_sharing
@@ -14712,30 +14409,6 @@ float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler,
int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
-
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
-
#endif //cl_khr_mipmap_image
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
@@ -14895,29 +14568,6 @@ float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler
int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
-
-float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-
-float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
#endif //cl_khr_mipmap_image
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
@@ -15332,26 +14982,6 @@ int __ovld get_image_num_mip_levels(read_write image2d_depth_t image);
* CLK_FLOAT
*/
-//
-// Channel Datatype.
-//
-#define CLK_SNORM_INT8 0x10D0
-#define CLK_SNORM_INT16 0x10D1
-#define CLK_UNORM_INT8 0x10D2
-#define CLK_UNORM_INT16 0x10D3
-#define CLK_UNORM_SHORT_565 0x10D4
-#define CLK_UNORM_SHORT_555 0x10D5
-#define CLK_UNORM_INT_101010 0x10D6
-#define CLK_SIGNED_INT8 0x10D7
-#define CLK_SIGNED_INT16 0x10D8
-#define CLK_SIGNED_INT32 0x10D9
-#define CLK_UNSIGNED_INT8 0x10DA
-#define CLK_UNSIGNED_INT16 0x10DB
-#define CLK_UNSIGNED_INT32 0x10DC
-#define CLK_HALF_FLOAT 0x10DD
-#define CLK_FLOAT 0x10DE
-#define CLK_UNORM_INT24 0x10DF
-
int __ovld __cnfn get_image_channel_data_type(read_only image1d_t image);
int __ovld __cnfn get_image_channel_data_type(read_only image1d_buffer_t image);
int __ovld __cnfn get_image_channel_data_type(read_only image2d_t image);
@@ -15423,30 +15053,6 @@ int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_dept
* CLK_INTENSITY
* CLK_LUMINANCE
*/
-// Channel order, numbering must be aligned with cl_channel_order in cl.h
-//
-#define CLK_R 0x10B0
-#define CLK_A 0x10B1
-#define CLK_RG 0x10B2
-#define CLK_RA 0x10B3
-#define CLK_RGB 0x10B4
-#define CLK_RGBA 0x10B5
-#define CLK_BGRA 0x10B6
-#define CLK_ARGB 0x10B7
-#define CLK_INTENSITY 0x10B8
-#define CLK_LUMINANCE 0x10B9
-#define CLK_Rx 0x10BA
-#define CLK_RGx 0x10BB
-#define CLK_RGBx 0x10BC
-#define CLK_DEPTH 0x10BD
-#define CLK_DEPTH_STENCIL 0x10BE
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
-#define CLK_sRGB 0x10BF
-#define CLK_sRGBx 0x10C0
-#define CLK_sRGBA 0x10C1
-#define CLK_sBGRA 0x10C2
-#define CLK_ABGR 0x10C3
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
int __ovld __cnfn get_image_channel_order(read_only image1d_t image);
int __ovld __cnfn get_image_channel_order(read_only image1d_buffer_t image);
@@ -15605,20 +15211,17 @@ size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_depth_t
#if defined(cl_khr_gl_msaa_sharing)
int __ovld get_image_num_samples(read_only image2d_msaa_t image);
int __ovld get_image_num_samples(read_only image2d_msaa_depth_t image);
-int __ovld get_image_num_samples(read_only image2d_array_msaa_depth_t image);
int __ovld get_image_num_samples(read_only image2d_array_msaa_t image);
int __ovld get_image_num_samples(read_only image2d_array_msaa_depth_t image);
int __ovld get_image_num_samples(write_only image2d_msaa_t image);
int __ovld get_image_num_samples(write_only image2d_msaa_depth_t image);
-int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image);
int __ovld get_image_num_samples(write_only image2d_array_msaa_t image);
int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image);
#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
int __ovld get_image_num_samples(read_write image2d_msaa_t image);
int __ovld get_image_num_samples(read_write image2d_msaa_depth_t image);
-int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image);
int __ovld get_image_num_samples(read_write image2d_array_msaa_t image);
int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image);
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
@@ -15728,7 +15331,6 @@ double __ovld __conv work_group_scan_inclusive_max(double x);
// OpenCL v2.0 s6.13.16 - Pipe Functions
#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
-#define CLK_NULL_RESERVE_ID (__builtin_astype(((void*)(__SIZE_MAX__)), reserve_id_t))
bool __ovld is_valid_reserve_id(reserve_id_t reserve_id);
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
@@ -15736,44 +15338,6 @@ bool __ovld is_valid_reserve_id(reserve_id_t reserve_id);
// OpenCL v2.0 s6.13.17 - Enqueue Kernels
#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
-#define CL_COMPLETE 0x0
-#define CL_RUNNING 0x1
-#define CL_SUBMITTED 0x2
-#define CL_QUEUED 0x3
-
-#define CLK_SUCCESS 0
-#define CLK_ENQUEUE_FAILURE -101
-#define CLK_INVALID_QUEUE -102
-#define CLK_INVALID_NDRANGE -160
-#define CLK_INVALID_EVENT_WAIT_LIST -57
-#define CLK_DEVICE_QUEUE_FULL -161
-#define CLK_INVALID_ARG_SIZE -51
-#define CLK_EVENT_ALLOCATION_FAILURE -100
-#define CLK_OUT_OF_RESOURCES -5
-
-#define CLK_NULL_QUEUE 0
-#define CLK_NULL_EVENT (__builtin_astype(((void*)(__SIZE_MAX__)), clk_event_t))
-
-// execution model related definitions
-#define CLK_ENQUEUE_FLAGS_NO_WAIT 0x0
-#define CLK_ENQUEUE_FLAGS_WAIT_KERNEL 0x1
-#define CLK_ENQUEUE_FLAGS_WAIT_WORK_GROUP 0x2
-
-typedef int kernel_enqueue_flags_t;
-typedef int clk_profiling_info;
-
-// Profiling info name (see capture_event_profiling_info)
-#define CLK_PROFILING_COMMAND_EXEC_TIME 0x1
-
-#define MAX_WORK_DIM 3
-
-typedef struct {
- unsigned int workDimension;
- size_t globalWorkOffset[MAX_WORK_DIM];
- size_t globalWorkSize[MAX_WORK_DIM];
- size_t localWorkSize[MAX_WORK_DIM];
-} ndrange_t;
-
ndrange_t __ovld ndrange_1D(size_t);
ndrange_t __ovld ndrange_1D(size_t, size_t);
ndrange_t __ovld ndrange_1D(size_t, size_t, size_t);
@@ -16216,138 +15780,6 @@ void __ovld __conv intel_sub_group_block_write_us8( __global ushort* p, u
#ifdef cl_intel_device_side_avc_motion_estimation
#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : begin
-#define CLK_AVC_ME_MAJOR_16x16_INTEL 0x0
-#define CLK_AVC_ME_MAJOR_16x8_INTEL 0x1
-#define CLK_AVC_ME_MAJOR_8x16_INTEL 0x2
-#define CLK_AVC_ME_MAJOR_8x8_INTEL 0x3
-
-#define CLK_AVC_ME_MINOR_8x8_INTEL 0x0
-#define CLK_AVC_ME_MINOR_8x4_INTEL 0x1
-#define CLK_AVC_ME_MINOR_4x8_INTEL 0x2
-#define CLK_AVC_ME_MINOR_4x4_INTEL 0x3
-
-#define CLK_AVC_ME_MAJOR_FORWARD_INTEL 0x0
-#define CLK_AVC_ME_MAJOR_BACKWARD_INTEL 0x1
-#define CLK_AVC_ME_MAJOR_BIDIRECTIONAL_INTEL 0x2
-
-#define CLK_AVC_ME_PARTITION_MASK_ALL_INTEL 0x0
-#define CLK_AVC_ME_PARTITION_MASK_16x16_INTEL 0x7E
-#define CLK_AVC_ME_PARTITION_MASK_16x8_INTEL 0x7D
-#define CLK_AVC_ME_PARTITION_MASK_8x16_INTEL 0x7B
-#define CLK_AVC_ME_PARTITION_MASK_8x8_INTEL 0x77
-#define CLK_AVC_ME_PARTITION_MASK_8x4_INTEL 0x6F
-#define CLK_AVC_ME_PARTITION_MASK_4x8_INTEL 0x5F
-#define CLK_AVC_ME_PARTITION_MASK_4x4_INTEL 0x3F
-
-#define CLK_AVC_ME_SLICE_TYPE_PRED_INTEL 0x0
-#define CLK_AVC_ME_SLICE_TYPE_BPRED_INTEL 0x1
-#define CLK_AVC_ME_SLICE_TYPE_INTRA_INTEL 0x2
-
-#define CLK_AVC_ME_SEARCH_WINDOW_EXHAUSTIVE_INTEL 0x0
-#define CLK_AVC_ME_SEARCH_WINDOW_SMALL_INTEL 0x1
-#define CLK_AVC_ME_SEARCH_WINDOW_TINY_INTEL 0x2
-#define CLK_AVC_ME_SEARCH_WINDOW_EXTRA_TINY_INTEL 0x3
-#define CLK_AVC_ME_SEARCH_WINDOW_DIAMOND_INTEL 0x4
-#define CLK_AVC_ME_SEARCH_WINDOW_LARGE_DIAMOND_INTEL 0x5
-#define CLK_AVC_ME_SEARCH_WINDOW_RESERVED0_INTEL 0x6
-#define CLK_AVC_ME_SEARCH_WINDOW_RESERVED1_INTEL 0x7
-#define CLK_AVC_ME_SEARCH_WINDOW_CUSTOM_INTEL 0x8
-
-#define CLK_AVC_ME_SAD_ADJUST_MODE_NONE_INTEL 0x0
-#define CLK_AVC_ME_SAD_ADJUST_MODE_HAAR_INTEL 0x2
-
-#define CLK_AVC_ME_SUBPIXEL_MODE_INTEGER_INTEL 0x0
-#define CLK_AVC_ME_SUBPIXEL_MODE_HPEL_INTEL 0x1
-#define CLK_AVC_ME_SUBPIXEL_MODE_QPEL_INTEL 0x3
-
-#define CLK_AVC_ME_COST_PRECISION_QPEL_INTEL 0x0
-#define CLK_AVC_ME_COST_PRECISION_HPEL_INTEL 0x1
-#define CLK_AVC_ME_COST_PRECISION_PEL_INTEL 0x2
-#define CLK_AVC_ME_COST_PRECISION_DPEL_INTEL 0x3
-
-#define CLK_AVC_ME_BIDIR_WEIGHT_QUARTER_INTEL 0x10
-#define CLK_AVC_ME_BIDIR_WEIGHT_THIRD_INTEL 0x15
-#define CLK_AVC_ME_BIDIR_WEIGHT_HALF_INTEL 0x20
-#define CLK_AVC_ME_BIDIR_WEIGHT_TWO_THIRD_INTEL 0x2B
-#define CLK_AVC_ME_BIDIR_WEIGHT_THREE_QUARTER_INTEL 0x30
-
-#define CLK_AVC_ME_BORDER_REACHED_LEFT_INTEL 0x0
-#define CLK_AVC_ME_BORDER_REACHED_RIGHT_INTEL 0x2
-#define CLK_AVC_ME_BORDER_REACHED_TOP_INTEL 0x4
-#define CLK_AVC_ME_BORDER_REACHED_BOTTOM_INTEL 0x8
-
-#define CLK_AVC_ME_INTRA_16x16_INTEL 0x0
-#define CLK_AVC_ME_INTRA_8x8_INTEL 0x1
-#define CLK_AVC_ME_INTRA_4x4_INTEL 0x2
-
-#define CLK_AVC_ME_SKIP_BLOCK_PARTITION_16x16_INTEL 0x0
-#define CLK_AVC_ME_SKIP_BLOCK_PARTITION_8x8_INTEL 0x4000
-
-#define CLK_AVC_ME_SKIP_BLOCK_16x16_FORWARD_ENABLE_INTEL (0x1 << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_16x16_BACKWARD_ENABLE_INTEL (0x2 << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_16x16_DUAL_ENABLE_INTEL (0x3 << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_FORWARD_ENABLE_INTEL (0x55 << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_BACKWARD_ENABLE_INTEL (0xAA << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_DUAL_ENABLE_INTEL (0xFF << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_0_FORWARD_ENABLE_INTEL (0x1 << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_0_BACKWARD_ENABLE_INTEL (0x2 << 24)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_1_FORWARD_ENABLE_INTEL (0x1 << 26)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_1_BACKWARD_ENABLE_INTEL (0x2 << 26)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_2_FORWARD_ENABLE_INTEL (0x1 << 28)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_2_BACKWARD_ENABLE_INTEL (0x2 << 28)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_3_FORWARD_ENABLE_INTEL (0x1 << 30)
-#define CLK_AVC_ME_SKIP_BLOCK_8x8_3_BACKWARD_ENABLE_INTEL (0x2 << 30)
-
-#define CLK_AVC_ME_BLOCK_BASED_SKIP_4x4_INTEL 0x00
-#define CLK_AVC_ME_BLOCK_BASED_SKIP_8x8_INTEL 0x80
-
-#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_ALL_INTEL 0x0
-#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_16x16_INTEL 0x6
-#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_8x8_INTEL 0x5
-#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_4x4_INTEL 0x3
-
-#define CLK_AVC_ME_INTRA_NEIGHBOR_LEFT_MASK_ENABLE_INTEL 0x60
-#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_MASK_ENABLE_INTEL 0x10
-#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_RIGHT_MASK_ENABLE_INTEL 0x8
-#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_LEFT_MASK_ENABLE_INTEL 0x4
-
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_INTEL 0x0
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DC_INTEL 0x2
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_LEFT_INTEL 0x3
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_RIGHT_INTEL 0x4
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_PLANE_INTEL 0x4
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_RIGHT_INTEL 0x5
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_DOWN_INTEL 0x6
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_LEFT_INTEL 0x7
-#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_UP_INTEL 0x8
-#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_DC_INTEL 0x0
-#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1
-#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_VERTICAL_INTEL 0x2
-#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_PLANE_INTEL 0x3
-
-#define CLK_AVC_ME_FRAME_FORWARD_INTEL 0x1
-#define CLK_AVC_ME_FRAME_BACKWARD_INTEL 0x2
-#define CLK_AVC_ME_FRAME_DUAL_INTEL 0x3
-
-#define CLK_AVC_ME_INTERLACED_SCAN_TOP_FIELD_INTEL 0x0
-#define CLK_AVC_ME_INTERLACED_SCAN_BOTTOM_FIELD_INTEL 0x1
-
-#define CLK_AVC_ME_INITIALIZE_INTEL 0x0
-
-#define CLK_AVC_IME_PAYLOAD_INITIALIZE_INTEL 0x0
-#define CLK_AVC_REF_PAYLOAD_INITIALIZE_INTEL 0x0
-#define CLK_AVC_SIC_PAYLOAD_INITIALIZE_INTEL 0x0
-
-#define CLK_AVC_IME_RESULT_INITIALIZE_INTEL 0x0
-#define CLK_AVC_REF_RESULT_INITIALIZE_INTEL 0x0
-#define CLK_AVC_SIC_RESULT_INITIALIZE_INTEL 0x0
-
-#define CLK_AVC_IME_RESULT_SINGLE_REFERENCE_STREAMOUT_INITIALIZE_INTEL 0x0
-#define CLK_AVC_IME_RESULT_SINGLE_REFERENCE_STREAMIN_INITIALIZE_INTEL 0x0
-#define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMOUT_INITIALIZE_INTEL 0x0
-#define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMIN_INITIALIZE_INTEL 0x0
-
// MCE built-in functions
uchar __ovld
intel_sub_group_avc_mce_get_default_inter_base_multi_reference_penalty(
@@ -17034,6 +16466,34 @@ uint8 __ovld amd_sadw(uint8 src0, uint8 src1, uint8 src2);
uint16 __ovld amd_sadw(uint16 src0, uint16 src1, uint16 src2);
#endif // cl_amd_media_ops2
+#if defined(cl_arm_integer_dot_product_int8)
+#pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : begin
+uint __ovld arm_dot(uchar4 a, uchar4 b);
+int __ovld arm_dot(char4 a, char4 b);
+#pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : end
+#endif // defined(cl_arm_integer_dot_product_int8)
+
+#if defined(cl_arm_integer_dot_product_accumulate_int8)
+#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : begin
+uint __ovld arm_dot_acc(uchar4 a, uchar4 b, uint c);
+int __ovld arm_dot_acc(char4 a, char4 b, int c);
+#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : end
+#endif // defined(cl_arm_integer_dot_product_accumulate_int8)
+
+#if defined(cl_arm_integer_dot_product_accumulate_int16)
+#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int16 : begin
+uint __ovld arm_dot_acc(ushort2 a, ushort2 b, uint c);
+int __ovld arm_dot_acc(short2 a, short2 b, int c);
+#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int16 : end
+#endif // defined(cl_arm_integer_dot_product_accumulate_int16)
+
+#if defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
+#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_saturate_int8 : begin
+uint __ovld arm_dot_acc_sat(uchar4 a, uchar4 b, uint c);
+int __ovld arm_dot_acc_sat(char4 a, char4 b, int c);
+#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_saturate_int8 : end
+#endif // defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
+
// Disable any extensions we may have enabled previously.
#pragma OPENCL EXTENSION all : disable
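A short OpenCL C sketch (not part of the diff) of the cl_arm_integer_dot_product built-ins declared in the hunk above; the kernel name and buffer layout are hypothetical, and the code assumes the device reports cl_arm_integer_dot_product_accumulate_int8.

    #if defined(cl_arm_integer_dot_product_accumulate_int8)
    __kernel void dot8_accumulate(__global const uchar4 *a,
                                  __global const uchar4 *b,
                                  __global uint *acc) {
      size_t gid = get_global_id(0);
      // arm_dot_acc multiplies the four uchar lanes pairwise, sums the
      // products, and adds the running accumulator in a single built-in.
      acc[gid] = arm_dot_acc(a[gid], b[gid], acc[gid]);
    }
    #endif
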
diff --git a/lib/Headers/openmp_wrappers/__clang_openmp_math.h b/lib/Headers/openmp_wrappers/__clang_openmp_math.h
new file mode 100644
index 000000000000..5d7ce9a965d3
--- /dev/null
+++ b/lib/Headers/openmp_wrappers/__clang_openmp_math.h
@@ -0,0 +1,35 @@
+/*===---- __clang_openmp_math.h - OpenMP target math support ---------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if defined(__NVPTX__) && defined(_OPENMP)
+/// TODO:
+/// We are currently reusing the functionality of the Clang-CUDA code path
+/// as an alternative to the host declarations provided by math.h and cmath.
+/// This is suboptimal.
+///
+/// We should instead declare the device functions in a similar way, e.g.,
+/// through OpenMP 5.0 variants, and afterwards populate the module with the
+/// host declarations by unconditionally including the host math.h or cmath,
+/// respectively. This is actually what the Clang-CUDA code path does, using
+/// __device__ instead of variants to avoid redeclarations and get the desired
+/// overload resolution.
+
+#define __CUDA__
+
+#if defined(__cplusplus)
+ #include <__clang_cuda_cmath.h>
+#endif
+
+#undef __CUDA__
+
+/// Magic macro for stopping the math.h/cmath host header from being included.
+#define __CLANG_NO_HOST_MATH__
+
+#endif
+
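For context, a minimal sketch of how these wrapper headers are expected to come into play, assuming an NVPTX OpenMP offload toolchain; the file name, the function name, and the choice of sin() are illustrative only and are not part of this change:

/* sketch.c -- illustrative only; assumes compilation with OpenMP offloading
   to an NVPTX device, e.g. -fopenmp -fopenmp-targets=nvptx64-nvidia-cuda.  */
#include <math.h>

double device_sine (double x)
{
  double y = 0.0;
  /* Inside the target region, sin() resolves against the CUDA-derived
     device declarations pulled in by the wrapper headers above.  */
  #pragma omp target map(to : x) map(from : y)
  { y = sin (x); }
  return y;
}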
diff --git a/lib/Headers/openmp_wrappers/__clang_openmp_math_declares.h b/lib/Headers/openmp_wrappers/__clang_openmp_math_declares.h
new file mode 100644
index 000000000000..a422c98bf97d
--- /dev/null
+++ b/lib/Headers/openmp_wrappers/__clang_openmp_math_declares.h
@@ -0,0 +1,33 @@
+/*===---- __clang_openmp_math_declares.h - OpenMP math declares ------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_MATH_DECLARES_H__
+#define __CLANG_OPENMP_MATH_DECLARES_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+#if defined(__NVPTX__) && defined(_OPENMP)
+
+#define __CUDA__
+
+#if defined(__cplusplus)
+ #include <__clang_cuda_math_forward_declares.h>
+#endif
+
+/// Include declarations for libdevice functions.
+#include <__clang_cuda_libdevice_declares.h>
+/// Provide definitions for these functions.
+#include <__clang_cuda_device_functions.h>
+
+#undef __CUDA__
+
+#endif
+#endif
diff --git a/lib/Headers/openmp_wrappers/cmath b/lib/Headers/openmp_wrappers/cmath
new file mode 100644
index 000000000000..a5183a1d8d1b
--- /dev/null
+++ b/lib/Headers/openmp_wrappers/cmath
@@ -0,0 +1,16 @@
+/*===-------------- cmath - Alternative cmath header -----------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#include <__clang_openmp_math.h>
+
+#ifndef __CLANG_NO_HOST_MATH__
+#include_next <cmath>
+#else
+#undef __CLANG_NO_HOST_MATH__
+#endif
diff --git a/lib/Headers/openmp_wrappers/math.h b/lib/Headers/openmp_wrappers/math.h
new file mode 100644
index 000000000000..d2786ecb2424
--- /dev/null
+++ b/lib/Headers/openmp_wrappers/math.h
@@ -0,0 +1,17 @@
+/*===------------- math.h - Alternative math.h header ----------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#include <__clang_openmp_math.h>
+
+#ifndef __CLANG_NO_HOST_MATH__
+#include_next <math.h>
+#else
+#undef __CLANG_NO_HOST_MATH__
+#endif
+
diff --git a/lib/Headers/pconfigintrin.h b/lib/Headers/pconfigintrin.h
index fee3cad38854..d2014b026f36 100644
--- a/lib/Headers/pconfigintrin.h
+++ b/lib/Headers/pconfigintrin.h
@@ -1,22 +1,8 @@
/*===---- pconfigintrin.h - X86 platform configuration ---------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -30,6 +16,8 @@
#define __PCONFIG_KEY_PROGRAM 0x00000001
+#if __has_extension(gnu_asm)
+
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("pconfig")))
@@ -47,4 +35,6 @@ _pconfig_u32(unsigned int __leaf, __SIZE_TYPE__ __d[])
#undef __DEFAULT_FN_ATTRS
+#endif /* __has_extension(gnu_asm) */
+
#endif
diff --git a/lib/Headers/pkuintrin.h b/lib/Headers/pkuintrin.h
index 6976924d829e..c62080becb0f 100644
--- a/lib/Headers/pkuintrin.h
+++ b/lib/Headers/pkuintrin.h
@@ -1,23 +1,9 @@
/*===---- pkuintrin.h - PKU intrinsics -------------------------------------===
*
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/pmmintrin.h b/lib/Headers/pmmintrin.h
index 7e1a9eae59f6..c376f298cc30 100644
--- a/lib/Headers/pmmintrin.h
+++ b/lib/Headers/pmmintrin.h
@@ -1,22 +1,8 @@
/*===---- pmmintrin.h - SSE3 intrinsics ------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/popcntintrin.h b/lib/Headers/popcntintrin.h
index 75ceab9e150b..312901014796 100644
--- a/lib/Headers/popcntintrin.h
+++ b/lib/Headers/popcntintrin.h
@@ -1,22 +1,8 @@
/*===---- popcntintrin.h - POPCNT intrinsics -------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -43,22 +29,6 @@ _mm_popcnt_u32(unsigned int __A)
return __builtin_popcount(__A);
}
-/// Counts the number of bits in the source operand having a value of 1.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> POPCNT </c> instruction.
-///
-/// \param __A
-/// A signed 32-bit integer operand.
-/// \returns A 32-bit integer containing the number of bits with value 1 in the
-/// source operand.
-static __inline__ int __DEFAULT_FN_ATTRS
-_popcnt32(int __A)
-{
- return __builtin_popcount(__A);
-}
-
#ifdef __x86_64__
/// Counts the number of bits in the source operand having a value of 1.
///
@@ -75,22 +45,6 @@ _mm_popcnt_u64(unsigned long long __A)
{
return __builtin_popcountll(__A);
}
-
-/// Counts the number of bits in the source operand having a value of 1.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> POPCNT </c> instruction.
-///
-/// \param __A
-/// A signed 64-bit integer operand.
-/// \returns A 64-bit integer containing the number of bits with value 1 in the
-/// source operand.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_popcnt64(long long __A)
-{
- return __builtin_popcountll(__A);
-}
#endif /* __x86_64__ */
#undef __DEFAULT_FN_ATTRS
diff --git a/lib/Headers/ppc_wrappers/emmintrin.h b/lib/Headers/ppc_wrappers/emmintrin.h
new file mode 100644
index 000000000000..617ce24acd3f
--- /dev/null
+++ b/lib/Headers/ppc_wrappers/emmintrin.h
@@ -0,0 +1,2318 @@
+/*===---- emmintrin.h - Implementation of SSE2 intrinsics on PowerPC -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+ User Guide and Reference, version 9.0. */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header file is intended to help port code that uses Intel
+   intrinsics explicitly from x86_64 to powerpc64/powerpc64le.
+
+   Since the X86 SSE2 intrinsics mainly handle the __m128i and __m128d
+   types, the PowerPC VMX/VSX ISA is a good match for vector float SIMD
+   operations. However scalar float operations in vector (XMM) registers
+   require the POWER8 VSX ISA (2.07) level. There are differences in data
+   format and placement of float scalars in the vector register, which
+   require extra steps to match SSE2 scalar float semantics on POWER.
+
+   It should be noted that there are significant differences between
+   X86_64's MXCSR and PowerISA's FPSCR/VSCR registers. It is recommended
+   to use the portable <fenv.h> interface instead of accessing MXCSR
+   directly.
+
+ Most SSE2 scalar float intrinsic operations can be performed more
+ efficiently as C language float scalar operations or optimized to
+ use vector SIMD operations. We recommend this for new applications.
+*/
+#error "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
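As an aside on the <fenv.h> recommendation above: rounding-mode and exception-flag handling that x86 code often performs by poking MXCSR can be expressed portably. A minimal sketch, illustrative only and not part of this header:

/* fenv_sketch.c -- illustrative only.  */
#include <fenv.h>
#include <stdio.h>

#pragma STDC FENV_ACCESS ON

int main (void)
{
  fesetround (FE_TOWARDZERO);      /* portable replacement for the MXCSR RC bits */
  feclearexcept (FE_ALL_EXCEPT);
  volatile double x = 1.0, y = 3.0;
  double q = x / y;
  if (fetestexcept (FE_INEXACT))   /* portable replacement for the MXCSR PE flag */
    printf ("inexact result: %g\n", q);
  fesetround (FE_TONEAREST);
  return 0;
}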
+
+#ifndef EMMINTRIN_H_
+#define EMMINTRIN_H_
+
+#include <altivec.h>
+
+/* We need definitions from the SSE header files. */
+#include <xmmintrin.h>
+
+/* SSE2 */
+typedef __vector double __v2df;
+typedef __vector long long __v2di;
+typedef __vector unsigned long long __v2du;
+typedef __vector int __v4si;
+typedef __vector unsigned int __v4su;
+typedef __vector short __v8hi;
+typedef __vector unsigned short __v8hu;
+typedef __vector signed char __v16qi;
+typedef __vector unsigned char __v16qu;
+
+/* The Intel API is flexible enough that we must allow aliasing with other
+ vector types, and their scalar components. */
+typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__));
+typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__));
+
+/* Unaligned version of the same types. */
+typedef long long __m128i_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1)));
+typedef double __m128d_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1)));
+
+/* Define two value permute mask. */
+#define _MM_SHUFFLE2(x,y) (((x) << 1) | (y))
+
+/* Create a vector with element 0 as F and the rest zero. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_sd (double __F)
+{
+ return __extension__ (__m128d){ __F, 0.0 };
+}
+
+/* Create a vector with both elements equal to F. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set1_pd (double __F)
+{
+ return __extension__ (__m128d){ __F, __F };
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_pd1 (double __F)
+{
+ return _mm_set1_pd (__F);
+}
+
+/* Create a vector with the lower value X and upper value W. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_pd (double __W, double __X)
+{
+ return __extension__ (__m128d){ __X, __W };
+}
+
+/* Create a vector with the lower value W and upper value X. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setr_pd (double __W, double __X)
+{
+ return __extension__ (__m128d){ __W, __X };
+}
+
+/* Create an undefined vector. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_undefined_pd (void)
+{
+ __m128d __Y = __Y;
+ return __Y;
+}
+
+/* Create a vector of zeros. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setzero_pd (void)
+{
+ return (__m128d) vec_splats (0);
+}
+
+/* Sets the low DPFP value of A from the low value of B. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_move_sd (__m128d __A, __m128d __B)
+{
+ __v2df result = (__v2df) __A;
+ result [0] = ((__v2df) __B)[0];
+ return (__m128d) result;
+}
+
+/* Load two DPFP values from P. The address must be 16-byte aligned. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_load_pd (double const *__P)
+{
+ return ((__m128d)vec_ld(0, (__v16qu*)__P));
+}
+
+/* Load two DPFP values from P. The address need not be 16-byte aligned. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadu_pd (double const *__P)
+{
+ return (vec_vsx_ld(0, __P));
+}
+
+/* Create a vector with both elements equal to *P. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_load1_pd (double const *__P)
+{
+ return (vec_splats (*__P));
+}
+
+/* Create a vector with element 0 as *P and the rest zero. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_load_sd (double const *__P)
+{
+ return _mm_set_sd (*__P);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_load_pd1 (double const *__P)
+{
+ return _mm_load1_pd (__P);
+}
+
+/* Load two DPFP values in reverse order. The address must be aligned. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadr_pd (double const *__P)
+{
+ __v2df __tmp = _mm_load_pd (__P);
+ return (__m128d)vec_xxpermdi (__tmp, __tmp, 2);
+}
+
+/* Store two DPFP values. The address must be 16-byte aligned. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_store_pd (double *__P, __m128d __A)
+{
+ vec_st((__v16qu)__A, 0, (__v16qu*)__P);
+}
+
+/* Store two DPFP values. The address need not be 16-byte aligned. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storeu_pd (double *__P, __m128d __A)
+{
+ *(__m128d_u *)__P = __A;
+}
+
+/* Stores the lower DPFP value. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_store_sd (double *__P, __m128d __A)
+{
+ *__P = ((__v2df)__A)[0];
+}
+
+extern __inline double __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsd_f64 (__m128d __A)
+{
+ return ((__v2df)__A)[0];
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storel_pd (double *__P, __m128d __A)
+{
+ _mm_store_sd (__P, __A);
+}
+
+/* Stores the upper DPFP value. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storeh_pd (double *__P, __m128d __A)
+{
+ *__P = ((__v2df)__A)[1];
+}
+/* Store the lower DPFP value across two words.
+ The address must be 16-byte aligned. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_store1_pd (double *__P, __m128d __A)
+{
+ _mm_store_pd (__P, vec_splat (__A, 0));
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_store_pd1 (double *__P, __m128d __A)
+{
+ _mm_store1_pd (__P, __A);
+}
+
+/* Store two DPFP values in reverse order. The address must be aligned. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storer_pd (double *__P, __m128d __A)
+{
+ _mm_store_pd (__P, vec_xxpermdi (__A, __A, 2));
+}
+
+/* Intel intrinsic. */
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi128_si64 (__m128i __A)
+{
+ return ((__v2di)__A)[0];
+}
+
+/* Microsoft intrinsic. */
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi128_si64x (__m128i __A)
+{
+ return ((__v2di)__A)[0];
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_add_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d) ((__v2df)__A + (__v2df)__B);
+}
+
+/* Add the lower double-precision (64-bit) floating-point element in
+ a and b, store the result in the lower element of dst, and copy
+ the upper element from a to the upper element of dst. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_add_sd (__m128d __A, __m128d __B)
+{
+ __A[0] = __A[0] + __B[0];
+ return (__A);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sub_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d) ((__v2df)__A - (__v2df)__B);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sub_sd (__m128d __A, __m128d __B)
+{
+ __A[0] = __A[0] - __B[0];
+ return (__A);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mul_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d) ((__v2df)__A * (__v2df)__B);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mul_sd (__m128d __A, __m128d __B)
+{
+ __A[0] = __A[0] * __B[0];
+ return (__A);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_div_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d) ((__v2df)__A / (__v2df)__B);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_div_sd (__m128d __A, __m128d __B)
+{
+ __A[0] = __A[0] / __B[0];
+ return (__A);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sqrt_pd (__m128d __A)
+{
+ return (vec_sqrt (__A));
+}
+
+/* Return pair {sqrt (B[0]), A[1]}. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sqrt_sd (__m128d __A, __m128d __B)
+{
+ __v2df c;
+ c = vec_sqrt ((__v2df) _mm_set1_pd (__B[0]));
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_pd (__m128d __A, __m128d __B)
+{
+ return (vec_min (__A, __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ c = vec_min (a, b);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_pd (__m128d __A, __m128d __B)
+{
+ return (vec_max (__A, __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ c = vec_max (a, b);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpeq_pd (__m128d __A, __m128d __B)
+{
+ return ((__m128d)vec_cmpeq ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_pd (__m128d __A, __m128d __B)
+{
+ return ((__m128d)vec_cmplt ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmple_pd (__m128d __A, __m128d __B)
+{
+ return ((__m128d)vec_cmple ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpgt_pd (__m128d __A, __m128d __B)
+{
+ return ((__m128d)vec_cmpgt ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpge_pd (__m128d __A, __m128d __B)
+{
+ return ((__m128d)vec_cmpge ((__v2df) __A,(__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpneq_pd (__m128d __A, __m128d __B)
+{
+ __v2df temp = (__v2df) vec_cmpeq ((__v2df) __A, (__v2df)__B);
+ return ((__m128d)vec_nor (temp, temp));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnlt_pd (__m128d __A, __m128d __B)
+{
+ return ((__m128d)vec_cmpge ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnle_pd (__m128d __A, __m128d __B)
+{
+ return ((__m128d)vec_cmpgt ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpngt_pd (__m128d __A, __m128d __B)
+{
+ return ((__m128d)vec_cmple ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnge_pd (__m128d __A, __m128d __B)
+{
+ return ((__m128d)vec_cmplt ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpord_pd (__m128d __A, __m128d __B)
+{
+#if _ARCH_PWR8
+ __v2du c, d;
+ /* Compare against self will return false (0's) if NAN. */
+ c = (__v2du)vec_cmpeq (__A, __A);
+ d = (__v2du)vec_cmpeq (__B, __B);
+#else
+ __v2du a, b;
+ __v2du c, d;
+ const __v2du double_exp_mask = {0x7ff0000000000000, 0x7ff0000000000000};
+ a = (__v2du)vec_abs ((__v2df)__A);
+ b = (__v2du)vec_abs ((__v2df)__B);
+ c = (__v2du)vec_cmpgt (double_exp_mask, a);
+ d = (__v2du)vec_cmpgt (double_exp_mask, b);
+#endif
+ /* A != NAN and B != NAN. */
+ return ((__m128d)vec_and(c, d));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpunord_pd (__m128d __A, __m128d __B)
+{
+#if _ARCH_PWR8
+ __v2du c, d;
+ /* Compare against self will return false (0's) if NAN. */
+ c = (__v2du)vec_cmpeq ((__v2df)__A, (__v2df)__A);
+ d = (__v2du)vec_cmpeq ((__v2df)__B, (__v2df)__B);
+  /* A == NAN OR B == NAN converts to:
+ NOT(A != NAN) OR NOT(B != NAN). */
+ c = vec_nor (c, c);
+ return ((__m128d)vec_orc(c, d));
+#else
+ __v2du c, d;
+ /* Compare against self will return false (0's) if NAN. */
+ c = (__v2du)vec_cmpeq ((__v2df)__A, (__v2df)__A);
+ d = (__v2du)vec_cmpeq ((__v2df)__B, (__v2df)__B);
+  /* Invert so that true ('1's) means NAN. */
+ c = vec_nor (c, c);
+ d = vec_nor (d, d);
+ return ((__m128d)vec_or(c, d));
+#endif
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpeq_sd(__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ /* PowerISA VSX does not allow partial (for just lower double)
+     results. So to ensure we don't generate spurious exceptions
+ (from the upper double values) we splat the lower double
+ before we do the operation. */
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ c = (__v2df) vec_cmpeq(a, b);
+ /* Then we merge the lower double result with the original upper
+ double from __A. */
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ c = (__v2df) vec_cmplt(a, b);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmple_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ c = (__v2df) vec_cmple(a, b);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpgt_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ c = (__v2df) vec_cmpgt(a, b);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpge_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ c = (__v2df) vec_cmpge(a, b);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpneq_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ c = (__v2df) vec_cmpeq(a, b);
+ c = vec_nor (c, c);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnlt_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ /* Not less than is just greater than or equal. */
+ c = (__v2df) vec_cmpge(a, b);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnle_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ /* Not less than or equal is just greater than. */
+ c = (__v2df) vec_cmpge(a, b);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpngt_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ /* Not greater than is just less than or equal. */
+ c = (__v2df) vec_cmple(a, b);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnge_sd (__m128d __A, __m128d __B)
+{
+ __v2df a, b, c;
+ a = vec_splats (__A[0]);
+ b = vec_splats (__B[0]);
+ /* Not greater than or equal is just less than. */
+ c = (__v2df) vec_cmplt(a, b);
+ return (__m128d) _mm_setr_pd (c[0], __A[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpord_sd (__m128d __A, __m128d __B)
+{
+ __v2df r;
+ r = (__v2df)_mm_cmpord_pd (vec_splats (__A[0]), vec_splats (__B[0]));
+ return (__m128d) _mm_setr_pd (r[0], ((__v2df)__A)[1]);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpunord_sd (__m128d __A, __m128d __B)
+{
+ __v2df r;
+ r = _mm_cmpunord_pd (vec_splats (__A[0]), vec_splats (__B[0]));
+ return (__m128d) _mm_setr_pd (r[0], __A[1]);
+}
+
+/* FIXME
+   The _mm_comi??_sd and _mm_ucomi??_sd implementations below are
+   exactly the same because GCC for PowerPC only generates unordered
+   compares (scalar and vector).
+   Technically _mm_comieq_sd et al. should be using the ordered
+   compare and signal for QNaNs.  The _mm_ucomieq_sd et al. should
+   be OK. */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comieq_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] == __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comilt_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] < __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comile_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] <= __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comigt_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] > __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comige_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] >= __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comineq_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] != __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomieq_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] == __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomilt_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] < __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomile_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] <= __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomigt_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] > __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomige_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] >= __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomineq_sd (__m128d __A, __m128d __B)
+{
+ return (__A[0] != __B[0]);
+}
+
+/* Create a vector of Qi, where i is the element number. */
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_epi64x (long long __q1, long long __q0)
+{
+ return __extension__ (__m128i)(__v2di){ __q0, __q1 };
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_epi64 (__m64 __q1, __m64 __q0)
+{
+ return _mm_set_epi64x ((long long)__q1, (long long)__q0);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_epi32 (int __q3, int __q2, int __q1, int __q0)
+{
+ return __extension__ (__m128i)(__v4si){ __q0, __q1, __q2, __q3 };
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_epi16 (short __q7, short __q6, short __q5, short __q4,
+ short __q3, short __q2, short __q1, short __q0)
+{
+ return __extension__ (__m128i)(__v8hi){
+ __q0, __q1, __q2, __q3, __q4, __q5, __q6, __q7 };
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_epi8 (char __q15, char __q14, char __q13, char __q12,
+ char __q11, char __q10, char __q09, char __q08,
+ char __q07, char __q06, char __q05, char __q04,
+ char __q03, char __q02, char __q01, char __q00)
+{
+ return __extension__ (__m128i)(__v16qi){
+ __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07,
+ __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15
+ };
+}
+
+/* Set all of the elements of the vector to A. */
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set1_epi64x (long long __A)
+{
+ return _mm_set_epi64x (__A, __A);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set1_epi64 (__m64 __A)
+{
+ return _mm_set_epi64 (__A, __A);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set1_epi32 (int __A)
+{
+ return _mm_set_epi32 (__A, __A, __A, __A);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set1_epi16 (short __A)
+{
+ return _mm_set_epi16 (__A, __A, __A, __A, __A, __A, __A, __A);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set1_epi8 (char __A)
+{
+ return _mm_set_epi8 (__A, __A, __A, __A, __A, __A, __A, __A,
+ __A, __A, __A, __A, __A, __A, __A, __A);
+}
+
+/* Create a vector of Qi, where i is the element number.
+ The parameter order is reversed from the _mm_set_epi* functions. */
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setr_epi64 (__m64 __q0, __m64 __q1)
+{
+ return _mm_set_epi64 (__q1, __q0);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setr_epi32 (int __q0, int __q1, int __q2, int __q3)
+{
+ return _mm_set_epi32 (__q3, __q2, __q1, __q0);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setr_epi16 (short __q0, short __q1, short __q2, short __q3,
+ short __q4, short __q5, short __q6, short __q7)
+{
+ return _mm_set_epi16 (__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setr_epi8 (char __q00, char __q01, char __q02, char __q03,
+ char __q04, char __q05, char __q06, char __q07,
+ char __q08, char __q09, char __q10, char __q11,
+ char __q12, char __q13, char __q14, char __q15)
+{
+ return _mm_set_epi8 (__q15, __q14, __q13, __q12, __q11, __q10, __q09, __q08,
+ __q07, __q06, __q05, __q04, __q03, __q02, __q01, __q00);
+}
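To make the reversed parameter order concrete, a small illustration (values chosen arbitrarily, not part of this header): the two expressions below build identical vectors, each with element 0 equal to 10.

/* Illustrative equivalence of _mm_set_epi32 and _mm_setr_epi32.  */
__m128i a = _mm_set_epi32 (40, 30, 20, 10);   /* arguments given high-to-low */
__m128i b = _mm_setr_epi32 (10, 20, 30, 40);  /* arguments given low-to-high */
/* a and b are identical: element 0 == 10, element 3 == 40.  */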
+
+/* Create a vector with element 0 as *P and the rest zero. */
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_load_si128 (__m128i const *__P)
+{
+ return *__P;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadu_si128 (__m128i_u const *__P)
+{
+ return (__m128i) (vec_vsx_ld(0, (signed int const *)__P));
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadl_epi64 (__m128i_u const *__P)
+{
+ return _mm_set_epi64 ((__m64)0LL, *(__m64 *)__P);
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_store_si128 (__m128i *__P, __m128i __B)
+{
+ vec_st ((__v16qu) __B, 0, (__v16qu*)__P);
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storeu_si128 (__m128i_u *__P, __m128i __B)
+{
+ *__P = __B;
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storel_epi64 (__m128i_u *__P, __m128i __B)
+{
+ *(long long *)__P = ((__v2di)__B)[0];
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movepi64_pi64 (__m128i_u __B)
+{
+ return (__m64) ((__v2di)__B)[0];
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movpi64_epi64 (__m64 __A)
+{
+ return _mm_set_epi64 ((__m64)0LL, __A);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_move_epi64 (__m128i __A)
+{
+ return _mm_set_epi64 ((__m64)0LL, (__m64)__A[0]);
+}
+
+/* Create an undefined vector. */
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_undefined_si128 (void)
+{
+ __m128i __Y = __Y;
+ return __Y;
+}
+
+/* Create a vector of zeros. */
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setzero_si128 (void)
+{
+ return __extension__ (__m128i)(__v4si){ 0, 0, 0, 0 };
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtepi32_pd (__m128i __A)
+{
+ __v2di val;
+  /* For LE we need to generate Vector Unpack Low Signed Word,
+     which is generated from unpackh. */
+ val = (__v2di)vec_unpackh ((__v4si)__A);
+
+ return (__m128d)vec_ctf (val, 0);
+}
+#endif
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtepi32_ps (__m128i __A)
+{
+ return ((__m128)vec_ctf((__v4si)__A, 0));
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpd_epi32 (__m128d __A)
+{
+ __v2df rounded = vec_rint (__A);
+ __v4si result, temp;
+ const __v4si vzero =
+ { 0, 0, 0, 0 };
+
+ /* VSX Vector truncate Double-Precision to integer and Convert to
+ Signed Integer Word format with Saturate. */
+ __asm__(
+ "xvcvdpsxws %x0,%x1"
+ : "=wa" (temp)
+ : "wa" (rounded)
+ : );
+
+#ifdef _ARCH_PWR8
+ temp = vec_mergeo (temp, temp);
+ result = (__v4si) vec_vpkudum ((__vector long long) temp,
+ (__vector long long) vzero);
+#else
+ {
+ const __v16qu pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
+ 0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f };
+ result = (__v4si) vec_perm ((__v16qu) temp, (__v16qu) vzero, pkperm);
+ }
+#endif
+ return (__m128i) result;
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpd_pi32 (__m128d __A)
+{
+ __m128i result = _mm_cvtpd_epi32(__A);
+
+ return (__m64) result[0];
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpd_ps (__m128d __A)
+{
+ __v4sf result;
+ __v4si temp;
+ const __v4si vzero = { 0, 0, 0, 0 };
+
+ __asm__(
+ "xvcvdpsp %x0,%x1"
+ : "=wa" (temp)
+ : "wa" (__A)
+ : );
+
+#ifdef _ARCH_PWR8
+ temp = vec_mergeo (temp, temp);
+ result = (__v4sf) vec_vpkudum ((__vector long long) temp,
+ (__vector long long) vzero);
+#else
+ {
+ const __v16qu pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
+ 0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f };
+ result = (__v4sf) vec_perm ((__v16qu) temp, (__v16qu) vzero, pkperm);
+ }
+#endif
+ return ((__m128)result);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttpd_epi32 (__m128d __A)
+{
+ __v4si result;
+ __v4si temp;
+ const __v4si vzero = { 0, 0, 0, 0 };
+
+ /* VSX Vector truncate Double-Precision to integer and Convert to
+ Signed Integer Word format with Saturate. */
+ __asm__(
+ "xvcvdpsxws %x0,%x1"
+ : "=wa" (temp)
+ : "wa" (__A)
+ : );
+
+#ifdef _ARCH_PWR8
+ temp = vec_mergeo (temp, temp);
+ result = (__v4si) vec_vpkudum ((__vector long long) temp,
+ (__vector long long) vzero);
+#else
+ {
+ const __v16qu pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
+ 0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f };
+ result = (__v4si) vec_perm ((__v16qu) temp, (__v16qu) vzero, pkperm);
+ }
+#endif
+
+ return ((__m128i) result);
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttpd_pi32 (__m128d __A)
+{
+ __m128i result = _mm_cvttpd_epi32 (__A);
+
+ return (__m64) result[0];
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi128_si32 (__m128i __A)
+{
+ return ((__v4si)__A)[0];
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpi32_pd (__m64 __A)
+{
+ __v4si temp;
+ __v2di tmp2;
+ __v2df result;
+
+ temp = (__v4si)vec_splats (__A);
+ tmp2 = (__v2di)vec_unpackl (temp);
+ result = vec_ctf ((__vector signed long long) tmp2, 0);
+ return (__m128d)result;
+}
+#endif
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtps_epi32 (__m128 __A)
+{
+ __v4sf rounded;
+ __v4si result;
+
+ rounded = vec_rint((__v4sf) __A);
+ result = vec_cts (rounded, 0);
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttps_epi32 (__m128 __A)
+{
+ __v4si result;
+
+ result = vec_cts ((__v4sf) __A, 0);
+ return (__m128i) result;
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtps_pd (__m128 __A)
+{
+ /* Check if vec_doubleh is defined by <altivec.h>. If so use that. */
+#ifdef vec_doubleh
+ return (__m128d) vec_doubleh ((__v4sf)__A);
+#else
+  /* Otherwise the compiler is not current and so we need to generate the
+ equivalent code. */
+ __v4sf a = (__v4sf)__A;
+ __v4sf temp;
+ __v2df result;
+#ifdef __LITTLE_ENDIAN__
+ /* The input float values are in elements {[0], [1]} but the convert
+     instruction needs them in elements {[1], [3]}, so we use two
+ shift left double vector word immediates to get the elements
+ lined up. */
+ temp = __builtin_vsx_xxsldwi (a, a, 3);
+ temp = __builtin_vsx_xxsldwi (a, temp, 2);
+#else
+  /* The input float values are in elements {[0], [1]} but the convert
+     instruction needs them in elements {[0], [2]}, so we merge the
+     vector with itself to get the elements lined up. */
+ temp = vec_vmrghw (a, a);
+#endif
+ __asm__(
+ " xvcvspdp %x0,%x1"
+ : "=wa" (result)
+ : "wa" (temp)
+ : );
+ return (__m128d) result;
+#endif
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsd_si32 (__m128d __A)
+{
+ __v2df rounded = vec_rint((__v2df) __A);
+ int result = ((__v2df)rounded)[0];
+
+ return result;
+}
+/* Intel intrinsic. */
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsd_si64 (__m128d __A)
+{
+ __v2df rounded = vec_rint ((__v2df) __A );
+ long long result = ((__v2df) rounded)[0];
+
+ return result;
+}
+
+/* Microsoft intrinsic. */
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsd_si64x (__m128d __A)
+{
+ return _mm_cvtsd_si64 ((__v2df)__A);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttsd_si32 (__m128d __A)
+{
+ int result = ((__v2df)__A)[0];
+
+ return result;
+}
+
+/* Intel intrinsic. */
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttsd_si64 (__m128d __A)
+{
+ long long result = ((__v2df)__A)[0];
+
+ return result;
+}
+
+/* Microsoft intrinsic. */
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttsd_si64x (__m128d __A)
+{
+ return _mm_cvttsd_si64 (__A);
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsd_ss (__m128 __A, __m128d __B)
+{
+ __v4sf result = (__v4sf)__A;
+
+#ifdef __LITTLE_ENDIAN__
+ __v4sf temp_s;
+ /* Copy double element[0] to element [1] for conversion. */
+ __v2df temp_b = vec_splat((__v2df)__B, 0);
+
+ /* Pre-rotate __A left 3 (logically right 1) elements. */
+ result = __builtin_vsx_xxsldwi (result, result, 3);
+ /* Convert double to single float scalar in a vector. */
+ __asm__(
+ "xscvdpsp %x0,%x1"
+ : "=wa" (temp_s)
+ : "wa" (temp_b)
+ : );
+ /* Shift the resulting scalar into vector element [0]. */
+ result = __builtin_vsx_xxsldwi (result, temp_s, 1);
+#else
+ result [0] = ((__v2df)__B)[0];
+#endif
+ return (__m128) result;
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi32_sd (__m128d __A, int __B)
+{
+ __v2df result = (__v2df)__A;
+ double db = __B;
+ result [0] = db;
+ return (__m128d)result;
+}
+
+/* Intel intrinsic. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi64_sd (__m128d __A, long long __B)
+{
+ __v2df result = (__v2df)__A;
+ double db = __B;
+ result [0] = db;
+ return (__m128d)result;
+}
+
+/* Microsoft intrinsic. */
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi64x_sd (__m128d __A, long long __B)
+{
+ return _mm_cvtsi64_sd (__A, __B);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtss_sd (__m128d __A, __m128 __B)
+{
+#ifdef __LITTLE_ENDIAN__
+ /* Use splat to move element [0] into position for the convert. */
+ __v4sf temp = vec_splat ((__v4sf)__B, 0);
+ __v2df res;
+ /* Convert single float scalar to double in a vector. */
+ __asm__(
+ "xscvspdp %x0,%x1"
+ : "=wa" (res)
+ : "wa" (temp)
+ : );
+ return (__m128d) vec_mergel (res, (__v2df)__A);
+#else
+ __v2df res = (__v2df)__A;
+ res [0] = ((__v4sf)__B) [0];
+ return (__m128d) res;
+#endif
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_shuffle_pd(__m128d __A, __m128d __B, const int __mask)
+{
+ __vector double result;
+ const int litmsk = __mask & 0x3;
+
+ if (litmsk == 0)
+ result = vec_mergeh (__A, __B);
+#if __GNUC__ < 6
+ else if (litmsk == 1)
+ result = vec_xxpermdi (__B, __A, 2);
+ else if (litmsk == 2)
+ result = vec_xxpermdi (__B, __A, 1);
+#else
+ else if (litmsk == 1)
+ result = vec_xxpermdi (__A, __B, 2);
+ else if (litmsk == 2)
+ result = vec_xxpermdi (__A, __B, 1);
+#endif
+ else
+ result = vec_mergel (__A, __B);
+
+ return result;
+}
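A quick illustration of how the _MM_SHUFFLE2 mask defined near the top of this header drives the selection above (values illustrative only): bit 0 of the mask picks the element taken from __A into the low result slot, and bit 1 picks the element taken from __B into the high slot.

/* Illustrative use of _mm_shuffle_pd with _MM_SHUFFLE2.  */
__m128d a = _mm_set_pd (11.0, 10.0);   /* a = {10.0, 11.0}; element 0 is 10.0 */
__m128d b = _mm_set_pd (21.0, 20.0);   /* b = {20.0, 21.0}                    */
__m128d r = _mm_shuffle_pd (a, b, _MM_SHUFFLE2 (0, 1));  /* r = {11.0, 20.0}  */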
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpackhi_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d) vec_mergel ((__v2df)__A, (__v2df)__B);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpacklo_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d) vec_mergeh ((__v2df)__A, (__v2df)__B);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadh_pd (__m128d __A, double const *__B)
+{
+ __v2df result = (__v2df)__A;
+ result [1] = *__B;
+ return (__m128d)result;
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadl_pd (__m128d __A, double const *__B)
+{
+ __v2df result = (__v2df)__A;
+ result [0] = *__B;
+ return (__m128d)result;
+}
+
+#ifdef _ARCH_PWR8
+/* Intrinsic functions that require PowerISA 2.07 minimum. */
+
+/* Creates a 2-bit mask from the most significant bits of the DPFP values. */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movemask_pd (__m128d __A)
+{
+ __vector unsigned long long result;
+ static const __vector unsigned int perm_mask =
+ {
+#ifdef __LITTLE_ENDIAN__
+ 0x80800040, 0x80808080, 0x80808080, 0x80808080
+#else
+ 0x80808080, 0x80808080, 0x80808080, 0x80804000
+#endif
+ };
+
+ result = ((__vector unsigned long long)
+ vec_vbpermq ((__vector unsigned char) __A,
+ (__vector unsigned char) perm_mask));
+
+#ifdef __LITTLE_ENDIAN__
+ return result[1];
+#else
+ return result[0];
+#endif
+}
+#endif /* _ARCH_PWR8 */
+
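A short illustration of the 2-bit sign mask produced by _mm_movemask_pd above (values illustrative only):

/* Illustrative use of _mm_movemask_pd.  */
__m128d v = _mm_set_pd (-1.0, 2.0);   /* element 0 = 2.0, element 1 = -1.0 */
int m = _mm_movemask_pd (v);          /* bit 0 = sign of element 0,
                                         bit 1 = sign of element 1 */
/* m == 2: only the upper element is negative.  */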
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_packs_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_packs ((__v8hi) __A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_packs_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_packs ((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_packus_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_packsu ((__v8hi) __A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpackhi_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_mergel ((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpackhi_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_mergel ((__v8hu)__A, (__v8hu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpackhi_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_mergel ((__v4su)__A, (__v4su)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpackhi_epi64 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_mergel ((__vector long long) __A,
+ (__vector long long) __B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpacklo_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_mergeh ((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpacklo_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_mergeh ((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpacklo_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_mergeh ((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpacklo_epi64 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_mergeh ((__vector long long) __A,
+ (__vector long long) __B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_add_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) ((__v16qu)__A + (__v16qu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_add_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) ((__v8hu)__A + (__v8hu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_add_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i) ((__v4su)__A + (__v4su)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_add_epi64 (__m128i __A, __m128i __B)
+{
+ return (__m128i) ((__v2du)__A + (__v2du)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_adds_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_adds ((__v16qi)__A, (__v16qi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_adds_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_adds ((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_adds_epu8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_adds ((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_adds_epu16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_adds ((__v8hu)__A, (__v8hu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sub_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) ((__v16qu)__A - (__v16qu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sub_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) ((__v8hu)__A - (__v8hu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sub_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i) ((__v4su)__A - (__v4su)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sub_epi64 (__m128i __A, __m128i __B)
+{
+ return (__m128i) ((__v2du)__A - (__v2du)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_subs_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_subs ((__v16qi)__A, (__v16qi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_subs_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_subs ((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_subs_epu8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_subs ((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_subs_epu16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_subs ((__v8hu)__A, (__v8hu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_madd_epi16 (__m128i __A, __m128i __B)
+{
+ __vector signed int zero = {0, 0, 0, 0};
+
+ return (__m128i) vec_vmsumshm ((__v8hi)__A, (__v8hi)__B, zero);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mulhi_epi16 (__m128i __A, __m128i __B)
+{
+ __vector signed int w0, w1;
+
+ __vector unsigned char xform1 = {
+#ifdef __LITTLE_ENDIAN__
+ 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17,
+ 0x0A, 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+#else
+ 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15,
+ 0x08, 0x09, 0x18, 0x19, 0x0C, 0x0D, 0x1C, 0x1D
+#endif
+ };
+
+ w0 = vec_vmulesh ((__v8hi)__A, (__v8hi)__B);
+ w1 = vec_vmulosh ((__v8hi)__A, (__v8hi)__B);
+ return (__m128i) vec_perm (w0, w1, xform1);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mullo_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) ((__v8hi)__A * (__v8hi)__B);
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mul_su32 (__m64 __A, __m64 __B)
+{
+ unsigned int a = __A;
+ unsigned int b = __B;
+
+ return ((__m64)a * (__m64)b);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mul_epu32 (__m128i __A, __m128i __B)
+{
+#if __GNUC__ < 8
+ __v2du result;
+
+#ifdef __LITTLE_ENDIAN__
+ /* VMX Vector Multiply Odd Unsigned Word. */
+ __asm__(
+ "vmulouw %0,%1,%2"
+ : "=v" (result)
+ : "v" (__A), "v" (__B)
+ : );
+#else
+ /* VMX Vector Multiply Even Unsigned Word. */
+ __asm__(
+ "vmuleuw %0,%1,%2"
+ : "=v" (result)
+ : "v" (__A), "v" (__B)
+ : );
+#endif
+ return (__m128i) result;
+#else
+ return (__m128i) vec_mule ((__v4su)__A, (__v4su)__B);
+#endif
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_slli_epi16 (__m128i __A, int __B)
+{
+ __v8hu lshift;
+ __v8hi result = { 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ if (__B >= 0 && __B < 16)
+ {
+ if (__builtin_constant_p(__B))
+ lshift = (__v8hu) vec_splat_s16(__B);
+ else
+ lshift = vec_splats ((unsigned short) __B);
+
+ result = vec_sl ((__v8hi) __A, lshift);
+ }
+
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_slli_epi32 (__m128i __A, int __B)
+{
+ __v4su lshift;
+ __v4si result = { 0, 0, 0, 0 };
+
+ if (__B >= 0 && __B < 32)
+ {
+ if (__builtin_constant_p(__B) && __B < 16)
+ lshift = (__v4su) vec_splat_s32(__B);
+ else
+ lshift = vec_splats ((unsigned int) __B);
+
+ result = vec_sl ((__v4si) __A, lshift);
+ }
+
+ return (__m128i) result;
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_slli_epi64 (__m128i __A, int __B)
+{
+ __v2du lshift;
+ __v2di result = { 0, 0 };
+
+ if (__B >= 0 && __B < 64)
+ {
+ if (__builtin_constant_p(__B) && __B < 16)
+ lshift = (__v2du) vec_splat_s32(__B);
+ else
+ lshift = (__v2du) vec_splats ((unsigned int) __B);
+
+ result = vec_sl ((__v2di) __A, lshift);
+ }
+
+ return (__m128i) result;
+}
+#endif
+
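+/* Arithmetic right shift of each 16-bit element of __A by __B; counts
+   of 16 or more shift by 15, replicating the sign bit. */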
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srai_epi16 (__m128i __A, int __B)
+{
+ __v8hu rshift = { 15, 15, 15, 15, 15, 15, 15, 15 };
+ __v8hi result;
+
+ if (__B < 16)
+ {
+ if (__builtin_constant_p(__B))
+ rshift = (__v8hu) vec_splat_s16(__B);
+ else
+ rshift = vec_splats ((unsigned short) __B);
+ }
+ result = vec_sra ((__v8hi) __A, rshift);
+
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srai_epi32 (__m128i __A, int __B)
+{
+ __v4su rshift = { 31, 31, 31, 31 };
+ __v4si result;
+
+ if (__B < 32)
+ {
+ if (__builtin_constant_p(__B))
+ {
+ if (__B < 16)
+ rshift = (__v4su) vec_splat_s32(__B);
+ else
+ rshift = (__v4su) vec_splats((unsigned int)__B);
+ }
+ else
+ rshift = vec_splats ((unsigned int) __B);
+ }
+ result = vec_sra ((__v4si) __A, rshift);
+
+ return (__m128i) result;
+}
+
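+/* Shift the whole 128-bit value __A left by __N bytes, shifting in
+   zeros; __N of 16 or more yields zero. */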
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_bslli_si128 (__m128i __A, const int __N)
+{
+ __v16qu result;
+ const __v16qu zeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ if (__N < 16)
+ result = vec_sld ((__v16qu) __A, zeros, __N);
+ else
+ result = zeros;
+
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_bsrli_si128 (__m128i __A, const int __N)
+{
+ __v16qu result;
+ const __v16qu zeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ if (__N < 16)
+#ifdef __LITTLE_ENDIAN__
+ if (__builtin_constant_p(__N))
+      /* We would like to use Vector Shift Left Double by Octet
+	 Immediate here, to use the immediate form and avoid
+	 loading the __N * 8 value into a separate VR. */
+ result = vec_sld (zeros, (__v16qu) __A, (16 - __N));
+ else
+#endif
+ {
+ __v16qu shift = vec_splats((unsigned char)(__N*8));
+#ifdef __LITTLE_ENDIAN__
+ result = vec_sro ((__v16qu)__A, shift);
+#else
+ result = vec_slo ((__v16qu)__A, shift);
+#endif
+ }
+ else
+ result = zeros;
+
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srli_si128 (__m128i __A, const int __N)
+{
+ return _mm_bsrli_si128 (__A, __N);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_slli_si128 (__m128i __A, const int _imm5)
+{
+ __v16qu result;
+ const __v16qu zeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ if (_imm5 < 16)
+#ifdef __LITTLE_ENDIAN__
+ result = vec_sld ((__v16qu) __A, zeros, _imm5);
+#else
+ result = vec_sld (zeros, (__v16qu) __A, (16 - _imm5));
+#endif
+ else
+ result = zeros;
+
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srli_epi16 (__m128i __A, int __B)
+{
+ __v8hu rshift;
+ __v8hi result = { 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ if (__B < 16)
+ {
+ if (__builtin_constant_p(__B))
+ rshift = (__v8hu) vec_splat_s16(__B);
+ else
+ rshift = vec_splats ((unsigned short) __B);
+
+ result = vec_sr ((__v8hi) __A, rshift);
+ }
+
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srli_epi32 (__m128i __A, int __B)
+{
+ __v4su rshift;
+ __v4si result = { 0, 0, 0, 0 };
+
+ if (__B < 32)
+ {
+ if (__builtin_constant_p(__B))
+ {
+ if (__B < 16)
+ rshift = (__v4su) vec_splat_s32(__B);
+ else
+ rshift = (__v4su) vec_splats((unsigned int)__B);
+ }
+ else
+ rshift = vec_splats ((unsigned int) __B);
+
+ result = vec_sr ((__v4si) __A, rshift);
+ }
+
+ return (__m128i) result;
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srli_epi64 (__m128i __A, int __B)
+{
+ __v2du rshift;
+ __v2di result = { 0, 0 };
+
+ if (__B < 64)
+ {
+ if (__builtin_constant_p(__B))
+ {
+ if (__B < 16)
+ rshift = (__v2du) vec_splat_s32(__B);
+ else
+ rshift = (__v2du) vec_splats((unsigned long long)__B);
+ }
+ else
+ rshift = (__v2du) vec_splats ((unsigned int) __B);
+
+ result = vec_sr ((__v2di) __A, rshift);
+ }
+
+ return (__m128i) result;
+}
+#endif
+
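+/* Shift each 16-bit element of __A left by the shift count in __B;
+   counts greater than 15 yield zero. */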
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sll_epi16 (__m128i __A, __m128i __B)
+{
+ __v8hu lshift;
+ __vector __bool short shmask;
+ const __v8hu shmax = { 15, 15, 15, 15, 15, 15, 15, 15 };
+ __v8hu result;
+
+#ifdef __LITTLE_ENDIAN__
+ lshift = vec_splat ((__v8hu) __B, 0);
+#else
+ lshift = vec_splat ((__v8hu) __B, 3);
+#endif
+ shmask = vec_cmple (lshift, shmax);
+ result = vec_sl ((__v8hu) __A, lshift);
+ result = vec_sel ((__v8hu) shmask, result, shmask);
+
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sll_epi32 (__m128i __A, __m128i __B)
+{
+ __v4su lshift;
+ __vector __bool int shmask;
+ const __v4su shmax = { 32, 32, 32, 32 };
+ __v4su result;
+#ifdef __LITTLE_ENDIAN__
+ lshift = vec_splat ((__v4su) __B, 0);
+#else
+ lshift = vec_splat ((__v4su) __B, 1);
+#endif
+ shmask = vec_cmplt (lshift, shmax);
+ result = vec_sl ((__v4su) __A, lshift);
+ result = vec_sel ((__v4su) shmask, result, shmask);
+
+ return (__m128i) result;
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sll_epi64 (__m128i __A, __m128i __B)
+{
+ __v2du lshift;
+ __vector __bool long long shmask;
+ const __v2du shmax = { 64, 64 };
+ __v2du result;
+
+ lshift = vec_splat ((__v2du) __B, 0);
+ shmask = vec_cmplt (lshift, shmax);
+ result = vec_sl ((__v2du) __A, lshift);
+ result = vec_sel ((__v2du) shmask, result, shmask);
+
+ return (__m128i) result;
+}
+#endif
+
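+/* Arithmetic right shift of each 16-bit element of __A by the count in
+   __B, clamped to 15 so that large counts replicate the sign bit. */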
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sra_epi16 (__m128i __A, __m128i __B)
+{
+ const __v8hu rshmax = { 15, 15, 15, 15, 15, 15, 15, 15 };
+ __v8hu rshift;
+ __v8hi result;
+
+#ifdef __LITTLE_ENDIAN__
+ rshift = vec_splat ((__v8hu)__B, 0);
+#else
+ rshift = vec_splat ((__v8hu)__B, 3);
+#endif
+ rshift = vec_min (rshift, rshmax);
+ result = vec_sra ((__v8hi) __A, rshift);
+
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sra_epi32 (__m128i __A, __m128i __B)
+{
+ const __v4su rshmax = { 31, 31, 31, 31 };
+ __v4su rshift;
+ __v4si result;
+
+#ifdef __LITTLE_ENDIAN__
+ rshift = vec_splat ((__v4su)__B, 0);
+#else
+ rshift = vec_splat ((__v4su)__B, 1);
+#endif
+ rshift = vec_min (rshift, rshmax);
+ result = vec_sra ((__v4si) __A, rshift);
+
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srl_epi16 (__m128i __A, __m128i __B)
+{
+ __v8hu rshift;
+ __vector __bool short shmask;
+ const __v8hu shmax = { 15, 15, 15, 15, 15, 15, 15, 15 };
+ __v8hu result;
+
+#ifdef __LITTLE_ENDIAN__
+ rshift = vec_splat ((__v8hu) __B, 0);
+#else
+ rshift = vec_splat ((__v8hu) __B, 3);
+#endif
+ shmask = vec_cmple (rshift, shmax);
+ result = vec_sr ((__v8hu) __A, rshift);
+ result = vec_sel ((__v8hu) shmask, result, shmask);
+
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srl_epi32 (__m128i __A, __m128i __B)
+{
+ __v4su rshift;
+ __vector __bool int shmask;
+ const __v4su shmax = { 32, 32, 32, 32 };
+ __v4su result;
+
+#ifdef __LITTLE_ENDIAN__
+ rshift = vec_splat ((__v4su) __B, 0);
+#else
+ rshift = vec_splat ((__v4su) __B, 1);
+#endif
+ shmask = vec_cmplt (rshift, shmax);
+ result = vec_sr ((__v4su) __A, rshift);
+ result = vec_sel ((__v4su) shmask, result, shmask);
+
+ return (__m128i) result;
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srl_epi64 (__m128i __A, __m128i __B)
+{
+ __v2du rshift;
+ __vector __bool long long shmask;
+ const __v2du shmax = { 64, 64 };
+ __v2du result;
+
+ rshift = vec_splat ((__v2du) __B, 0);
+ shmask = vec_cmplt (rshift, shmax);
+ result = vec_sr ((__v2du) __A, rshift);
+ result = vec_sel ((__v2du) shmask, result, shmask);
+
+ return (__m128i) result;
+}
+#endif
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_and_pd (__m128d __A, __m128d __B)
+{
+ return (vec_and ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_andnot_pd (__m128d __A, __m128d __B)
+{
+ return (vec_andc ((__v2df) __B, (__v2df) __A));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_or_pd (__m128d __A, __m128d __B)
+{
+ return (vec_or ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_xor_pd (__m128d __A, __m128d __B)
+{
+ return (vec_xor ((__v2df) __A, (__v2df) __B));
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_and_si128 (__m128i __A, __m128i __B)
+{
+ return (__m128i)vec_and ((__v2di) __A, (__v2di) __B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_andnot_si128 (__m128i __A, __m128i __B)
+{
+ return (__m128i)vec_andc ((__v2di) __B, (__v2di) __A);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_or_si128 (__m128i __A, __m128i __B)
+{
+ return (__m128i)vec_or ((__v2di) __A, (__v2di) __B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_xor_si128 (__m128i __A, __m128i __B)
+{
+ return (__m128i)vec_xor ((__v2di) __A, (__v2di) __B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpeq_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_cmpeq ((__v16qi) __A, (__v16qi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpeq_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_cmpeq ((__v8hi) __A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpeq_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_cmpeq ((__v4si) __A, (__v4si)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_cmplt ((__v16qi) __A, (__v16qi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_cmplt ((__v8hi) __A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_cmplt ((__v4si) __A, (__v4si)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpgt_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_cmpgt ((__v16qi) __A, (__v16qi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpgt_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_cmpgt ((__v8hi) __A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpgt_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_cmpgt ((__v4si) __A, (__v4si)__B);
+}
+
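+/* Extract the 16-bit element of __A selected by the low three bits of
+   __N, zero-extended to int. */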
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_extract_epi16 (__m128i const __A, int const __N)
+{
+ return (unsigned short) ((__v8hi)__A)[__N & 7];
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_insert_epi16 (__m128i const __A, int const __D, int const __N)
+{
+ __v8hi result = (__v8hi)__A;
+
+ result [(__N & 7)] = __D;
+
+ return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_max ((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_epu8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_max ((__v16qu) __A, (__v16qu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_min ((__v8hi) __A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_epu8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_min ((__v16qu) __A, (__v16qu)__B);
+}
+
+
+#ifdef _ARCH_PWR8
+/* Intrinsic functions that require PowerISA 2.07 minimum. */
+
+/* Creates a 16-bit mask from the most significant bits of the 16 signed
+   or unsigned 8-bit integers in __A and zero extends the upper bits. */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movemask_epi8 (__m128i __A)
+{
+ __vector unsigned long long result;
+ static const __vector unsigned char perm_mask =
+ {
+ 0x78, 0x70, 0x68, 0x60, 0x58, 0x50, 0x48, 0x40,
+ 0x38, 0x30, 0x28, 0x20, 0x18, 0x10, 0x08, 0x00
+ };
+
+ result = ((__vector unsigned long long)
+ vec_vbpermq ((__vector unsigned char) __A,
+ (__vector unsigned char) perm_mask));
+
+#ifdef __LITTLE_ENDIAN__
+ return result[1];
+#else
+ return result[0];
+#endif
+}
+#endif /* _ARCH_PWR8 */
+
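+/* As _mm_mulhi_epi16, but for unsigned 16-bit elements: keep the high
+   16 bits of each 32-bit product. */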
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mulhi_epu16 (__m128i __A, __m128i __B)
+{
+ __v4su w0, w1;
+ __v16qu xform1 = {
+#ifdef __LITTLE_ENDIAN__
+ 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17,
+ 0x0A, 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+#else
+ 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15,
+ 0x08, 0x09, 0x18, 0x19, 0x0C, 0x0D, 0x1C, 0x1D
+#endif
+ };
+
+ w0 = vec_vmuleuh ((__v8hu)__A, (__v8hu)__B);
+ w1 = vec_vmulouh ((__v8hu)__A, (__v8hu)__B);
+ return (__m128i) vec_perm (w0, w1, xform1);
+}
+
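+/* Shuffle the four 16-bit elements in the high 64 bits of __A
+   according to __mask; the low 64 bits pass through unchanged. */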
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_shufflehi_epi16 (__m128i __A, const int __mask)
+{
+ unsigned long element_selector_98 = __mask & 0x03;
+ unsigned long element_selector_BA = (__mask >> 2) & 0x03;
+ unsigned long element_selector_DC = (__mask >> 4) & 0x03;
+ unsigned long element_selector_FE = (__mask >> 6) & 0x03;
+ static const unsigned short permute_selectors[4] =
+ {
+#ifdef __LITTLE_ENDIAN__
+ 0x0908, 0x0B0A, 0x0D0C, 0x0F0E
+#else
+ 0x0809, 0x0A0B, 0x0C0D, 0x0E0F
+#endif
+ };
+ __v2du pmask =
+#ifdef __LITTLE_ENDIAN__
+ { 0x1716151413121110UL, 0UL};
+#else
+ { 0x1011121314151617UL, 0UL};
+#endif
+ __m64_union t;
+ __v2du a, r;
+
+ t.as_short[0] = permute_selectors[element_selector_98];
+ t.as_short[1] = permute_selectors[element_selector_BA];
+ t.as_short[2] = permute_selectors[element_selector_DC];
+ t.as_short[3] = permute_selectors[element_selector_FE];
+ pmask[1] = t.as_m64;
+ a = (__v2du)__A;
+ r = vec_perm (a, a, (__vector unsigned char)pmask);
+ return (__m128i) r;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_shufflelo_epi16 (__m128i __A, const int __mask)
+{
+ unsigned long element_selector_10 = __mask & 0x03;
+ unsigned long element_selector_32 = (__mask >> 2) & 0x03;
+ unsigned long element_selector_54 = (__mask >> 4) & 0x03;
+ unsigned long element_selector_76 = (__mask >> 6) & 0x03;
+ static const unsigned short permute_selectors[4] =
+ {
+#ifdef __LITTLE_ENDIAN__
+ 0x0100, 0x0302, 0x0504, 0x0706
+#else
+ 0x0001, 0x0203, 0x0405, 0x0607
+#endif
+ };
+ __v2du pmask =
+#ifdef __LITTLE_ENDIAN__
+ { 0UL, 0x1f1e1d1c1b1a1918UL};
+#else
+ { 0UL, 0x18191a1b1c1d1e1fUL};
+#endif
+ __m64_union t;
+ __v2du a, r;
+ t.as_short[0] = permute_selectors[element_selector_10];
+ t.as_short[1] = permute_selectors[element_selector_32];
+ t.as_short[2] = permute_selectors[element_selector_54];
+ t.as_short[3] = permute_selectors[element_selector_76];
+ pmask[0] = t.as_m64;
+ a = (__v2du)__A;
+ r = vec_perm (a, a, (__vector unsigned char)pmask);
+ return (__m128i) r;
+}
+
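+/* Select four 32-bit elements of __A according to the four 2-bit
+   fields of __mask, using a computed vec_perm control vector. */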
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_shuffle_epi32 (__m128i __A, const int __mask)
+{
+ unsigned long element_selector_10 = __mask & 0x03;
+ unsigned long element_selector_32 = (__mask >> 2) & 0x03;
+ unsigned long element_selector_54 = (__mask >> 4) & 0x03;
+ unsigned long element_selector_76 = (__mask >> 6) & 0x03;
+ static const unsigned int permute_selectors[4] =
+ {
+#ifdef __LITTLE_ENDIAN__
+ 0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C
+#else
+ 0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F
+#endif
+ };
+ __v4su t;
+
+ t[0] = permute_selectors[element_selector_10];
+ t[1] = permute_selectors[element_selector_32];
+ t[2] = permute_selectors[element_selector_54] + 0x10101010;
+ t[3] = permute_selectors[element_selector_76] + 0x10101010;
+ return (__m128i)vec_perm ((__v4si) __A, (__v4si)__A, (__vector unsigned char)t);
+}
+
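+/* Conditionally store each byte of __A to the possibly unaligned
+   address __C wherever the corresponding byte of __B has its most
+   significant bit set; implemented as a 16-byte read-modify-write. */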
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C)
+{
+ __v2du hibit = { 0x7f7f7f7f7f7f7f7fUL, 0x7f7f7f7f7f7f7f7fUL};
+ __v16qu mask, tmp;
+ __m128i_u *p = (__m128i_u*)__C;
+
+ tmp = (__v16qu)_mm_loadu_si128(p);
+ mask = (__v16qu)vec_cmpgt ((__v16qu)__B, (__v16qu)hibit);
+ tmp = vec_sel (tmp, (__v16qu)__A, mask);
+ _mm_storeu_si128 (p, (__m128i)tmp);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_avg_epu8 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_avg ((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_avg_epu16 (__m128i __A, __m128i __B)
+{
+ return (__m128i) vec_avg ((__v8hu)__A, (__v8hu)__B);
+}
+
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sad_epu8 (__m128i __A, __m128i __B)
+{
+ __v16qu a, b;
+ __v16qu vmin, vmax, vabsdiff;
+ __v4si vsum;
+ const __v4su zero = { 0, 0, 0, 0 };
+ __v4si result;
+
+ a = (__v16qu) __A;
+ b = (__v16qu) __B;
+ vmin = vec_min (a, b);
+ vmax = vec_max (a, b);
+ vabsdiff = vec_sub (vmax, vmin);
+ /* Sum four groups of bytes into integers. */
+ vsum = (__vector signed int) vec_sum4s (vabsdiff, zero);
+ /* Sum across four integers with two integer results. */
+ result = vec_sum2s (vsum, (__vector signed int) zero);
+ /* Rotate the sums into the correct position. */
+#ifdef __LITTLE_ENDIAN__
+ result = vec_sld (result, result, 4);
+#else
+ result = vec_sld (result, result, 6);
+#endif
+ return (__m128i) result;
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_stream_si32 (int *__A, int __B)
+{
+ /* Use the data cache block touch for store transient. */
+ __asm__ (
+ "dcbtstt 0,%0"
+ :
+ : "b" (__A)
+ : "memory"
+ );
+ *__A = __B;
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_stream_si64 (long long int *__A, long long int __B)
+{
+ /* Use the data cache block touch for store transient. */
+ __asm__ (
+    "dcbtstt 0,%0"
+ :
+ : "b" (__A)
+ : "memory"
+ );
+ *__A = __B;
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_stream_si128 (__m128i *__A, __m128i __B)
+{
+ /* Use the data cache block touch for store transient. */
+ __asm__ (
+ "dcbtstt 0,%0"
+ :
+ : "b" (__A)
+ : "memory"
+ );
+ *__A = __B;
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_stream_pd (double *__A, __m128d __B)
+{
+ /* Use the data cache block touch for store transient. */
+ __asm__ (
+ "dcbtstt 0,%0"
+ :
+ : "b" (__A)
+ : "memory"
+ );
+ *(__m128d*)__A = __B;
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_clflush (void const *__A)
+{
+ /* Use the data cache block flush. */
+ __asm__ (
+ "dcbf 0,%0"
+ :
+ : "b" (__A)
+ : "memory"
+ );
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_lfence (void)
+{
+ /* Use light weight sync for load to load ordering. */
+ __atomic_thread_fence (__ATOMIC_RELEASE);
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mfence (void)
+{
+ /* Use heavy weight sync for any to any ordering. */
+ __atomic_thread_fence (__ATOMIC_SEQ_CST);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi32_si128 (int __A)
+{
+ return _mm_set_epi32 (0, 0, 0, __A);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi64_si128 (long long __A)
+{
+ return __extension__ (__m128i)(__v2di){ __A, 0LL };
+}
+
+/* Microsoft intrinsic. */
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi64x_si128 (long long __A)
+{
+ return __extension__ (__m128i)(__v2di){ __A, 0LL };
+}
+
+/* Casts between various SP, DP, INT vector types. Note that these do no
+ conversion of values, they just change the type. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castpd_ps(__m128d __A)
+{
+ return (__m128) __A;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castpd_si128(__m128d __A)
+{
+ return (__m128i) __A;
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castps_pd(__m128 __A)
+{
+ return (__m128d) __A;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castps_si128(__m128 __A)
+{
+ return (__m128i) __A;
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castsi128_ps(__m128i __A)
+{
+ return (__m128) __A;
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castsi128_pd(__m128i __A)
+{
+ return (__m128d) __A;
+}
+
+#endif /* EMMINTRIN_H_ */
diff --git a/lib/Headers/ppc_wrappers/mm_malloc.h b/lib/Headers/ppc_wrappers/mm_malloc.h
new file mode 100644
index 000000000000..d91d7865c893
--- /dev/null
+++ b/lib/Headers/ppc_wrappers/mm_malloc.h
@@ -0,0 +1,44 @@
+/*===---- mm_malloc.h - Implementation of _mm_malloc and _mm_free ----------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _MM_MALLOC_H_INCLUDED
+#define _MM_MALLOC_H_INCLUDED
+
+#include <stdlib.h>
+
+/* We can't depend on <stdlib.h> to provide the prototype of posix_memalign,
+   since it may not be visible. */
+#ifndef __cplusplus
+extern int posix_memalign (void **, size_t, size_t);
+#else
+extern "C" int posix_memalign (void **, size_t, size_t) throw ();
+#endif
+
+static __inline void *
+_mm_malloc (size_t size, size_t alignment)
+{
+ /* PowerPC64 ELF V2 ABI requires quadword alignment. */
+ size_t vec_align = sizeof (__vector float);
+ void *ptr;
+
+ if (alignment < vec_align)
+ alignment = vec_align;
+ if (posix_memalign (&ptr, alignment, size) == 0)
+ return ptr;
+ else
+ return NULL;
+}
+
+static __inline void
+_mm_free (void * ptr)
+{
+ free (ptr);
+}
+
+#endif /* _MM_MALLOC_H_INCLUDED */
diff --git a/lib/Headers/ppc_wrappers/mmintrin.h b/lib/Headers/ppc_wrappers/mmintrin.h
new file mode 100644
index 000000000000..b949653adf5a
--- /dev/null
+++ b/lib/Headers/ppc_wrappers/mmintrin.h
@@ -0,0 +1,1443 @@
+/*===---- mmintrin.h - Implementation of MMX intrinsics on PowerPC ---------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+ User Guide and Reference, version 9.0. */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header file helps port code that uses Intel intrinsics
+   explicitly from x86_64 to powerpc64/powerpc64le.
+
+   Since the PowerPC target doesn't support a native 64-bit vector type,
+   these MMX intrinsics typedef __m64 to a 64-bit unsigned long long,
+   which works well for _si64 and some _pi32 operations.
+
+   For _pi16 and _pi8 operations, it's better to transfer __m64 into a
+   128-bit PowerPC vector first. Power8 introduced direct register move
+   instructions, which help make such implementations more efficient.
+
+   It is the user's responsibility to determine whether the results of
+   such a port are acceptable or whether further changes are needed.
+   Note that much code using Intel intrinsics CAN BE REWRITTEN in more
+   portable and efficient standard C or GNU C extensions, using 64-bit
+   scalar operations or 128-bit SSE/Altivec operations, which is the
+   recommended approach. */
+#error \
+ "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
+
+#ifndef _MMINTRIN_H_INCLUDED
+#define _MMINTRIN_H_INCLUDED
+
+#include <altivec.h>
+/* The Intel API is flexible enough that we must allow aliasing with other
+ vector types, and their scalar components. */
+typedef __attribute__((__aligned__(8))) unsigned long long __m64;
+
+typedef __attribute__((__aligned__(8))) union {
+ __m64 as_m64;
+ char as_char[8];
+ signed char as_signed_char[8];
+ short as_short[4];
+ int as_int[2];
+ long long as_long_long;
+ float as_float[2];
+ double as_double;
+} __m64_union;
+
+/* Empty the multimedia state. */
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_empty(void) {
+ /* nothing to do on PowerPC. */
+}
+
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_empty(void) {
+ /* nothing to do on PowerPC. */
+}
+
+/* Convert I to a __m64 object. The integer is zero-extended to 64 bits. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi32_si64(int __i) {
+ return (__m64)(unsigned int)__i;
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_from_int(int __i) {
+ return _mm_cvtsi32_si64(__i);
+}
+
+/* Convert the lower 32 bits of the __m64 object into an integer. */
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64_si32(__m64 __i) {
+ return ((int)__i);
+}
+
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_to_int(__m64 __i) {
+ return _mm_cvtsi64_si32(__i);
+}
+
+/* Convert I to a __m64 object. */
+
+/* Intel intrinsic. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_from_int64(long long __i) {
+ return (__m64)__i;
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64_m64(long long __i) {
+ return (__m64)__i;
+}
+
+/* Microsoft intrinsic. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64x_si64(long long __i) {
+ return (__m64)__i;
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_pi64x(long long __i) {
+ return (__m64)__i;
+}
+
+/* Convert the __m64 object to a 64-bit integer. */
+
+/* Intel intrinsic. */
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_to_int64(__m64 __i) {
+ return (long long)__i;
+}
+
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtm64_si64(__m64 __i) {
+ return (long long)__i;
+}
+
+/* Microsoft intrinsic. */
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64_si64x(__m64 __i) {
+ return (long long)__i;
+}
+
+#ifdef _ARCH_PWR8
+/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+ the result, and the four 16-bit values from M2 into the upper four 8-bit
+ values of the result, all with signed saturation. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_packs_pi16(__m64 __m1, __m64 __m2) {
+ __vector signed short vm1;
+ __vector signed char vresult;
+
+ vm1 = (__vector signed short)(__vector unsigned long long)
+#ifdef __LITTLE_ENDIAN__
+ {__m1, __m2};
+#else
+ {__m2, __m1};
+#endif
+ vresult = vec_packs(vm1, vm1);
+ return (__m64)((__vector long long)vresult)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_packsswb(__m64 __m1, __m64 __m2) {
+ return _mm_packs_pi16(__m1, __m2);
+}
+
+/* Pack the two 32-bit values from M1 into the lower two 16-bit values of
+ the result, and the two 32-bit values from M2 into the upper two 16-bit
+ values of the result, all with signed saturation. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_packs_pi32(__m64 __m1, __m64 __m2) {
+ __vector signed int vm1;
+ __vector signed short vresult;
+
+ vm1 = (__vector signed int)(__vector unsigned long long)
+#ifdef __LITTLE_ENDIAN__
+ {__m1, __m2};
+#else
+ {__m2, __m1};
+#endif
+ vresult = vec_packs(vm1, vm1);
+ return (__m64)((__vector long long)vresult)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_packssdw(__m64 __m1, __m64 __m2) {
+ return _mm_packs_pi32(__m1, __m2);
+}
+
+/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+ the result, and the four 16-bit values from M2 into the upper four 8-bit
+ values of the result, all with unsigned saturation. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_packs_pu16(__m64 __m1, __m64 __m2) {
+ __vector unsigned char r;
+ __vector signed short vm1 = (__vector signed short)(__vector long long)
+#ifdef __LITTLE_ENDIAN__
+ {__m1, __m2};
+#else
+ {__m2, __m1};
+#endif
+ const __vector signed short __zero = {0};
+ __vector __bool short __select = vec_cmplt(vm1, __zero);
+ r = vec_packs((__vector unsigned short)vm1, (__vector unsigned short)vm1);
+ __vector __bool char packsel = vec_pack(__select, __select);
+ r = vec_sel(r, (const __vector unsigned char)__zero, packsel);
+ return (__m64)((__vector long long)r)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_packuswb(__m64 __m1, __m64 __m2) {
+ return _mm_packs_pu16(__m1, __m2);
+}
+#endif /* end ARCH_PWR8 */
+
+/* Interleave the four 8-bit values from the high half of M1 with the four
+ 8-bit values from the high half of M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_pi8(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+ __vector unsigned char a, b, c;
+
+ a = (__vector unsigned char)vec_splats(__m1);
+ b = (__vector unsigned char)vec_splats(__m2);
+ c = vec_mergel(a, b);
+ return (__m64)((__vector long long)c)[1];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_char[0] = m1.as_char[4];
+ res.as_char[1] = m2.as_char[4];
+ res.as_char[2] = m1.as_char[5];
+ res.as_char[3] = m2.as_char[5];
+ res.as_char[4] = m1.as_char[6];
+ res.as_char[5] = m2.as_char[6];
+ res.as_char[6] = m1.as_char[7];
+ res.as_char[7] = m2.as_char[7];
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpckhbw(__m64 __m1, __m64 __m2) {
+ return _mm_unpackhi_pi8(__m1, __m2);
+}
+
+/* Interleave the two 16-bit values from the high half of M1 with the two
+ 16-bit values from the high half of M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_pi16(__m64 __m1, __m64 __m2) {
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_short[0] = m1.as_short[2];
+ res.as_short[1] = m2.as_short[2];
+ res.as_short[2] = m1.as_short[3];
+ res.as_short[3] = m2.as_short[3];
+
+ return (__m64)res.as_m64;
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpckhwd(__m64 __m1, __m64 __m2) {
+ return _mm_unpackhi_pi16(__m1, __m2);
+}
+/* Interleave the 32-bit value from the high half of M1 with the 32-bit
+ value from the high half of M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_pi32(__m64 __m1, __m64 __m2) {
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_int[0] = m1.as_int[1];
+ res.as_int[1] = m2.as_int[1];
+
+ return (__m64)res.as_m64;
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpckhdq(__m64 __m1, __m64 __m2) {
+ return _mm_unpackhi_pi32(__m1, __m2);
+}
+/* Interleave the four 8-bit values from the low half of M1 with the four
+ 8-bit values from the low half of M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_pi8(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+ __vector unsigned char a, b, c;
+
+ a = (__vector unsigned char)vec_splats(__m1);
+ b = (__vector unsigned char)vec_splats(__m2);
+ c = vec_mergel(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_char[0] = m1.as_char[0];
+ res.as_char[1] = m2.as_char[0];
+ res.as_char[2] = m1.as_char[1];
+ res.as_char[3] = m2.as_char[1];
+ res.as_char[4] = m1.as_char[2];
+ res.as_char[5] = m2.as_char[2];
+ res.as_char[6] = m1.as_char[3];
+ res.as_char[7] = m2.as_char[3];
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpcklbw(__m64 __m1, __m64 __m2) {
+ return _mm_unpacklo_pi8(__m1, __m2);
+}
+/* Interleave the two 16-bit values from the low half of M1 with the two
+ 16-bit values from the low half of M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_pi16(__m64 __m1, __m64 __m2) {
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_short[0] = m1.as_short[0];
+ res.as_short[1] = m2.as_short[0];
+ res.as_short[2] = m1.as_short[1];
+ res.as_short[3] = m2.as_short[1];
+
+ return (__m64)res.as_m64;
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpcklwd(__m64 __m1, __m64 __m2) {
+ return _mm_unpacklo_pi16(__m1, __m2);
+}
+
+/* Interleave the 32-bit value from the low half of M1 with the 32-bit
+ value from the low half of M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_pi32(__m64 __m1, __m64 __m2) {
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_int[0] = m1.as_int[0];
+ res.as_int[1] = m2.as_int[0];
+
+ return (__m64)res.as_m64;
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpckldq(__m64 __m1, __m64 __m2) {
+ return _mm_unpacklo_pi32(__m1, __m2);
+}
+
+/* Add the 8-bit values in M1 to the 8-bit values in M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_pi8(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+ __vector signed char a, b, c;
+
+ a = (__vector signed char)vec_splats(__m1);
+ b = (__vector signed char)vec_splats(__m2);
+ c = vec_add(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_char[0] = m1.as_char[0] + m2.as_char[0];
+ res.as_char[1] = m1.as_char[1] + m2.as_char[1];
+ res.as_char[2] = m1.as_char[2] + m2.as_char[2];
+ res.as_char[3] = m1.as_char[3] + m2.as_char[3];
+ res.as_char[4] = m1.as_char[4] + m2.as_char[4];
+ res.as_char[5] = m1.as_char[5] + m2.as_char[5];
+ res.as_char[6] = m1.as_char[6] + m2.as_char[6];
+ res.as_char[7] = m1.as_char[7] + m2.as_char[7];
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddb(__m64 __m1, __m64 __m2) {
+ return _mm_add_pi8(__m1, __m2);
+}
+
+/* Add the 16-bit values in M1 to the 16-bit values in M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_pi16(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+ __vector signed short a, b, c;
+
+ a = (__vector signed short)vec_splats(__m1);
+ b = (__vector signed short)vec_splats(__m2);
+ c = vec_add(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_short[0] = m1.as_short[0] + m2.as_short[0];
+ res.as_short[1] = m1.as_short[1] + m2.as_short[1];
+ res.as_short[2] = m1.as_short[2] + m2.as_short[2];
+ res.as_short[3] = m1.as_short[3] + m2.as_short[3];
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddw(__m64 __m1, __m64 __m2) {
+ return _mm_add_pi16(__m1, __m2);
+}
+
+/* Add the 32-bit values in M1 to the 32-bit values in M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_pi32(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR9
+ __vector signed int a, b, c;
+
+ a = (__vector signed int)vec_splats(__m1);
+ b = (__vector signed int)vec_splats(__m2);
+ c = vec_add(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_int[0] = m1.as_int[0] + m2.as_int[0];
+ res.as_int[1] = m1.as_int[1] + m2.as_int[1];
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddd(__m64 __m1, __m64 __m2) {
+ return _mm_add_pi32(__m1, __m2);
+}
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_pi8(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+ __vector signed char a, b, c;
+
+ a = (__vector signed char)vec_splats(__m1);
+ b = (__vector signed char)vec_splats(__m2);
+ c = vec_sub(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_char[0] = m1.as_char[0] - m2.as_char[0];
+ res.as_char[1] = m1.as_char[1] - m2.as_char[1];
+ res.as_char[2] = m1.as_char[2] - m2.as_char[2];
+ res.as_char[3] = m1.as_char[3] - m2.as_char[3];
+ res.as_char[4] = m1.as_char[4] - m2.as_char[4];
+ res.as_char[5] = m1.as_char[5] - m2.as_char[5];
+ res.as_char[6] = m1.as_char[6] - m2.as_char[6];
+ res.as_char[7] = m1.as_char[7] - m2.as_char[7];
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubb(__m64 __m1, __m64 __m2) {
+ return _mm_sub_pi8(__m1, __m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_pi16(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+ __vector signed short a, b, c;
+
+ a = (__vector signed short)vec_splats(__m1);
+ b = (__vector signed short)vec_splats(__m2);
+ c = vec_sub(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_short[0] = m1.as_short[0] - m2.as_short[0];
+ res.as_short[1] = m1.as_short[1] - m2.as_short[1];
+ res.as_short[2] = m1.as_short[2] - m2.as_short[2];
+ res.as_short[3] = m1.as_short[3] - m2.as_short[3];
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubw(__m64 __m1, __m64 __m2) {
+ return _mm_sub_pi16(__m1, __m2);
+}
+
+/* Subtract the 32-bit values in M2 from the 32-bit values in M1. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_pi32(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR9
+ __vector signed int a, b, c;
+
+ a = (__vector signed int)vec_splats(__m1);
+ b = (__vector signed int)vec_splats(__m2);
+ c = vec_sub(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_int[0] = m1.as_int[0] - m2.as_int[0];
+ res.as_int[1] = m1.as_int[1] - m2.as_int[1];
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubd(__m64 __m1, __m64 __m2) {
+ return _mm_sub_pi32(__m1, __m2);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_si64(__m64 __m1, __m64 __m2) {
+ return (__m1 + __m2);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_si64(__m64 __m1, __m64 __m2) {
+ return (__m1 - __m2);
+}
+
+/* Shift the 64-bit value in M left by COUNT. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sll_si64(__m64 __m, __m64 __count) {
+ return (__m << __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psllq(__m64 __m, __m64 __count) {
+ return _mm_sll_si64(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_slli_si64(__m64 __m, const int __count) {
+ return (__m << __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psllqi(__m64 __m, const int __count) {
+ return _mm_slli_si64(__m, __count);
+}
+
+/* Shift the 64-bit value in M right by COUNT; shift in zeros. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srl_si64(__m64 __m, __m64 __count) {
+ return (__m >> __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrlq(__m64 __m, __m64 __count) {
+ return _mm_srl_si64(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srli_si64(__m64 __m, const int __count) {
+ return (__m >> __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrlqi(__m64 __m, const int __count) {
+ return _mm_srli_si64(__m, __count);
+}
+
+/* Bit-wise AND the 64-bit values in M1 and M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_and_si64(__m64 __m1, __m64 __m2) {
+ return (__m1 & __m2);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pand(__m64 __m1, __m64 __m2) {
+ return _mm_and_si64(__m1, __m2);
+}
+
+/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
+ 64-bit value in M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_andnot_si64(__m64 __m1, __m64 __m2) {
+ return (~__m1 & __m2);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pandn(__m64 __m1, __m64 __m2) {
+ return _mm_andnot_si64(__m1, __m2);
+}
+
+/* Bit-wise inclusive OR the 64-bit values in M1 and M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_or_si64(__m64 __m1, __m64 __m2) {
+ return (__m1 | __m2);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_por(__m64 __m1, __m64 __m2) {
+ return _mm_or_si64(__m1, __m2);
+}
+
+/* Bit-wise exclusive OR the 64-bit values in M1 and M2. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_xor_si64(__m64 __m1, __m64 __m2) {
+ return (__m1 ^ __m2);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pxor(__m64 __m1, __m64 __m2) {
+ return _mm_xor_si64(__m1, __m2);
+}
+
+/* Creates a 64-bit zero. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setzero_si64(void) {
+ return (__m64)0;
+}
+
+/* Compare eight 8-bit values. The result of the comparison is 0xFF if the
+ test is true and zero if false. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_pi8(__m64 __m1, __m64 __m2) {
+#if defined(_ARCH_PWR6) && defined(__powerpc64__)
+ __m64 res;
+ __asm__("cmpb %0,%1,%2;\n" : "=r"(res) : "r"(__m1), "r"(__m2) :);
+ return (res);
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_char[0] = (m1.as_char[0] == m2.as_char[0]) ? -1 : 0;
+ res.as_char[1] = (m1.as_char[1] == m2.as_char[1]) ? -1 : 0;
+ res.as_char[2] = (m1.as_char[2] == m2.as_char[2]) ? -1 : 0;
+ res.as_char[3] = (m1.as_char[3] == m2.as_char[3]) ? -1 : 0;
+ res.as_char[4] = (m1.as_char[4] == m2.as_char[4]) ? -1 : 0;
+ res.as_char[5] = (m1.as_char[5] == m2.as_char[5]) ? -1 : 0;
+ res.as_char[6] = (m1.as_char[6] == m2.as_char[6]) ? -1 : 0;
+ res.as_char[7] = (m1.as_char[7] == m2.as_char[7]) ? -1 : 0;
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpeqb(__m64 __m1, __m64 __m2) {
+ return _mm_cmpeq_pi8(__m1, __m2);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_pi8(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+ __vector signed char a, b, c;
+
+ a = (__vector signed char)vec_splats(__m1);
+ b = (__vector signed char)vec_splats(__m2);
+ c = (__vector signed char)vec_cmpgt(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_char[0] = (m1.as_char[0] > m2.as_char[0]) ? -1 : 0;
+ res.as_char[1] = (m1.as_char[1] > m2.as_char[1]) ? -1 : 0;
+ res.as_char[2] = (m1.as_char[2] > m2.as_char[2]) ? -1 : 0;
+ res.as_char[3] = (m1.as_char[3] > m2.as_char[3]) ? -1 : 0;
+ res.as_char[4] = (m1.as_char[4] > m2.as_char[4]) ? -1 : 0;
+ res.as_char[5] = (m1.as_char[5] > m2.as_char[5]) ? -1 : 0;
+ res.as_char[6] = (m1.as_char[6] > m2.as_char[6]) ? -1 : 0;
+ res.as_char[7] = (m1.as_char[7] > m2.as_char[7]) ? -1 : 0;
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpgtb(__m64 __m1, __m64 __m2) {
+ return _mm_cmpgt_pi8(__m1, __m2);
+}
+
+/* Compare four 16-bit values. The result of the comparison is 0xFFFF if
+ the test is true and zero if false. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_pi16(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+ __vector signed short a, b, c;
+
+ a = (__vector signed short)vec_splats(__m1);
+ b = (__vector signed short)vec_splats(__m2);
+ c = (__vector signed short)vec_cmpeq(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_short[0] = (m1.as_short[0] == m2.as_short[0]) ? -1 : 0;
+ res.as_short[1] = (m1.as_short[1] == m2.as_short[1]) ? -1 : 0;
+ res.as_short[2] = (m1.as_short[2] == m2.as_short[2]) ? -1 : 0;
+ res.as_short[3] = (m1.as_short[3] == m2.as_short[3]) ? -1 : 0;
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpeqw(__m64 __m1, __m64 __m2) {
+ return _mm_cmpeq_pi16(__m1, __m2);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_pi16(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+ __vector signed short a, b, c;
+
+ a = (__vector signed short)vec_splats(__m1);
+ b = (__vector signed short)vec_splats(__m2);
+ c = (__vector signed short)vec_cmpgt(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_short[0] = (m1.as_short[0] > m2.as_short[0]) ? -1 : 0;
+ res.as_short[1] = (m1.as_short[1] > m2.as_short[1]) ? -1 : 0;
+ res.as_short[2] = (m1.as_short[2] > m2.as_short[2]) ? -1 : 0;
+ res.as_short[3] = (m1.as_short[3] > m2.as_short[3]) ? -1 : 0;
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpgtw(__m64 __m1, __m64 __m2) {
+ return _mm_cmpgt_pi16(__m1, __m2);
+}
+
+/* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if
+ the test is true and zero if false. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_pi32(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR9
+ __vector signed int a, b, c;
+
+ a = (__vector signed int)vec_splats(__m1);
+ b = (__vector signed int)vec_splats(__m2);
+ c = (__vector signed int)vec_cmpeq(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_int[0] = (m1.as_int[0] == m2.as_int[0]) ? -1 : 0;
+ res.as_int[1] = (m1.as_int[1] == m2.as_int[1]) ? -1 : 0;
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpeqd(__m64 __m1, __m64 __m2) {
+ return _mm_cmpeq_pi32(__m1, __m2);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_pi32(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR9
+ __vector signed int a, b, c;
+
+ a = (__vector signed int)vec_splats(__m1);
+ b = (__vector signed int)vec_splats(__m2);
+ c = (__vector signed int)vec_cmpgt(a, b);
+ return (__m64)((__vector long long)c)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __m1;
+ m2.as_m64 = __m2;
+
+ res.as_int[0] = (m1.as_int[0] > m2.as_int[0]) ? -1 : 0;
+ res.as_int[1] = (m1.as_int[1] > m2.as_int[1]) ? -1 : 0;
+
+ return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpgtd(__m64 __m1, __m64 __m2) {
+ return _mm_cmpgt_pi32(__m1, __m2);
+}
+
+#if _ARCH_PWR8
+/* Add the 8-bit values in M1 to the 8-bit values in M2 using signed
+ saturated arithmetic. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_pi8(__m64 __m1, __m64 __m2) {
+ __vector signed char a, b, c;
+
+ a = (__vector signed char)vec_splats(__m1);
+ b = (__vector signed char)vec_splats(__m2);
+ c = vec_adds(a, b);
+ return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddsb(__m64 __m1, __m64 __m2) {
+ return _mm_adds_pi8(__m1, __m2);
+}
+/* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
+ saturated arithmetic. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_pi16(__m64 __m1, __m64 __m2) {
+ __vector signed short a, b, c;
+
+ a = (__vector signed short)vec_splats(__m1);
+ b = (__vector signed short)vec_splats(__m2);
+ c = vec_adds(a, b);
+ return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddsw(__m64 __m1, __m64 __m2) {
+ return _mm_adds_pi16(__m1, __m2);
+}
+/* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
+ saturated arithmetic. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_pu8(__m64 __m1, __m64 __m2) {
+ __vector unsigned char a, b, c;
+
+ a = (__vector unsigned char)vec_splats(__m1);
+ b = (__vector unsigned char)vec_splats(__m2);
+ c = vec_adds(a, b);
+ return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddusb(__m64 __m1, __m64 __m2) {
+ return _mm_adds_pu8(__m1, __m2);
+}
+
+/* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
+ saturated arithmetic. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_pu16(__m64 __m1, __m64 __m2) {
+ __vector unsigned short a, b, c;
+
+ a = (__vector unsigned short)vec_splats(__m1);
+ b = (__vector unsigned short)vec_splats(__m2);
+ c = vec_adds(a, b);
+ return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddusw(__m64 __m1, __m64 __m2) {
+ return _mm_adds_pu16(__m1, __m2);
+}
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed
+ saturating arithmetic. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_pi8(__m64 __m1, __m64 __m2) {
+ __vector signed char a, b, c;
+
+ a = (__vector signed char)vec_splats(__m1);
+ b = (__vector signed char)vec_splats(__m2);
+ c = vec_subs(a, b);
+ return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubsb(__m64 __m1, __m64 __m2) {
+ return _mm_subs_pi8(__m1, __m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+ signed saturating arithmetic. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_pi16(__m64 __m1, __m64 __m2) {
+ __vector signed short a, b, c;
+
+ a = (__vector signed short)vec_splats(__m1);
+ b = (__vector signed short)vec_splats(__m2);
+ c = vec_subs(a, b);
+ return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubsw(__m64 __m1, __m64 __m2) {
+ return _mm_subs_pi16(__m1, __m2);
+}
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
+ unsigned saturating arithmetic. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_pu8(__m64 __m1, __m64 __m2) {
+ __vector unsigned char a, b, c;
+
+ a = (__vector unsigned char)vec_splats(__m1);
+ b = (__vector unsigned char)vec_splats(__m2);
+ c = vec_subs(a, b);
+ return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubusb(__m64 __m1, __m64 __m2) {
+ return _mm_subs_pu8(__m1, __m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+ unsigned saturating arithmetic. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_pu16(__m64 __m1, __m64 __m2) {
+ __vector unsigned short a, b, c;
+
+ a = (__vector unsigned short)vec_splats(__m1);
+ b = (__vector unsigned short)vec_splats(__m2);
+ c = vec_subs(a, b);
+ return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubusw(__m64 __m1, __m64 __m2) {
+ return _mm_subs_pu16(__m1, __m2);
+}
+
+/* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
+ four 32-bit intermediate results, which are then summed by pairs to
+ produce two 32-bit results. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_madd_pi16(__m64 __m1, __m64 __m2) {
+ __vector signed short a, b;
+ __vector signed int c;
+ __vector signed int zero = {0, 0, 0, 0};
+
+ a = (__vector signed short)vec_splats(__m1);
+ b = (__vector signed short)vec_splats(__m2);
+ c = vec_vmsumshm(a, b, zero);
+ return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pmaddwd(__m64 __m1, __m64 __m2) {
+ return _mm_madd_pi16(__m1, __m2);
+}
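+
+/* A worked example (illustrative only): _mm_madd_pi16 on {1, 2, 3, 4}
+   and {10, 20, 30, 40} forms the products {10, 40, 90, 160} and sums
+   adjacent pairs, giving the two 32-bit results {50, 250}.  */
+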
+/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
+ M2 and produce the high 16 bits of the 32-bit results. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mulhi_pi16(__m64 __m1, __m64 __m2) {
+ __vector signed short a, b;
+ __vector signed short c;
+ __vector signed int w0, w1;
+ __vector unsigned char xform1 = {
+#ifdef __LITTLE_ENDIAN__
+ 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A,
+ 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+#else
+ 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15, 0x00,
+ 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15
+#endif
+ };
+
+ a = (__vector signed short)vec_splats(__m1);
+ b = (__vector signed short)vec_splats(__m2);
+
+ w0 = vec_vmulesh(a, b);
+ w1 = vec_vmulosh(a, b);
+ c = (__vector signed short)vec_perm(w0, w1, xform1);
+
+ return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pmulhw(__m64 __m1, __m64 __m2) {
+ return _mm_mulhi_pi16(__m1, __m2);
+}
+
+/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
+ the low 16 bits of the results. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mullo_pi16(__m64 __m1, __m64 __m2) {
+ __vector signed short a, b, c;
+
+ a = (__vector signed short)vec_splats(__m1);
+ b = (__vector signed short)vec_splats(__m2);
+ c = a * b;
+ return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pmullw(__m64 __m1, __m64 __m2) {
+ return _mm_mullo_pi16(__m1, __m2);
+}
+
+/* Shift four 16-bit values in M left by COUNT. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sll_pi16(__m64 __m, __m64 __count) {
+ __vector signed short m, r;
+ __vector unsigned short c;
+
+ if (__count <= 15) {
+ m = (__vector signed short)vec_splats(__m);
+ c = (__vector unsigned short)vec_splats((unsigned short)__count);
+ r = vec_sl(m, (__vector unsigned short)c);
+ return (__m64)((__vector long long)r)[0];
+ } else
+ return (0);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psllw(__m64 __m, __m64 __count) {
+ return _mm_sll_pi16(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_slli_pi16(__m64 __m, int __count) {
+ /* Promote int to long then invoke mm_sll_pi16. */
+ return _mm_sll_pi16(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psllwi(__m64 __m, int __count) {
+ return _mm_slli_pi16(__m, __count);
+}
+
+/* Shift two 32-bit values in M left by COUNT. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sll_pi32(__m64 __m, __m64 __count) {
+ __m64_union m, res;
+
+ m.as_m64 = __m;
+
+ res.as_int[0] = m.as_int[0] << __count;
+ res.as_int[1] = m.as_int[1] << __count;
+ return (res.as_m64);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pslld(__m64 __m, __m64 __count) {
+ return _mm_sll_pi32(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_slli_pi32(__m64 __m, int __count) {
+ /* Promote int to long then invoke mm_sll_pi32. */
+ return _mm_sll_pi32(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pslldi(__m64 __m, int __count) {
+ return _mm_slli_pi32(__m, __count);
+}
+
+/* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sra_pi16(__m64 __m, __m64 __count) {
+ __vector signed short m, r;
+ __vector unsigned short c;
+
+ if (__count <= 15) {
+ m = (__vector signed short)vec_splats(__m);
+ c = (__vector unsigned short)vec_splats((unsigned short)__count);
+ r = vec_sra(m, (__vector unsigned short)c);
+ return (__m64)((__vector long long)r)[0];
+ } else
+ return (0);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psraw(__m64 __m, __m64 __count) {
+ return _mm_sra_pi16(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srai_pi16(__m64 __m, int __count) {
+  /* Promote int to long then invoke mm_sra_pi16. */
+ return _mm_sra_pi16(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrawi(__m64 __m, int __count) {
+ return _mm_srai_pi16(__m, __count);
+}
+
+/* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sra_pi32(__m64 __m, __m64 __count) {
+ __m64_union m, res;
+
+ m.as_m64 = __m;
+
+ res.as_int[0] = m.as_int[0] >> __count;
+ res.as_int[1] = m.as_int[1] >> __count;
+ return (res.as_m64);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrad(__m64 __m, __m64 __count) {
+ return _mm_sra_pi32(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srai_pi32(__m64 __m, int __count) {
+ /* Promote int to long then invoke mm_sra_pi32. */
+ return _mm_sra_pi32(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psradi(__m64 __m, int __count) {
+ return _mm_srai_pi32(__m, __count);
+}
+
+/* Shift four 16-bit values in M right by COUNT; shift in zeros. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srl_pi16(__m64 __m, __m64 __count) {
+ __vector unsigned short m, r;
+ __vector unsigned short c;
+
+ if (__count <= 15) {
+ m = (__vector unsigned short)vec_splats(__m);
+ c = (__vector unsigned short)vec_splats((unsigned short)__count);
+ r = vec_sr(m, (__vector unsigned short)c);
+ return (__m64)((__vector long long)r)[0];
+ } else
+ return (0);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrlw(__m64 __m, __m64 __count) {
+ return _mm_srl_pi16(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srli_pi16(__m64 __m, int __count) {
+  /* Promote int to long then invoke mm_srl_pi16. */
+ return _mm_srl_pi16(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrlwi(__m64 __m, int __count) {
+ return _mm_srli_pi16(__m, __count);
+}
+
+/* Shift two 32-bit values in M right by COUNT; shift in zeros. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srl_pi32(__m64 __m, __m64 __count) {
+ __m64_union m, res;
+
+ m.as_m64 = __m;
+
+ res.as_int[0] = (unsigned int)m.as_int[0] >> __count;
+ res.as_int[1] = (unsigned int)m.as_int[1] >> __count;
+ return (res.as_m64);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrld(__m64 __m, __m64 __count) {
+ return _mm_srl_pi32(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srli_pi32(__m64 __m, int __count) {
+ /* Promote int to long then invoke mm_srl_pi32. */
+ return _mm_srl_pi32(__m, __count);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrldi(__m64 __m, int __count) {
+ return _mm_srli_pi32(__m, __count);
+}
+#endif /* _ARCH_PWR8 */
+
+/* Creates a vector of two 32-bit values; I0 is least significant. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_pi32(int __i1, int __i0) {
+ __m64_union res;
+
+ res.as_int[0] = __i0;
+ res.as_int[1] = __i1;
+ return (res.as_m64);
+}
+
+/* Creates a vector of four 16-bit values; W0 is least significant. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_pi16(short __w3, short __w2, short __w1, short __w0) {
+ __m64_union res;
+
+ res.as_short[0] = __w0;
+ res.as_short[1] = __w1;
+ res.as_short[2] = __w2;
+ res.as_short[3] = __w3;
+ return (res.as_m64);
+}
+
+/* Creates a vector of eight 8-bit values; B0 is least significant. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3,
+ char __b2, char __b1, char __b0) {
+ __m64_union res;
+
+ res.as_char[0] = __b0;
+ res.as_char[1] = __b1;
+ res.as_char[2] = __b2;
+ res.as_char[3] = __b3;
+ res.as_char[4] = __b4;
+ res.as_char[5] = __b5;
+ res.as_char[6] = __b6;
+ res.as_char[7] = __b7;
+ return (res.as_m64);
+}
+
+/* Similar, but with the arguments in reverse order. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_pi32(int __i0, int __i1) {
+ __m64_union res;
+
+ res.as_int[0] = __i0;
+ res.as_int[1] = __i1;
+ return (res.as_m64);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_pi16(short __w0, short __w1, short __w2, short __w3) {
+ return _mm_set_pi16(__w3, __w2, __w1, __w0);
+}
+
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4,
+ char __b5, char __b6, char __b7) {
+ return _mm_set_pi8(__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
+}
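+
+/* Illustrative note: _mm_set_pi32 (1, 2) and _mm_setr_pi32 (2, 1)
+   build the same __m64 value, with 2 in the least-significant 32-bit
+   element; the _mm_setr_* variants simply take their arguments in
+   memory order.  */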
+
+/* Creates a vector of two 32-bit values, both elements containing I. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_pi32(int __i) {
+ __m64_union res;
+
+ res.as_int[0] = __i;
+ res.as_int[1] = __i;
+ return (res.as_m64);
+}
+
+/* Creates a vector of four 16-bit values, all elements containing W. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_pi16(short __w) {
+#if _ARCH_PWR9
+ __vector signed short w;
+
+ w = (__vector signed short)vec_splats(__w);
+ return (__m64)((__vector long long)w)[0];
+#else
+ __m64_union res;
+
+ res.as_short[0] = __w;
+ res.as_short[1] = __w;
+ res.as_short[2] = __w;
+ res.as_short[3] = __w;
+ return (res.as_m64);
+#endif
+}
+
+/* Creates a vector of eight 8-bit values, all elements containing B. */
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_pi8(signed char __b) {
+#if _ARCH_PWR8
+ __vector signed char b;
+
+ b = (__vector signed char)vec_splats(__b);
+ return (__m64)((__vector long long)b)[0];
+#else
+ __m64_union res;
+
+ res.as_char[0] = __b;
+ res.as_char[1] = __b;
+ res.as_char[2] = __b;
+ res.as_char[3] = __b;
+ res.as_char[4] = __b;
+ res.as_char[5] = __b;
+ res.as_char[6] = __b;
+ res.as_char[7] = __b;
+ return (res.as_m64);
+#endif
+}
+#endif /* _MMINTRIN_H_INCLUDED */
diff --git a/lib/Headers/ppc_wrappers/xmmintrin.h b/lib/Headers/ppc_wrappers/xmmintrin.h
new file mode 100644
index 000000000000..1b322b66519a
--- /dev/null
+++ b/lib/Headers/ppc_wrappers/xmmintrin.h
@@ -0,0 +1,1838 @@
+/*===---- xmmintrin.h - Implementation of SSE intrinsics on PowerPC --------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+ User Guide and Reference, version 9.0. */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header file is to help porting code using Intel intrinsics
+ explicitly from x86_64 to powerpc64/powerpc64le.
+
+   Since the X86 SSE intrinsics mainly handle the __m128 type, PowerPC
+   VMX/VSX ISA is a good match for vector float SIMD operations.
+   However, scalar float operations in vector (XMM) registers require
+ the POWER8 VSX ISA (2.07) level. There are differences for data
+ format and placement of float scalars in the vector register, which
+ require extra steps to match SSE scalar float semantics on POWER.
+
+   It should be noted that there are significant differences between
+   the X86_64 MXCSR and the PowerISA FPSCR/VSCR registers. It is
+   recommended to use the portable <fenv.h> interface instead of
+   accessing MXCSR directly.
+
+ Most SSE scalar float intrinsic operations can be performed more
+ efficiently as C language float scalar operations or optimized to
+ use vector SIMD operations. We recommend this for new applications. */
+#error "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
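+
+/* A minimal usage sketch (not part of the original header): a port that
+   accepts the caveats above would typically define the macro on the
+   command line, e.g. something like
+       clang -DNO_WARN_X86_INTRINSICS -mcpu=power8 -c app.c
+   where app.c is a hypothetical translation unit that includes
+   <xmmintrin.h>.  */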
+
+#ifndef _XMMINTRIN_H_INCLUDED
+#define _XMMINTRIN_H_INCLUDED
+
+/* Define four value permute mask */
+#define _MM_SHUFFLE(w,x,y,z) (((w) << 6) | ((x) << 4) | ((y) << 2) | (z))
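+/* For example, _MM_SHUFFLE (3, 2, 1, 0) expands to
+   (3 << 6) | (2 << 4) | (1 << 2) | 0 == 0xE4.  */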
+
+#include <altivec.h>
+
+/* Avoid collisions between altivec.h and strict adherence to C++ and
+ C11 standards. This should eventually be done inside altivec.h itself,
+ but only after testing a full distro build. */
+#if defined(__STRICT_ANSI__) && (defined(__cplusplus) || \
+ (defined(__STDC_VERSION__) && \
+ __STDC_VERSION__ >= 201112L))
+#undef vector
+#undef pixel
+#undef bool
+#endif
+
+/* We need type definitions from the MMX header file. */
+#include <mmintrin.h>
+
+/* Get _mm_malloc () and _mm_free (). */
+#if __STDC_HOSTED__
+#include <mm_malloc.h>
+#endif
+
+/* The Intel API is flexible enough that we must allow aliasing with other
+ vector types, and their scalar components. */
+typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));
+
+/* Unaligned version of the same type. */
+typedef float __m128_u __attribute__ ((__vector_size__ (16), __may_alias__,
+ __aligned__ (1)));
+
+/* Internal data types for implementing the intrinsics. */
+typedef float __v4sf __attribute__ ((__vector_size__ (16)));
+
+/* Create an undefined vector. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_undefined_ps (void)
+{
+ __m128 __Y = __Y;
+ return __Y;
+}
+
+/* Create a vector of zeros. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setzero_ps (void)
+{
+ return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
+}
+
+/* Load four SPFP values from P. The address must be 16-byte aligned. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_load_ps (float const *__P)
+{
+ return ((__m128)vec_ld(0, (__v4sf*)__P));
+}
+
+/* Load four SPFP values from P. The address need not be 16-byte aligned. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadu_ps (float const *__P)
+{
+ return (vec_vsx_ld(0, __P));
+}
+
+/* Load four SPFP values in reverse order. The address must be aligned. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadr_ps (float const *__P)
+{
+ __v4sf __tmp;
+ __m128 result;
+ static const __vector unsigned char permute_vector =
+ { 0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B, 0x14, 0x15, 0x16,
+ 0x17, 0x10, 0x11, 0x12, 0x13 };
+
+ __tmp = vec_ld (0, (__v4sf *) __P);
+ result = (__m128) vec_perm (__tmp, __tmp, permute_vector);
+ return result;
+}
+
+/* Create a vector with all four elements equal to F. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set1_ps (float __F)
+{
+ return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_ps1 (float __F)
+{
+ return _mm_set1_ps (__F);
+}
+
+/* Create the vector [Z Y X W]. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
+{
+ return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
+}
+
+/* Create the vector [W X Y Z]. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setr_ps (float __Z, float __Y, float __X, float __W)
+{
+ return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
+}
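+
+/* Illustrative note: _mm_set_ps (3.0f, 2.0f, 1.0f, 0.0f) and
+   _mm_setr_ps (0.0f, 1.0f, 2.0f, 3.0f) both produce the vector
+   {0, 1, 2, 3} in element order, since _mm_setr_ps takes its
+   arguments lowest element first.  */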
+
+/* Store four SPFP values. The address must be 16-byte aligned. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_store_ps (float *__P, __m128 __A)
+{
+ vec_st((__v4sf)__A, 0, (__v4sf*)__P);
+}
+
+/* Store four SPFP values. The address need not be 16-byte aligned. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storeu_ps (float *__P, __m128 __A)
+{
+ *(__m128_u *)__P = __A;
+}
+
+/* Store four SPFP values in reverse order. The address must be aligned. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storer_ps (float *__P, __m128 __A)
+{
+ __v4sf __tmp;
+ static const __vector unsigned char permute_vector =
+ { 0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B, 0x14, 0x15, 0x16,
+ 0x17, 0x10, 0x11, 0x12, 0x13 };
+
+ __tmp = (__m128) vec_perm (__A, __A, permute_vector);
+
+ _mm_store_ps (__P, __tmp);
+}
+
+/* Store the lower SPFP value across four words. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_store1_ps (float *__P, __m128 __A)
+{
+ __v4sf __va = vec_splat((__v4sf)__A, 0);
+ _mm_store_ps (__P, __va);
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_store_ps1 (float *__P, __m128 __A)
+{
+ _mm_store1_ps (__P, __A);
+}
+
+/* Create a vector with element 0 as F and the rest zero. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_ss (float __F)
+{
+ return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
+}
+
+/* Sets the low SPFP value of A from the low value of B. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_move_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+
+ return (vec_sel ((__v4sf)__A, (__v4sf)__B, mask));
+}
+
+/* Create a vector with element 0 as *P and the rest zero. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_load_ss (float const *__P)
+{
+ return _mm_set_ss (*__P);
+}
+
+/* Stores the lower SPFP value. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_store_ss (float *__P, __m128 __A)
+{
+ *__P = ((__v4sf)__A)[0];
+}
+
+/* Perform the respective operation on the lower SPFP (single-precision
+ floating-point) values of A and B; the upper three SPFP values are
+ passed through from A. */
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_add_ss (__m128 __A, __m128 __B)
+{
+#ifdef _ARCH_PWR7
+ __m128 a, b, c;
+ static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just the lower float)
+     results. So to ensure we don't generate spurious exceptions
+     (from the upper float values) we splat the lower float
+     before we do the operation. */
+ a = vec_splat (__A, 0);
+ b = vec_splat (__B, 0);
+ c = a + b;
+ /* Then we merge the lower float result with the original upper
+ float elements from __A. */
+ return (vec_sel (__A, c, mask));
+#else
+ __A[0] = __A[0] + __B[0];
+ return (__A);
+#endif
+}
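+
+/* For example (illustrative only), _mm_add_ss on {1, 2, 3, 4} and
+   {10, 20, 30, 40} produces {11, 2, 3, 4}: only element 0 is added,
+   the upper three elements are passed through from __A.  */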
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sub_ss (__m128 __A, __m128 __B)
+{
+#ifdef _ARCH_PWR7
+ __m128 a, b, c;
+ static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just the lower float)
+     results. So to ensure we don't generate spurious exceptions
+     (from the upper float values) we splat the lower float
+     before we do the operation. */
+ a = vec_splat (__A, 0);
+ b = vec_splat (__B, 0);
+ c = a - b;
+ /* Then we merge the lower float result with the original upper
+ float elements from __A. */
+ return (vec_sel (__A, c, mask));
+#else
+ __A[0] = __A[0] - __B[0];
+ return (__A);
+#endif
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mul_ss (__m128 __A, __m128 __B)
+{
+#ifdef _ARCH_PWR7
+ __m128 a, b, c;
+ static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just the lower float)
+     results. So to ensure we don't generate spurious exceptions
+     (from the upper float values) we splat the lower float
+     before we do the operation. */
+ a = vec_splat (__A, 0);
+ b = vec_splat (__B, 0);
+ c = a * b;
+ /* Then we merge the lower float result with the original upper
+ float elements from __A. */
+ return (vec_sel (__A, c, mask));
+#else
+ __A[0] = __A[0] * __B[0];
+ return (__A);
+#endif
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_div_ss (__m128 __A, __m128 __B)
+{
+#ifdef _ARCH_PWR7
+ __m128 a, b, c;
+ static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just the lower float)
+     results. So to ensure we don't generate spurious exceptions
+     (from the upper float values) we splat the lower float
+     before we do the operation. */
+ a = vec_splat (__A, 0);
+ b = vec_splat (__B, 0);
+ c = a / b;
+ /* Then we merge the lower float result with the original upper
+ float elements from __A. */
+ return (vec_sel (__A, c, mask));
+#else
+ __A[0] = __A[0] / __B[0];
+ return (__A);
+#endif
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sqrt_ss (__m128 __A)
+{
+ __m128 a, c;
+ static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just the lower float)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper float values) we splat the lower float
+   * before we do the operation. */
+ a = vec_splat (__A, 0);
+ c = vec_sqrt (a);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return (vec_sel (__A, c, mask));
+}
+
+/* Perform the respective operation on the four SPFP values in A and B. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_add_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) ((__v4sf)__A + (__v4sf)__B);
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sub_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) ((__v4sf)__A - (__v4sf)__B);
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mul_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) ((__v4sf)__A * (__v4sf)__B);
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_div_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) ((__v4sf)__A / (__v4sf)__B);
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sqrt_ps (__m128 __A)
+{
+ return (vec_sqrt ((__v4sf)__A));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rcp_ps (__m128 __A)
+{
+ return (vec_re ((__v4sf)__A));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rsqrt_ps (__m128 __A)
+{
+ return (vec_rsqrte (__A));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rcp_ss (__m128 __A)
+{
+ __m128 a, c;
+ static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just the lower float)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper float values) we splat the lower float
+   * before we do the operation. */
+ a = vec_splat (__A, 0);
+ c = _mm_rcp_ps (a);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return (vec_sel (__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rsqrt_ss (__m128 __A)
+{
+ __m128 a, c;
+ static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just the lower float)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper float values) we splat the lower float
+   * before we do the operation. */
+ a = vec_splat (__A, 0);
+ c = vec_rsqrte (a);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return (vec_sel (__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_ss (__m128 __A, __m128 __B)
+{
+ __v4sf a, b, c;
+ static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just the lower float)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper float values) we splat the lower float
+   * before we do the operation. */
+ a = vec_splat ((__v4sf)__A, 0);
+ b = vec_splat ((__v4sf)__B, 0);
+ c = vec_min (a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return (vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_ss (__m128 __A, __m128 __B)
+{
+ __v4sf a, b, c;
+ static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+  /* PowerISA VSX does not allow partial (for just the lower float)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper float values) we splat the lower float
+   * before we do the operation. */
+ a = vec_splat (__A, 0);
+ b = vec_splat (__B, 0);
+ c = vec_max (a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return (vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_ps (__m128 __A, __m128 __B)
+{
+ __vector __bool int m = vec_cmpgt ((__v4sf) __B, (__v4sf) __A);
+ return vec_sel (__B, __A, m);
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_ps (__m128 __A, __m128 __B)
+{
+ __vector __bool int m = vec_cmpgt ((__v4sf) __A, (__v4sf) __B);
+ return vec_sel (__B, __A, m);
+}
+
+/* Perform logical bit-wise operations on 128-bit values. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_and_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_and ((__v4sf)__A, (__v4sf)__B));
+// return __builtin_ia32_andps (__A, __B);
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_andnot_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_andc ((__v4sf)__B, (__v4sf)__A));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_or_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_or ((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_xor_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_xor ((__v4sf)__A, (__v4sf)__B));
+}
+
+/* Perform a comparison on the four SPFP values of A and B. For each
+ element, if the comparison is true, place a mask of all ones in the
+ result, otherwise a mask of zeros. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpeq_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_cmpeq ((__v4sf)__A,(__v4sf) __B));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_cmplt ((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmple_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_cmple ((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpgt_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_cmpgt ((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpge_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_cmpge ((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpneq_ps (__m128 __A, __m128 __B)
+{
+ __v4sf temp = (__v4sf ) vec_cmpeq ((__v4sf) __A, (__v4sf)__B);
+ return ((__m128)vec_nor (temp, temp));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnlt_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_cmpge ((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnle_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_cmpgt ((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpngt_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_cmple ((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnge_ps (__m128 __A, __m128 __B)
+{
+ return ((__m128)vec_cmplt ((__v4sf)__A, (__v4sf)__B));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpord_ps (__m128 __A, __m128 __B)
+{
+ __vector unsigned int a, b;
+ __vector unsigned int c, d;
+ static const __vector unsigned int float_exp_mask =
+ { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
+
+ a = (__vector unsigned int) vec_abs ((__v4sf)__A);
+ b = (__vector unsigned int) vec_abs ((__v4sf)__B);
+ c = (__vector unsigned int) vec_cmpgt (float_exp_mask, a);
+ d = (__vector unsigned int) vec_cmpgt (float_exp_mask, b);
+ return ((__m128 ) vec_and (c, d));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpunord_ps (__m128 __A, __m128 __B)
+{
+ __vector unsigned int a, b;
+ __vector unsigned int c, d;
+ static const __vector unsigned int float_exp_mask =
+ { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
+
+ a = (__vector unsigned int) vec_abs ((__v4sf)__A);
+ b = (__vector unsigned int) vec_abs ((__v4sf)__B);
+ c = (__vector unsigned int) vec_cmpgt (a, float_exp_mask);
+ d = (__vector unsigned int) vec_cmpgt (b, float_exp_mask);
+ return ((__m128 ) vec_or (c, d));
+}
+
+/* Perform a comparison on the lower SPFP values of A and B. If the
+ comparison is true, place a mask of all ones in the result, otherwise a
+ mask of zeros. The upper three SPFP values are passed through from A. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpeq_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+ __v4sf a, b, c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+ a = vec_splat ((__v4sf) __A, 0);
+ b = vec_splat ((__v4sf) __B, 0);
+ c = (__v4sf) vec_cmpeq(a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+ __v4sf a, b, c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+ a = vec_splat ((__v4sf) __A, 0);
+ b = vec_splat ((__v4sf) __B, 0);
+ c = (__v4sf) vec_cmplt(a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmple_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+ __v4sf a, b, c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+ a = vec_splat ((__v4sf) __A, 0);
+ b = vec_splat ((__v4sf) __B, 0);
+ c = (__v4sf) vec_cmple(a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpgt_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+ __v4sf a, b, c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+ a = vec_splat ((__v4sf) __A, 0);
+ b = vec_splat ((__v4sf) __B, 0);
+ c = (__v4sf) vec_cmpgt(a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpge_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+ __v4sf a, b, c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+ a = vec_splat ((__v4sf) __A, 0);
+ b = vec_splat ((__v4sf) __B, 0);
+ c = (__v4sf) vec_cmpge(a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpneq_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+ __v4sf a, b, c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+ a = vec_splat ((__v4sf) __A, 0);
+ b = vec_splat ((__v4sf) __B, 0);
+ c = (__v4sf) vec_cmpeq(a, b);
+ c = vec_nor (c, c);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnlt_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+ __v4sf a, b, c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+ a = vec_splat ((__v4sf) __A, 0);
+ b = vec_splat ((__v4sf) __B, 0);
+ c = (__v4sf) vec_cmpge(a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnle_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+ __v4sf a, b, c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+ a = vec_splat ((__v4sf) __A, 0);
+ b = vec_splat ((__v4sf) __B, 0);
+ c = (__v4sf) vec_cmpgt(a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpngt_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+ __v4sf a, b, c;
+  /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+   * (from the upper elements) we splat the lower float
+   * before we do the operation. */
+ a = vec_splat ((__v4sf) __A, 0);
+ b = vec_splat ((__v4sf) __B, 0);
+ c = (__v4sf) vec_cmple(a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpnge_ss (__m128 __A, __m128 __B)
+{
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+ __v4sf a, b, c;
+ /* PowerISA VMX does not allow partial (for just element 0)
+   * results. So to ensure we don't generate spurious exceptions
+ * (from the upper elements) we splat the lower float
+ * before we do the operation. */
+ a = vec_splat ((__v4sf) __A, 0);
+ b = vec_splat ((__v4sf) __B, 0);
+ c = (__v4sf) vec_cmplt(a, b);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpord_ss (__m128 __A, __m128 __B)
+{
+ __vector unsigned int a, b;
+ __vector unsigned int c, d;
+ static const __vector unsigned int float_exp_mask =
+ { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+
+ a = (__vector unsigned int) vec_abs ((__v4sf)__A);
+ b = (__vector unsigned int) vec_abs ((__v4sf)__B);
+ c = (__vector unsigned int) vec_cmpgt (float_exp_mask, a);
+ d = (__vector unsigned int) vec_cmpgt (float_exp_mask, b);
+ c = vec_and (c, d);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, (__v4sf)c, mask));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpunord_ss (__m128 __A, __m128 __B)
+{
+ __vector unsigned int a, b;
+ __vector unsigned int c, d;
+ static const __vector unsigned int float_exp_mask =
+ { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
+ static const __vector unsigned int mask =
+ { 0xffffffff, 0, 0, 0 };
+
+ a = (__vector unsigned int) vec_abs ((__v4sf)__A);
+ b = (__vector unsigned int) vec_abs ((__v4sf)__B);
+ c = (__vector unsigned int) vec_cmpgt (a, float_exp_mask);
+ d = (__vector unsigned int) vec_cmpgt (b, float_exp_mask);
+ c = vec_or (c, d);
+ /* Then we merge the lower float result with the original upper
+ * float elements from __A. */
+ return ((__m128)vec_sel ((__v4sf)__A, (__v4sf)c, mask));
+}
+
+/* Compare the lower SPFP values of A and B and return 1 if true
+ and 0 if false. */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comieq_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] == __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comilt_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] < __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comile_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] <= __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comigt_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] > __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comige_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] >= __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comineq_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] != __B[0]);
+}
+
+/* FIXME
+ * The _mm_ucomi??_ss implementations below are exactly the same as
+ * the _mm_comi??_ss ones because GCC for PowerPC only generates
+ * unordered compares (scalar and vector).
+ * Technically _mm_comieq_ss et al. should be using the ordered
+ * compare and signal for QNaNs.
+ * The _mm_ucomieq_ss et al. should be OK as is.
+ */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomieq_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] == __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomilt_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] < __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomile_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] <= __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomigt_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] > __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomige_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] >= __B[0]);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ucomineq_ss (__m128 __A, __m128 __B)
+{
+ return (__A[0] != __B[0]);
+}
+
+extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtss_f32 (__m128 __A)
+{
+ return ((__v4sf)__A)[0];
+}
+
+/* Convert the lower SPFP value to a 32-bit integer according to the current
+ rounding mode. */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtss_si32 (__m128 __A)
+{
+ __m64 res = 0;
+#ifdef _ARCH_PWR8
+ double dtmp;
+ __asm__(
+#ifdef __LITTLE_ENDIAN__
+ "xxsldwi %x0,%x0,%x0,3;\n"
+#endif
+ "xscvspdp %x2,%x0;\n"
+ "fctiw %2,%2;\n"
+ "mfvsrd %1,%x2;\n"
+ : "+wa" (__A),
+ "=r" (res),
+ "=f" (dtmp)
+ : );
+#else
+ res = __builtin_rint(__A[0]);
+#endif
+ return (res);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_ss2si (__m128 __A)
+{
+ return _mm_cvtss_si32 (__A);
+}
+
+/* Convert the lower SPFP value to a 64-bit integer according to the
+   current rounding mode. */
+
+/* Intel intrinsic. */
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtss_si64 (__m128 __A)
+{
+ __m64 res = 0;
+#ifdef _ARCH_PWR8
+ double dtmp;
+ __asm__(
+#ifdef __LITTLE_ENDIAN__
+ "xxsldwi %x0,%x0,%x0,3;\n"
+#endif
+ "xscvspdp %x2,%x0;\n"
+ "fctid %2,%2;\n"
+ "mfvsrd %1,%x2;\n"
+ : "+wa" (__A),
+ "=r" (res),
+ "=f" (dtmp)
+ : );
+#else
+ res = __builtin_llrint(__A[0]);
+#endif
+ return (res);
+}
+
+/* Microsoft intrinsic. */
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtss_si64x (__m128 __A)
+{
+ return _mm_cvtss_si64 ((__v4sf) __A);
+}
+
+/* Constants for use with _mm_prefetch. */
+enum _mm_hint
+{
+  /* _MM_HINT_ET is _MM_HINT_T with the 3rd bit set. */
+ _MM_HINT_ET0 = 7,
+ _MM_HINT_ET1 = 6,
+ _MM_HINT_T0 = 3,
+ _MM_HINT_T1 = 2,
+ _MM_HINT_T2 = 1,
+ _MM_HINT_NTA = 0
+};
+
+/* Loads one cache line from address P to a location "closer" to the
+ processor. The selector I specifies the type of prefetch operation. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_prefetch (const void *__P, enum _mm_hint __I)
+{
+  /* Current PowerPC hardware ignores the hint parameter. */
+ __builtin_prefetch (__P);
+}
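+
+/* A minimal usage sketch: _mm_prefetch (p, _MM_HINT_T0) for some
+   pointer p; on this implementation every hint value degenerates to a
+   plain __builtin_prefetch of the address.  */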
+
+/* Convert the two lower SPFP values to 32-bit integers according to the
+ current rounding mode. Return the integers in packed form. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtps_pi32 (__m128 __A)
+{
+ __v4sf temp, rounded;
+ __vector unsigned long long result;
+
+ /* Splat two lower SPFP values to both halves. */
+ temp = (__v4sf) vec_splat ((__vector long long)__A, 0);
+ rounded = vec_rint(temp);
+ result = (__vector unsigned long long) vec_cts (rounded, 0);
+
+ return (__m64) ((__vector long long) result)[0];
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_ps2pi (__m128 __A)
+{
+ return _mm_cvtps_pi32 (__A);
+}
+
+/* Truncate the lower SPFP value to a 32-bit integer. */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttss_si32 (__m128 __A)
+{
+ /* Extract the lower float element. */
+ float temp = __A[0];
+ /* truncate to 32-bit integer and return. */
+ return temp;
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtt_ss2si (__m128 __A)
+{
+ return _mm_cvttss_si32 (__A);
+}
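+
+/* Illustrative contrast (assuming the default round-to-nearest mode):
+   _mm_cvttss_si32 truncates toward zero, so -1.7f becomes -1, while
+   _mm_cvtss_si32 above honors the rounding mode and gives -2.  */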
+
+/* Intel intrinsic. */
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttss_si64 (__m128 __A)
+{
+ /* Extract the lower float element. */
+ float temp = __A[0];
+  /* Truncate to a 64-bit integer and return. */
+ return temp;
+}
+
+/* Microsoft intrinsic. */
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttss_si64x (__m128 __A)
+{
+ /* Extract the lower float element. */
+ float temp = __A[0];
+  /* Truncate to a 64-bit integer and return. */
+ return temp;
+}
+
+/* Truncate the two lower SPFP values to 32-bit integers. Return the
+ integers in packed form. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttps_pi32 (__m128 __A)
+{
+ __v4sf temp;
+ __vector unsigned long long result;
+
+ /* Splat two lower SPFP values to both halves. */
+ temp = (__v4sf) vec_splat ((__vector long long)__A, 0);
+ result = (__vector unsigned long long) vec_cts (temp, 0);
+
+ return (__m64) ((__vector long long) result)[0];
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtt_ps2pi (__m128 __A)
+{
+ return _mm_cvttps_pi32 (__A);
+}
+
+/* Convert B to a SPFP value and insert it as element zero in A. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi32_ss (__m128 __A, int __B)
+{
+ float temp = __B;
+ __A[0] = temp;
+
+ return __A;
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_si2ss (__m128 __A, int __B)
+{
+ return _mm_cvtsi32_ss (__A, __B);
+}
+
+/* Convert B to a SPFP value and insert it as element zero in A. */
+/* Intel intrinsic. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi64_ss (__m128 __A, long long __B)
+{
+ float temp = __B;
+ __A[0] = temp;
+
+ return __A;
+}
+
+/* Microsoft intrinsic. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi64x_ss (__m128 __A, long long __B)
+{
+ return _mm_cvtsi64_ss (__A, __B);
+}
+
+/* Convert the two 32-bit values in B to SPFP form and insert them
+ as the two lower elements in A. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpi32_ps (__m128 __A, __m64 __B)
+{
+ __vector signed int vm1;
+ __vector float vf1;
+
+ vm1 = (__vector signed int) (__vector unsigned long long) {__B, __B};
+ vf1 = (__vector float) vec_ctf (vm1, 0);
+
+ return ((__m128) (__vector unsigned long long)
+ { ((__vector unsigned long long)vf1) [0],
+ ((__vector unsigned long long)__A) [1]});
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvt_pi2ps (__m128 __A, __m64 __B)
+{
+ return _mm_cvtpi32_ps (__A, __B);
+}
+
+/* Convert the four signed 16-bit values in A to SPFP form. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpi16_ps (__m64 __A)
+{
+ __vector signed short vs8;
+ __vector signed int vi4;
+ __vector float vf1;
+
+ vs8 = (__vector signed short) (__vector unsigned long long) { __A, __A };
+ vi4 = vec_vupklsh (vs8);
+ vf1 = (__vector float) vec_ctf (vi4, 0);
+
+ return (__m128) vf1;
+}
+
+/* Convert the four unsigned 16-bit values in A to SPFP form. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpu16_ps (__m64 __A)
+{
+ const __vector unsigned short zero =
+ { 0, 0, 0, 0, 0, 0, 0, 0 };
+ __vector unsigned short vs8;
+ __vector unsigned int vi4;
+ __vector float vf1;
+
+ vs8 = (__vector unsigned short) (__vector unsigned long long) { __A, __A };
+ vi4 = (__vector unsigned int) vec_mergel
+#ifdef __LITTLE_ENDIAN__
+ (vs8, zero);
+#else
+ (zero, vs8);
+#endif
+ vf1 = (__vector float) vec_ctf (vi4, 0);
+
+ return (__m128) vf1;
+}
+
+/* Convert the low four signed 8-bit values in A to SPFP form. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpi8_ps (__m64 __A)
+{
+ __vector signed char vc16;
+ __vector signed short vs8;
+ __vector signed int vi4;
+ __vector float vf1;
+
+ vc16 = (__vector signed char) (__vector unsigned long long) { __A, __A };
+ vs8 = vec_vupkhsb (vc16);
+ vi4 = vec_vupkhsh (vs8);
+ vf1 = (__vector float) vec_ctf (vi4, 0);
+
+ return (__m128) vf1;
+}
+
+/* Convert the low four unsigned 8-bit values in A to SPFP form. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpu8_ps (__m64 __A)
+{
+ const __vector unsigned char zero =
+ { 0, 0, 0, 0, 0, 0, 0, 0 };
+ __vector unsigned char vc16;
+ __vector unsigned short vs8;
+ __vector unsigned int vi4;
+ __vector float vf1;
+
+ vc16 = (__vector unsigned char) (__vector unsigned long long) { __A, __A };
+#ifdef __LITTLE_ENDIAN__
+ vs8 = (__vector unsigned short) vec_mergel (vc16, zero);
+ vi4 = (__vector unsigned int) vec_mergeh (vs8,
+ (__vector unsigned short) zero);
+#else
+ vs8 = (__vector unsigned short) vec_mergel (zero, vc16);
+ vi4 = (__vector unsigned int) vec_mergeh ((__vector unsigned short) zero,
+ vs8);
+#endif
+ vf1 = (__vector float) vec_ctf (vi4, 0);
+
+ return (__m128) vf1;
+}
+
+/* Convert the four signed 32-bit values in A and B to SPFP form. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpi32x2_ps (__m64 __A, __m64 __B)
+{
+ __vector signed int vi4;
+ __vector float vf4;
+
+ vi4 = (__vector signed int) (__vector unsigned long long) { __A, __B };
+ vf4 = (__vector float) vec_ctf (vi4, 0);
+ return (__m128) vf4;
+}
+
+/* Convert the four SPFP values in A to four signed 16-bit integers. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtps_pi16 (__m128 __A)
+{
+ __v4sf rounded;
+ __vector signed int temp;
+ __vector unsigned long long result;
+
+ rounded = vec_rint(__A);
+ temp = vec_cts (rounded, 0);
+ result = (__vector unsigned long long) vec_pack (temp, temp);
+
+ return (__m64) ((__vector long long) result)[0];
+}
+
+/* Convert the four SPFP values in A to four signed 8-bit integers. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtps_pi8 (__m128 __A)
+{
+ __v4sf rounded;
+ __vector signed int tmp_i;
+ static const __vector signed int zero = {0, 0, 0, 0};
+ __vector signed short tmp_s;
+ __vector signed char res_v;
+
+ rounded = vec_rint(__A);
+ tmp_i = vec_cts (rounded, 0);
+ tmp_s = vec_pack (tmp_i, zero);
+ res_v = vec_pack (tmp_s, tmp_s);
+ return (__m64) ((__vector long long) res_v)[0];
+}
+
+/* Selects four specific SPFP values from A and B based on MASK. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask)
+{
+ unsigned long element_selector_10 = __mask & 0x03;
+ unsigned long element_selector_32 = (__mask >> 2) & 0x03;
+ unsigned long element_selector_54 = (__mask >> 4) & 0x03;
+ unsigned long element_selector_76 = (__mask >> 6) & 0x03;
+ static const unsigned int permute_selectors[4] =
+ {
+#ifdef __LITTLE_ENDIAN__
+ 0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C
+#else
+ 0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F
+#endif
+ };
+ __vector unsigned int t;
+
+ t[0] = permute_selectors[element_selector_10];
+ t[1] = permute_selectors[element_selector_32];
+ t[2] = permute_selectors[element_selector_54] + 0x10101010;
+ t[3] = permute_selectors[element_selector_76] + 0x10101010;
+ return vec_perm ((__v4sf) __A, (__v4sf)__B, (__vector unsigned char)t);
+}
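+
+/* A worked example (illustrative only): with
+     a = _mm_setr_ps (0.0f, 1.0f, 2.0f, 3.0f) and
+     b = _mm_setr_ps (4.0f, 5.0f, 6.0f, 7.0f),
+   _mm_shuffle_ps (a, b, _MM_SHUFFLE (1, 0, 3, 2)) yields {2, 3, 4, 5}:
+   the two low selector fields pick elements from A, the two high
+   fields pick elements from B.  */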
+
+/* Selects and interleaves the upper two SPFP values from A and B. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpackhi_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) vec_vmrglw ((__v4sf) __A, (__v4sf)__B);
+}
+
+/* Selects and interleaves the lower two SPFP values from A and B. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_unpacklo_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) vec_vmrghw ((__v4sf) __A, (__v4sf)__B);
+}
+
+/* Sets the upper two SPFP values with 64-bits of data loaded from P;
+ the lower two values are passed through from A. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadh_pi (__m128 __A, __m64 const *__P)
+{
+ __vector unsigned long long __a = (__vector unsigned long long)__A;
+ __vector unsigned long long __p = vec_splats(*__P);
+ __a [1] = __p [1];
+
+ return (__m128)__a;
+}
+
+/* Stores the upper two SPFP values of A into P. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storeh_pi (__m64 *__P, __m128 __A)
+{
+ __vector unsigned long long __a = (__vector unsigned long long) __A;
+
+ *__P = __a[1];
+}
+
+/* Moves the upper two values of B into the lower two values of A. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movehl_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) vec_mergel ((__vector unsigned long long)__B,
+ (__vector unsigned long long)__A);
+}
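+
+/* Illustrative note: _mm_movehl_ps (a, b) yields {b[2], b[3], a[2], a[3]},
+   i.e. the upper half of B followed by the upper half of A.  */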
+
+/* Moves the lower two values of B into the upper two values of A. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movelh_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) vec_mergeh ((__vector unsigned long long)__A,
+ (__vector unsigned long long)__B);
+}
+
+/* Sets the lower two SPFP values with 64-bits of data loaded from P;
+ the upper two values are passed through from A. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadl_pi (__m128 __A, __m64 const *__P)
+{
+ __vector unsigned long long __a = (__vector unsigned long long)__A;
+ __vector unsigned long long __p = vec_splats(*__P);
+ __a [0] = __p [0];
+
+ return (__m128)__a;
+}
+
+/* Stores the lower two SPFP values of A into P. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storel_pi (__m64 *__P, __m128 __A)
+{
+ __vector unsigned long long __a = (__vector unsigned long long) __A;
+
+ *__P = __a[0];
+}
+
+#ifdef _ARCH_PWR8
+/* Intrinsic functions that require PowerISA 2.07 minimum. */
+
+/* Creates a 4-bit mask from the most significant bits of the SPFP values. */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movemask_ps (__m128 __A)
+{
+ __vector unsigned long long result;
+ static const __vector unsigned int perm_mask =
+ {
+#ifdef __LITTLE_ENDIAN__
+ 0x00204060, 0x80808080, 0x80808080, 0x80808080
+#else
+ 0x80808080, 0x80808080, 0x80808080, 0x00204060
+#endif
+ };
+
+ result = ((__vector unsigned long long)
+ vec_vbpermq ((__vector unsigned char) __A,
+ (__vector unsigned char) perm_mask));
+
+#ifdef __LITTLE_ENDIAN__
+ return result[1];
+#else
+ return result[0];
+#endif
+}
+#endif /* _ARCH_PWR8 */
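+
+/* For example (illustrative only), _mm_movemask_ps on the vector
+   {-1.0f, 2.0f, -3.0f, 4.0f} returns 0x5: bits 0 and 2 of the result
+   are the sign bits of elements 0 and 2.  */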
+
+/* Create a vector with all four elements equal to *P. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_load1_ps (float const *__P)
+{
+ return _mm_set1_ps (*__P);
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_load_ps1 (float const *__P)
+{
+ return _mm_load1_ps (__P);
+}
+
+/* Extracts one of the four words of A. The selector N must be immediate. */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_extract_pi16 (__m64 const __A, int const __N)
+{
+ unsigned int shiftr = __N & 3;
+#ifdef __BIG_ENDIAN__
+ shiftr = 3 - shiftr;
+#endif
+
+ return ((__A >> (shiftr * 16)) & 0xffff);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pextrw (__m64 const __A, int const __N)
+{
+ return _mm_extract_pi16 (__A, __N);
+}
+
+/* Inserts word D into one of four words of A. The selector N must be
+ immediate. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
+{
+ const int shiftl = (__N & 3) * 16;
+ const __m64 shiftD = (const __m64) __D << shiftl;
+ const __m64 mask = 0xffffUL << shiftl;
+ __m64 result = (__A & (~mask)) | (shiftD & mask);
+
+ return (result);
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pinsrw (__m64 const __A, int const __D, int const __N)
+{
+ return _mm_insert_pi16 (__A, __D, __N);
+}
+
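+/* A minimal scalar sketch of the shift-and-mask insert above, for
+   illustration only; a plain unsigned 64-bit integer stands in for __m64
+   and the helper name is not part of this header. */
+static inline unsigned long long
+__insert_u16_scalar (unsigned long long __a, unsigned short __d, int __n)
+{
+ const int __shift = (__n & 3) * 16;
+ const unsigned long long __mask = 0xffffULL << __shift;
+ return (__a & ~__mask) | ((unsigned long long) __d << __shift);
+}
+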
+/* Compute the element-wise maximum of signed 16-bit values. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_pi16 (__m64 __A, __m64 __B)
+{
+#if _ARCH_PWR8
+ __vector signed short a, b, r;
+ __vector __bool short c;
+
+ a = (__vector signed short)vec_splats (__A);
+ b = (__vector signed short)vec_splats (__B);
+ c = (__vector __bool short)vec_cmpgt (a, b);
+ r = vec_sel (b, a, c);
+ return (__m64) ((__vector long long) r)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __A;
+ m2.as_m64 = __B;
+
+ res.as_short[0] =
+ (m1.as_short[0] > m2.as_short[0]) ? m1.as_short[0] : m2.as_short[0];
+ res.as_short[1] =
+ (m1.as_short[1] > m2.as_short[1]) ? m1.as_short[1] : m2.as_short[1];
+ res.as_short[2] =
+ (m1.as_short[2] > m2.as_short[2]) ? m1.as_short[2] : m2.as_short[2];
+ res.as_short[3] =
+ (m1.as_short[3] > m2.as_short[3]) ? m1.as_short[3] : m2.as_short[3];
+
+ return (__m64) res.as_m64;
+#endif
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pmaxsw (__m64 __A, __m64 __B)
+{
+ return _mm_max_pi16 (__A, __B);
+}
+
+/* Compute the element-wise maximum of unsigned 8-bit values. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_pu8 (__m64 __A, __m64 __B)
+{
+#if _ARCH_PWR8
+ __vector unsigned char a, b, r;
+ __vector __bool char c;
+
+ a = (__vector unsigned char)vec_splats (__A);
+ b = (__vector unsigned char)vec_splats (__B);
+ c = (__vector __bool char)vec_cmpgt (a, b);
+ r = vec_sel (b, a, c);
+ return (__m64) ((__vector long long) r)[0];
+#else
+ __m64_union m1, m2, res;
+ long i;
+
+ m1.as_m64 = __A;
+ m2.as_m64 = __B;
+
+ for (i = 0; i < 8; i++)
+ res.as_char[i] =
+ ((unsigned char) m1.as_char[i] > (unsigned char) m2.as_char[i]) ?
+ m1.as_char[i] : m2.as_char[i];
+
+ return (__m64) res.as_m64;
+#endif
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pmaxub (__m64 __A, __m64 __B)
+{
+ return _mm_max_pu8 (__A, __B);
+}
+
+/* Compute the element-wise minimum of signed 16-bit values. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_pi16 (__m64 __A, __m64 __B)
+{
+#if _ARCH_PWR8
+ __vector signed short a, b, r;
+ __vector __bool short c;
+
+ a = (__vector signed short)vec_splats (__A);
+ b = (__vector signed short)vec_splats (__B);
+ c = (__vector __bool short)vec_cmplt (a, b);
+ r = vec_sel (b, a, c);
+ return (__m64) ((__vector long long) r)[0];
+#else
+ __m64_union m1, m2, res;
+
+ m1.as_m64 = __A;
+ m2.as_m64 = __B;
+
+ res.as_short[0] =
+ (m1.as_short[0] < m2.as_short[0]) ? m1.as_short[0] : m2.as_short[0];
+ res.as_short[1] =
+ (m1.as_short[1] < m2.as_short[1]) ? m1.as_short[1] : m2.as_short[1];
+ res.as_short[2] =
+ (m1.as_short[2] < m2.as_short[2]) ? m1.as_short[2] : m2.as_short[2];
+ res.as_short[3] =
+ (m1.as_short[3] < m2.as_short[3]) ? m1.as_short[3] : m2.as_short[3];
+
+ return (__m64) res.as_m64;
+#endif
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pminsw (__m64 __A, __m64 __B)
+{
+ return _mm_min_pi16 (__A, __B);
+}
+
+/* Compute the element-wise minimum of unsigned 8-bit values. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_pu8 (__m64 __A, __m64 __B)
+{
+#if _ARCH_PWR8
+ __vector unsigned char a, b, r;
+ __vector __bool char c;
+
+ a = (__vector unsigned char)vec_splats (__A);
+ b = (__vector unsigned char)vec_splats (__B);
+ c = (__vector __bool char)vec_cmplt (a, b);
+ r = vec_sel (b, a, c);
+ return (__m64) ((__vector long long) r)[0];
+#else
+ __m64_union m1, m2, res;
+ long i;
+
+ m1.as_m64 = __A;
+ m2.as_m64 = __B;
+
+ for (i = 0; i < 8; i++)
+ res.as_char[i] =
+ ((unsigned char) m1.as_char[i] < (unsigned char) m2.as_char[i]) ?
+ m1.as_char[i] : m2.as_char[i];
+
+ return (__m64) res.as_m64;
+#endif
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pminub (__m64 __A, __m64 __B)
+{
+ return _mm_min_pu8 (__A, __B);
+}
+
+/* Create an 8-bit mask of the signs of 8-bit values. */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movemask_pi8 (__m64 __A)
+{
+ unsigned long long p =
+#ifdef __LITTLE_ENDIAN__
+ 0x0008101820283038UL; // permute control for sign bits
+#else
+ 0x3830282018100800UL; // permute control for sign bits
+#endif
+ return __builtin_bpermd (p, __A);
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pmovmskb (__m64 __A)
+{
+ return _mm_movemask_pi8 (__A);
+}
+
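+/* A minimal scalar sketch of the sign-byte mask above, for illustration
+   only, assuming the eight bytes are numbered from the least significant
+   end of the 64-bit value; the helper name and unsigned long long operand
+   are not part of this header. */
+static inline int
+__movemask_u8_scalar (unsigned long long __a)
+{
+ int __mask = 0;
+ int __i;
+ for (__i = 0; __i < 8; __i++)
+ __mask |= (int) ((__a >> (__i * 8 + 7)) & 1) << __i;
+ return __mask;
+}
+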
+/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
+ in B and produce the high 16 bits of the 32-bit results. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mulhi_pu16 (__m64 __A, __m64 __B)
+{
+ __vector unsigned short a, b;
+ __vector unsigned short c;
+ __vector unsigned int w0, w1;
+ __vector unsigned char xform1 = {
+#ifdef __LITTLE_ENDIAN__
+ 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17,
+ 0x0A, 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+#else
+ 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15,
+ 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15
+#endif
+ };
+
+ a = (__vector unsigned short)vec_splats (__A);
+ b = (__vector unsigned short)vec_splats (__B);
+
+ w0 = vec_vmuleuh (a, b);
+ w1 = vec_vmulouh (a, b);
+ c = (__vector unsigned short)vec_perm (w0, w1, xform1);
+
+ return (__m64) ((__vector long long) c)[0];
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pmulhuw (__m64 __A, __m64 __B)
+{
+ return _mm_mulhi_pu16 (__A, __B);
+}
+
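+/* A minimal scalar sketch of the high-half multiply above, for illustration
+   only: each result word is the upper 16 bits of the 32-bit product of the
+   corresponding words of A and B. Plain arrays stand in for __m64 and the
+   helper name is not part of this header. */
+static inline void
+__mulhi_u16_scalar (const unsigned short __a[4], const unsigned short __b[4],
+ unsigned short __r[4])
+{
+ int __i;
+ for (__i = 0; __i < 4; __i++)
+ __r[__i] = (unsigned short) (((unsigned int) __a[__i] * __b[__i]) >> 16);
+}
+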
+/* Return a combination of the four 16-bit values in A. The selector
+ must be an immediate. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_shuffle_pi16 (__m64 __A, int const __N)
+{
+ unsigned long element_selector_10 = __N & 0x03;
+ unsigned long element_selector_32 = (__N >> 2) & 0x03;
+ unsigned long element_selector_54 = (__N >> 4) & 0x03;
+ unsigned long element_selector_76 = (__N >> 6) & 0x03;
+ static const unsigned short permute_selectors[4] =
+ {
+#ifdef __LITTLE_ENDIAN__
+ 0x0908, 0x0B0A, 0x0D0C, 0x0F0E
+#else
+ 0x0607, 0x0405, 0x0203, 0x0001
+#endif
+ };
+ __m64_union t;
+ __vector unsigned long long a, p, r;
+
+#ifdef __LITTLE_ENDIAN__
+ t.as_short[0] = permute_selectors[element_selector_10];
+ t.as_short[1] = permute_selectors[element_selector_32];
+ t.as_short[2] = permute_selectors[element_selector_54];
+ t.as_short[3] = permute_selectors[element_selector_76];
+#else
+ t.as_short[3] = permute_selectors[element_selector_10];
+ t.as_short[2] = permute_selectors[element_selector_32];
+ t.as_short[1] = permute_selectors[element_selector_54];
+ t.as_short[0] = permute_selectors[element_selector_76];
+#endif
+ p = vec_splats (t.as_m64);
+ a = vec_splats (__A);
+ r = vec_perm (a, a, (__vector unsigned char)p);
+ return (__m64) ((__vector long long) r)[0];
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pshufw (__m64 __A, int const __N)
+{
+ return _mm_shuffle_pi16 (__A, __N);
+}
+
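+/* A minimal scalar sketch of the shuffle above, for illustration only: each
+   pair of bits in the selector N picks which source word supplies the
+   corresponding result word. Plain arrays stand in for __m64 and the helper
+   name is not part of this header. */
+static inline void
+__shuffle_u16_scalar (const unsigned short __a[4], int __n,
+ unsigned short __r[4])
+{
+ int __i;
+ for (__i = 0; __i < 4; __i++)
+ __r[__i] = __a[(__n >> (__i * 2)) & 3];
+}
+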
+/* Conditionally store byte elements of A into P. The high bit of each
+ byte in the selector N determines whether the corresponding byte from
+ A is stored. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
+{
+ __m64 hibit = 0x8080808080808080UL;
+ __m64 mask, tmp;
+ __m64 *p = (__m64*)__P;
+
+ tmp = *p;
+ mask = _mm_cmpeq_pi8 ((__N & hibit), hibit);
+ tmp = (tmp & (~mask)) | (__A & mask);
+ *p = tmp;
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_maskmovq (__m64 __A, __m64 __N, char *__P)
+{
+ _mm_maskmove_si64 (__A, __N, __P);
+}
+
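+/* A minimal scalar sketch of the masked store above, for illustration only:
+   byte I of A is written to P[I] only when the high bit of byte I of the
+   selector is set. Plain byte arrays stand in for __m64 and the helper name
+   is not part of this header. */
+static inline void
+__maskmove_u8_scalar (const unsigned char __a[8], const unsigned char __n[8],
+ unsigned char *__p)
+{
+ int __i;
+ for (__i = 0; __i < 8; __i++)
+ if (__n[__i] & 0x80)
+ __p[__i] = __a[__i];
+}
+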
+/* Compute the rounded averages of the unsigned 8-bit values in A and B. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_avg_pu8 (__m64 __A, __m64 __B)
+{
+ __vector unsigned char a, b, c;
+
+ a = (__vector unsigned char)vec_splats (__A);
+ b = (__vector unsigned char)vec_splats (__B);
+ c = vec_avg (a, b);
+ return (__m64) ((__vector long long) c)[0];
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pavgb (__m64 __A, __m64 __B)
+{
+ return _mm_avg_pu8 (__A, __B);
+}
+
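+/* A minimal scalar sketch of the rounded average above, for illustration
+   only: (A + B + 1) >> 1 computed per byte in a wider type so the sum
+   cannot overflow; the helper name is not part of this header. */
+static inline unsigned char
+__avg_u8_scalar (unsigned char __a, unsigned char __b)
+{
+ return (unsigned char) (((unsigned int) __a + __b + 1) >> 1);
+}
+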
+/* Compute the rounded averages of the unsigned 16-bit values in A and B. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_avg_pu16 (__m64 __A, __m64 __B)
+{
+ __vector unsigned short a, b, c;
+
+ a = (__vector unsigned short)vec_splats (__A);
+ b = (__vector unsigned short)vec_splats (__B);
+ c = vec_avg (a, b);
+ return (__m64) ((__vector long long) c)[0];
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_pavgw (__m64 __A, __m64 __B)
+{
+ return _mm_avg_pu16 (__A, __B);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 8-bit
+ values in A and B. Return the value in the lower 16-bit word; the
+ upper words are cleared. */
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sad_pu8 (__m64 __A, __m64 __B)
+{
+ __vector unsigned char a, b;
+ __vector unsigned char vmin, vmax, vabsdiff;
+ __vector signed int vsum;
+ const __vector unsigned int zero =
+ { 0, 0, 0, 0 };
+ __m64_union result = {0};
+
+ a = (__vector unsigned char) (__vector unsigned long long) { 0UL, __A };
+ b = (__vector unsigned char) (__vector unsigned long long) { 0UL, __B };
+ vmin = vec_min (a, b);
+ vmax = vec_max (a, b);
+ vabsdiff = vec_sub (vmax, vmin);
+ /* Sum four groups of bytes into integers. */
+ vsum = (__vector signed int) vec_sum4s (vabsdiff, zero);
+ /* Sum across four integers with integer result. */
+ vsum = vec_sums (vsum, (__vector signed int) zero);
+ /* The sum is in the rightmost 32 bits of the vector result.
+ Transfer to a GPR and truncate to 16 bits. */
+ result.as_short[0] = vsum[3];
+ return result.as_m64;
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_m_psadbw (__m64 __A, __m64 __B)
+{
+ return _mm_sad_pu8 (__A, __B);
+}
+
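+/* A minimal scalar sketch of the sum of absolute differences above, for
+   illustration only: the eight byte differences are accumulated into the
+   low 16-bit word and the upper words stay zero. Plain arrays stand in for
+   __m64 and the helper name is not part of this header. */
+static inline unsigned long long
+__sad_u8_scalar (const unsigned char __a[8], const unsigned char __b[8])
+{
+ unsigned int __sum = 0;
+ int __i;
+ for (__i = 0; __i < 8; __i++)
+ __sum += (__a[__i] > __b[__i]) ? (unsigned int) (__a[__i] - __b[__i])
+ : (unsigned int) (__b[__i] - __a[__i]);
+ return (unsigned long long) (__sum & 0xffff);
+}
+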
+/* Stores the data in A to the address P without polluting the caches. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_stream_pi (__m64 *__P, __m64 __A)
+{
+ /* Use the data cache block touch for store transient. */
+ __asm__ (
+ " dcbtstt 0,%0"
+ :
+ : "b" (__P)
+ : "memory"
+ );
+ *__P = __A;
+}
+
+/* Likewise. The address must be 16-byte aligned. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_stream_ps (float *__P, __m128 __A)
+{
+ /* Use the data cache block touch for store transient. */
+ __asm__ (
+ " dcbtstt 0,%0"
+ :
+ : "b" (__P)
+ : "memory"
+ );
+ _mm_store_ps (__P, __A);
+}
+
+/* Guarantees that every preceding store is globally visible before
+ any subsequent store. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sfence (void)
+{
+ /* Generate a light weight sync. */
+ __atomic_thread_fence (__ATOMIC_RELEASE);
+}
+
+/* The execution of the next instruction is delayed by an implementation
+ specific amount of time. The instruction does not modify the
+ architectural state. This is after the pop_options pragma because
+ it does not require SSE support in the processor--the encoding is a
+ nop on processors that do not support it. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_pause (void)
+{
+ /* There is no exact match with this construct, but the following is
+ close to the desired effect. */
+#if _ARCH_PWR8
+ /* On power8 and later processors we can depend on Program Priority
+ (PRI) and associated "very low" PRI setting. Since we don't know
+ what PRI this thread is running at we: 1) save the current PRI
+ from the PPR SPR into a local GPR, 2) set the PRI to "very low"
+ via the special or 31,31,31 encoding, 3) issue an "isync" to
+ ensure the PRI change takes effect before we execute any more
+ instructions.
+ Now we can execute a lwsync (release barrier) while we execute
+ this thread at "very low" PRI. Finally we restore the original
+ PRI and continue execution. */
+ unsigned long __PPR;
+
+ __asm__ volatile (
+ " mfppr %0;"
+ " or 31,31,31;"
+ " isync;"
+ " lwsync;"
+ " isync;"
+ " mtppr %0;"
+ : "=r" (__PPR)
+ :
+ : "memory"
+ );
+#else
+ /* For older processor where we may not even have Program Priority
+ controls we can only depend on Heavy Weight Sync. */
+ __atomic_thread_fence (__ATOMIC_SEQ_CST);
+#endif
+}
+
+/* Transpose the 4x4 matrix composed of row[0-3]. */
+#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
+do { \
+ __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \
+ __v4sf __t0 = vec_vmrghw (__r0, __r1); \
+ __v4sf __t1 = vec_vmrghw (__r2, __r3); \
+ __v4sf __t2 = vec_vmrglw (__r0, __r1); \
+ __v4sf __t3 = vec_vmrglw (__r2, __r3); \
+ (row0) = (__v4sf)vec_mergeh ((__vector long long)__t0, \
+ (__vector long long)__t1); \
+ (row1) = (__v4sf)vec_mergel ((__vector long long)__t0, \
+ (__vector long long)__t1); \
+ (row2) = (__v4sf)vec_mergeh ((__vector long long)__t2, \
+ (__vector long long)__t3); \
+ (row3) = (__v4sf)vec_mergel ((__vector long long)__t2, \
+ (__vector long long)__t3); \
+} while (0)
+
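+/* A typical use of the transpose macro above, shown as a comment for
+   illustration only; it assumes the _mm_set_ps initializer provided earlier
+   in this header.
+
+   __m128 row0 = _mm_set_ps (3.0f, 2.0f, 1.0f, 0.0f);
+   __m128 row1 = _mm_set_ps (7.0f, 6.0f, 5.0f, 4.0f);
+   __m128 row2 = _mm_set_ps (11.0f, 10.0f, 9.0f, 8.0f);
+   __m128 row3 = _mm_set_ps (15.0f, 14.0f, 13.0f, 12.0f);
+   _MM_TRANSPOSE4_PS (row0, row1, row2, row3);
+   After the call row0 holds the former column 0 ({0, 4, 8, 12}), and the
+   other rows hold columns 1 through 3. */
+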
+/* For backward source compatibility. */
+//# include <emmintrin.h>
+
+#endif /* _XMMINTRIN_H_INCLUDED */
diff --git a/lib/Headers/prfchwintrin.h b/lib/Headers/prfchwintrin.h
index 70851396f48e..6e8a4ef2ec97 100644
--- a/lib/Headers/prfchwintrin.h
+++ b/lib/Headers/prfchwintrin.h
@@ -1,22 +1,8 @@
/*===---- prfchwintrin.h - PREFETCHW intrinsic -----------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/ptwriteintrin.h b/lib/Headers/ptwriteintrin.h
index 1bb1df0a2edf..0a04f7c1dfc4 100644
--- a/lib/Headers/ptwriteintrin.h
+++ b/lib/Headers/ptwriteintrin.h
@@ -1,22 +1,8 @@
/*===------------ ptwriteintrin.h - PTWRITE intrinsic --------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/rdseedintrin.h b/lib/Headers/rdseedintrin.h
index 419466932cf5..ccb3d2dd2294 100644
--- a/lib/Headers/rdseedintrin.h
+++ b/lib/Headers/rdseedintrin.h
@@ -1,22 +1,8 @@
/*===---- rdseedintrin.h - RDSEED intrinsics -------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/rtmintrin.h b/lib/Headers/rtmintrin.h
index e6a58d743bc9..36ff5835173f 100644
--- a/lib/Headers/rtmintrin.h
+++ b/lib/Headers/rtmintrin.h
@@ -1,22 +1,8 @@
/*===---- rtmintrin.h - RTM intrinsics -------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/s390intrin.h b/lib/Headers/s390intrin.h
index d51274c07df5..73a915c23319 100644
--- a/lib/Headers/s390intrin.h
+++ b/lib/Headers/s390intrin.h
@@ -1,22 +1,8 @@
/*===---- s390intrin.h - SystemZ intrinsics --------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/sgxintrin.h b/lib/Headers/sgxintrin.h
index 20aee766103c..303a21f6b2e9 100644
--- a/lib/Headers/sgxintrin.h
+++ b/lib/Headers/sgxintrin.h
@@ -1,22 +1,8 @@
/*===---- sgxintrin.h - X86 SGX intrinsics configuration -------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -28,6 +14,8 @@
#ifndef __SGXINTRIN_H
#define __SGXINTRIN_H
+#if __has_extension(gnu_asm)
+
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("sgx")))
@@ -67,4 +55,6 @@ _enclv_u32(unsigned int __leaf, __SIZE_TYPE__ __d[])
#undef __DEFAULT_FN_ATTRS
+#endif /* __has_extension(gnu_asm) */
+
#endif
diff --git a/lib/Headers/shaintrin.h b/lib/Headers/shaintrin.h
index 3df4718ced4d..08b1fb1dc16a 100644
--- a/lib/Headers/shaintrin.h
+++ b/lib/Headers/shaintrin.h
@@ -1,22 +1,8 @@
/*===---- shaintrin.h - SHA intrinsics -------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/smmintrin.h b/lib/Headers/smmintrin.h
index 4806b3e4e150..025830a74280 100644
--- a/lib/Headers/smmintrin.h
+++ b/lib/Headers/smmintrin.h
@@ -1,22 +1,8 @@
/*===---- smmintrin.h - SSE4 intrinsics ------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/stdalign.h b/lib/Headers/stdalign.h
index 3738d1284f95..6ad25db4539a 100644
--- a/lib/Headers/stdalign.h
+++ b/lib/Headers/stdalign.h
@@ -1,22 +1,8 @@
/*===---- stdalign.h - Standard header for alignment ------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/stdarg.h b/lib/Headers/stdarg.h
index 101426fff151..0bc39408c1e5 100644
--- a/lib/Headers/stdarg.h
+++ b/lib/Headers/stdarg.h
@@ -1,24 +1,8 @@
/*===---- stdarg.h - Variable argument handling ----------------------------===
*
- * Copyright (c) 2008 Eli Friedman
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/stdatomic.h b/lib/Headers/stdatomic.h
index b4845a74e494..665551ea69a4 100644
--- a/lib/Headers/stdatomic.h
+++ b/lib/Headers/stdatomic.h
@@ -1,22 +1,8 @@
/*===---- stdatomic.h - Standard header for atomic types and operations -----===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/stdbool.h b/lib/Headers/stdbool.h
index 5cb66b55d02a..2525363dd02a 100644
--- a/lib/Headers/stdbool.h
+++ b/lib/Headers/stdbool.h
@@ -1,24 +1,8 @@
/*===---- stdbool.h - Standard header for booleans -------------------------===
*
- * Copyright (c) 2008 Eli Friedman
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/stddef.h b/lib/Headers/stddef.h
index 735499671156..15acd4427ca1 100644
--- a/lib/Headers/stddef.h
+++ b/lib/Headers/stddef.h
@@ -1,24 +1,8 @@
/*===---- stddef.h - Basic type definitions --------------------------------===
*
- * Copyright (c) 2008 Eli Friedman
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/stdint.h b/lib/Headers/stdint.h
index 0afcca3a9daa..192f653e95a1 100644
--- a/lib/Headers/stdint.h
+++ b/lib/Headers/stdint.h
@@ -1,29 +1,18 @@
/*===---- stdint.h - Standard header for sized integer types --------------===*\
*
- * Copyright (c) 2009 Chris Lattner
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
\*===----------------------------------------------------------------------===*/
#ifndef __CLANG_STDINT_H
+// AIX system headers need stdint.h to be re-enterable while _STD_TYPES_T
+// is defined until an inclusion of it without _STD_TYPES_T occurs, in which
+// case the header guard macro is defined.
+#if !defined(_AIX) || !defined(_STD_TYPES_T) || !defined(__STDC_HOSTED__)
#define __CLANG_STDINT_H
+#endif
/* If we're hosted, fall back to the system's stdint.h, which might have
* additional definitions.
diff --git a/lib/Headers/stdnoreturn.h b/lib/Headers/stdnoreturn.h
index a7a301d7e0bd..e83cd8153752 100644
--- a/lib/Headers/stdnoreturn.h
+++ b/lib/Headers/stdnoreturn.h
@@ -1,22 +1,8 @@
/*===---- stdnoreturn.h - Standard header for noreturn macro ---------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/tbmintrin.h b/lib/Headers/tbmintrin.h
index 1d0d746a824a..f4e848a1c001 100644
--- a/lib/Headers/tbmintrin.h
+++ b/lib/Headers/tbmintrin.h
@@ -1,22 +1,8 @@
/*===---- tbmintrin.h - TBM intrinsics -------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/tgmath.h b/lib/Headers/tgmath.h
index 34e26dcc05ec..7acf18b9dd35 100644
--- a/lib/Headers/tgmath.h
+++ b/lib/Headers/tgmath.h
@@ -1,24 +1,8 @@
/*===---- tgmath.h - Standard header for type generic math ----------------===*\
*
- * Copyright (c) 2009 Howard Hinnant
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
\*===----------------------------------------------------------------------===*/
diff --git a/lib/Headers/tmmintrin.h b/lib/Headers/tmmintrin.h
index 734cd391be60..35533e115c7d 100644
--- a/lib/Headers/tmmintrin.h
+++ b/lib/Headers/tmmintrin.h
@@ -1,22 +1,8 @@
/*===---- tmmintrin.h - SSSE3 intrinsics -----------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/unwind.h b/lib/Headers/unwind.h
index 0e8317e5b9d9..029524b7bc84 100644
--- a/lib/Headers/unwind.h
+++ b/lib/Headers/unwind.h
@@ -1,22 +1,8 @@
/*===---- unwind.h - Stack unwinding ----------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -66,8 +52,8 @@ extern "C" {
#pragma GCC visibility push(default)
#endif
-typedef uintptr_t _Unwind_Word;
-typedef intptr_t _Unwind_Sword;
+typedef uintptr_t _Unwind_Word __attribute__((__mode__(__unwind_word__)));
+typedef intptr_t _Unwind_Sword __attribute__((__mode__(__unwind_word__)));
typedef uintptr_t _Unwind_Ptr;
typedef uintptr_t _Unwind_Internal_Ptr;
typedef uint64_t _Unwind_Exception_Class;
diff --git a/lib/Headers/vadefs.h b/lib/Headers/vadefs.h
index 7fe9a74e3f69..b6175684469c 100644
--- a/lib/Headers/vadefs.h
+++ b/lib/Headers/vadefs.h
@@ -1,22 +1,8 @@
/* ===-------- vadefs.h ---------------------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/vaesintrin.h b/lib/Headers/vaesintrin.h
index e4174bb82ff0..c4d5c3e75140 100644
--- a/lib/Headers/vaesintrin.h
+++ b/lib/Headers/vaesintrin.h
@@ -1,23 +1,9 @@
/*===------------------ vaesintrin.h - VAES intrinsics ---------------------===
*
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/varargs.h b/lib/Headers/varargs.h
index b5477d0a6a48..d241b7de3cb2 100644
--- a/lib/Headers/varargs.h
+++ b/lib/Headers/varargs.h
@@ -1,22 +1,8 @@
/*===---- varargs.h - Variable argument handling -------------------------------------===
*
-* Permission is hereby granted, free of charge, to any person obtaining a copy
-* of this software and associated documentation files (the "Software"), to deal
-* in the Software without restriction, including without limitation the rights
-* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-* copies of the Software, and to permit persons to whom the Software is
-* furnished to do so, subject to the following conditions:
-*
-* The above copyright notice and this permission notice shall be included in
-* all copies or substantial portions of the Software.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-* THE SOFTWARE.
+* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+* See https://llvm.org/LICENSE.txt for license information.
+* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/vecintrin.h b/lib/Headers/vecintrin.h
index e627389838df..c71b76a3ee39 100644
--- a/lib/Headers/vecintrin.h
+++ b/lib/Headers/vecintrin.h
@@ -1,22 +1,8 @@
/*===---- vecintrin.h - Vector intrinsics ----------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -531,6 +517,141 @@ vec_bperm_u128(vector unsigned char __a, vector unsigned char __b) {
}
#endif
+/*-- vec_revb ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed short
+vec_revb(vector signed short __vec) {
+ return (vector signed short)
+ __builtin_s390_vlbrh((vector unsigned short)__vec);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_revb(vector unsigned short __vec) {
+ return __builtin_s390_vlbrh(__vec);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_revb(vector signed int __vec) {
+ return (vector signed int)
+ __builtin_s390_vlbrf((vector unsigned int)__vec);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_revb(vector unsigned int __vec) {
+ return __builtin_s390_vlbrf(__vec);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_revb(vector signed long long __vec) {
+ return (vector signed long long)
+ __builtin_s390_vlbrg((vector unsigned long long)__vec);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_revb(vector unsigned long long __vec) {
+ return __builtin_s390_vlbrg(__vec);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai vector float
+vec_revb(vector float __vec) {
+ return (vector float)
+ __builtin_s390_vlbrf((vector unsigned int)__vec);
+}
+#endif
+
+static inline __ATTRS_o_ai vector double
+vec_revb(vector double __vec) {
+ return (vector double)
+ __builtin_s390_vlbrg((vector unsigned long long)__vec);
+}
+
+/*-- vec_reve ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_reve(vector signed char __vec) {
+ return (vector signed char) { __vec[15], __vec[14], __vec[13], __vec[12],
+ __vec[11], __vec[10], __vec[9], __vec[8],
+ __vec[7], __vec[6], __vec[5], __vec[4],
+ __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_reve(vector unsigned char __vec) {
+ return (vector unsigned char) { __vec[15], __vec[14], __vec[13], __vec[12],
+ __vec[11], __vec[10], __vec[9], __vec[8],
+ __vec[7], __vec[6], __vec[5], __vec[4],
+ __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_reve(vector bool char __vec) {
+ return (vector bool char) { __vec[15], __vec[14], __vec[13], __vec[12],
+ __vec[11], __vec[10], __vec[9], __vec[8],
+ __vec[7], __vec[6], __vec[5], __vec[4],
+ __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_reve(vector signed short __vec) {
+ return (vector signed short) { __vec[7], __vec[6], __vec[5], __vec[4],
+ __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_reve(vector unsigned short __vec) {
+ return (vector unsigned short) { __vec[7], __vec[6], __vec[5], __vec[4],
+ __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_reve(vector bool short __vec) {
+ return (vector bool short) { __vec[7], __vec[6], __vec[5], __vec[4],
+ __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_reve(vector signed int __vec) {
+ return (vector signed int) { __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_reve(vector unsigned int __vec) {
+ return (vector unsigned int) { __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_reve(vector bool int __vec) {
+ return (vector bool int) { __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_reve(vector signed long long __vec) {
+ return (vector signed long long) { __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_reve(vector unsigned long long __vec) {
+ return (vector unsigned long long) { __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_reve(vector bool long long __vec) {
+ return (vector bool long long) { __vec[1], __vec[0] };
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai vector float
+vec_reve(vector float __vec) {
+ return (vector float) { __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+#endif
+
+static inline __ATTRS_o_ai vector double
+vec_reve(vector double __vec) {
+ return (vector double) { __vec[1], __vec[0] };
+}
+
/*-- vec_sel ----------------------------------------------------------------*/
static inline __ATTRS_o_ai vector signed char
@@ -6849,6 +6970,56 @@ vec_sldw(vector double __a, vector double __b, int __c)
__builtin_s390_vsldb((vector unsigned char)(X), \
(vector unsigned char)(Y), (Z) * 4))
+/*-- vec_sldb ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 13
+
+extern __ATTRS_o vector signed char
+vec_sldb(vector signed char __a, vector signed char __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector unsigned char
+vec_sldb(vector unsigned char __a, vector unsigned char __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector signed short
+vec_sldb(vector signed short __a, vector signed short __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector unsigned short
+vec_sldb(vector unsigned short __a, vector unsigned short __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector signed int
+vec_sldb(vector signed int __a, vector signed int __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector unsigned int
+vec_sldb(vector unsigned int __a, vector unsigned int __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector signed long long
+vec_sldb(vector signed long long __a, vector signed long long __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector unsigned long long
+vec_sldb(vector unsigned long long __a, vector unsigned long long __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector float
+vec_sldb(vector float __a, vector float __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector double
+vec_sldb(vector double __a, vector double __b, int __c)
+ __constant_range(__c, 0, 7);
+
+#define vec_sldb(X, Y, Z) ((__typeof__((vec_sldb)((X), (Y), (Z)))) \
+ __builtin_s390_vsld((vector unsigned char)(X), \
+ (vector unsigned char)(Y), (Z)))
+
+#endif
+
/*-- vec_sral ---------------------------------------------------------------*/
static inline __ATTRS_o_ai vector signed char
@@ -7579,6 +7750,56 @@ vec_srb(vector double __a, vector unsigned long long __b) {
(vector unsigned char)__a, (vector unsigned char)__b);
}
+/*-- vec_srdb ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 13
+
+extern __ATTRS_o vector signed char
+vec_srdb(vector signed char __a, vector signed char __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector unsigned char
+vec_srdb(vector unsigned char __a, vector unsigned char __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector signed short
+vec_srdb(vector signed short __a, vector signed short __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector unsigned short
+vec_srdb(vector unsigned short __a, vector unsigned short __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector signed int
+vec_srdb(vector signed int __a, vector signed int __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector unsigned int
+vec_srdb(vector unsigned int __a, vector unsigned int __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector signed long long
+vec_srdb(vector signed long long __a, vector signed long long __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector unsigned long long
+vec_srdb(vector unsigned long long __a, vector unsigned long long __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector float
+vec_srdb(vector float __a, vector float __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector double
+vec_srdb(vector double __a, vector double __b, int __c)
+ __constant_range(__c, 0, 7);
+
+#define vec_srdb(X, Y, Z) ((__typeof__((vec_srdb)((X), (Y), (Z)))) \
+ __builtin_s390_vsrd((vector unsigned char)(X), \
+ (vector unsigned char)(Y), (Z)))
+
+#endif
+
/*-- vec_abs ----------------------------------------------------------------*/
static inline __ATTRS_o_ai vector signed char
@@ -8725,6 +8946,22 @@ vec_double(vector unsigned long long __a) {
return __builtin_convertvector(__a, vector double);
}
+/*-- vec_float --------------------------------------------------------------*/
+
+#if __ARCH__ >= 13
+
+static inline __ATTRS_o_ai vector float
+vec_float(vector signed int __a) {
+ return __builtin_convertvector(__a, vector float);
+}
+
+static inline __ATTRS_o_ai vector float
+vec_float(vector unsigned int __a) {
+ return __builtin_convertvector(__a, vector float);
+}
+
+#endif
+
/*-- vec_signed -------------------------------------------------------------*/
static inline __ATTRS_o_ai vector signed long long
@@ -8732,6 +8969,13 @@ vec_signed(vector double __a) {
return __builtin_convertvector(__a, vector signed long long);
}
+#if __ARCH__ >= 13
+static inline __ATTRS_o_ai vector signed int
+vec_signed(vector float __a) {
+ return __builtin_convertvector(__a, vector signed int);
+}
+#endif
+
/*-- vec_unsigned -----------------------------------------------------------*/
static inline __ATTRS_o_ai vector unsigned long long
@@ -8739,6 +8983,13 @@ vec_unsigned(vector double __a) {
return __builtin_convertvector(__a, vector unsigned long long);
}
+#if __ARCH__ >= 13
+static inline __ATTRS_o_ai vector unsigned int
+vec_unsigned(vector float __a) {
+ return __builtin_convertvector(__a, vector unsigned int);
+}
+#endif
+
/*-- vec_roundp -------------------------------------------------------------*/
#if __ARCH__ >= 12
@@ -10456,6 +10707,147 @@ vec_find_any_ne_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
return __builtin_s390_vfaezfs(__a, __b, 8, __cc);
}
+/*-- vec_search_string_cc ---------------------------------------------------*/
+
+#if __ARCH__ >= 13
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_cc(vector signed char __a, vector signed char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsb((vector unsigned char)__a,
+ (vector unsigned char)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_cc(vector bool char __a, vector bool char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsb((vector unsigned char)__a,
+ (vector unsigned char)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsb(__a, __b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_cc(vector signed short __a, vector signed short __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsh((vector unsigned short)__a,
+ (vector unsigned short)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_cc(vector bool short __a, vector bool short __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsh((vector unsigned short)__a,
+ (vector unsigned short)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsh(__a, __b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_cc(vector signed int __a, vector signed int __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsf((vector unsigned int)__a,
+ (vector unsigned int)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_cc(vector bool int __a, vector bool int __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsf((vector unsigned int)__a,
+ (vector unsigned int)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsf(__a, __b, __c, __cc);
+}
+
+#endif
+
+/*-- vec_search_string_until_zero_cc ----------------------------------------*/
+
+#if __ARCH__ >= 13
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_until_zero_cc(vector signed char __a,
+ vector signed char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszb((vector unsigned char)__a,
+ (vector unsigned char)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_until_zero_cc(vector bool char __a,
+ vector bool char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszb((vector unsigned char)__a,
+ (vector unsigned char)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_until_zero_cc(vector unsigned char __a,
+ vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszb(__a, __b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_until_zero_cc(vector signed short __a,
+ vector signed short __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszh((vector unsigned short)__a,
+ (vector unsigned short)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_until_zero_cc(vector bool short __a,
+ vector bool short __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszh((vector unsigned short)__a,
+ (vector unsigned short)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_until_zero_cc(vector unsigned short __a,
+ vector unsigned short __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszh(__a, __b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_until_zero_cc(vector signed int __a,
+ vector signed int __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszf((vector unsigned int)__a,
+ (vector unsigned int)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_until_zero_cc(vector bool int __a,
+ vector bool int __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszf((vector unsigned int)__a,
+ (vector unsigned int)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_until_zero_cc(vector unsigned int __a,
+ vector unsigned int __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszf(__a, __b, __c, __cc);
+}
+
+#endif
+
#undef __constant_pow2_range
#undef __constant_range
#undef __constant
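For reference, a minimal usage sketch of the element- and byte-reversal overloads added above (vec_revb, vec_reve). This is illustrative only and not part of the diff; it assumes a SystemZ target with the vector facility, e.g. clang -target s390x-linux-gnu -march=z13 -mzvector:

/* Illustrative only: byte- and element-reversal via the new overloads. */
#include <vecintrin.h>

vector unsigned int byteswap_elements(vector unsigned int v) {
  return vec_revb(v);   /* byte-reverses each 32-bit element (vlbrf) */
}

vector signed short reverse_elements(vector signed short v) {
  return vec_reve(v);   /* reverses the order of the eight halfword elements */
}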
diff --git a/lib/Headers/vpclmulqdqintrin.h b/lib/Headers/vpclmulqdqintrin.h
index 86174a457e11..470d83254905 100644
--- a/lib/Headers/vpclmulqdqintrin.h
+++ b/lib/Headers/vpclmulqdqintrin.h
@@ -1,23 +1,9 @@
/*===------------ vpclmulqdqintrin.h - VPCLMULQDQ intrinsics ---------------===
*
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/waitpkgintrin.h b/lib/Headers/waitpkgintrin.h
index e29d6cfa5a51..7ecada4cf723 100644
--- a/lib/Headers/waitpkgintrin.h
+++ b/lib/Headers/waitpkgintrin.h
@@ -1,22 +1,8 @@
/*===----------------------- waitpkgintrin.h - WAITPKG --------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/wbnoinvdintrin.h b/lib/Headers/wbnoinvdintrin.h
index cad83368dbe7..cac0347efc4f 100644
--- a/lib/Headers/wbnoinvdintrin.h
+++ b/lib/Headers/wbnoinvdintrin.h
@@ -1,22 +1,8 @@
/*===-------------- wbnoinvdintrin.h - wbnoinvd intrinsic-------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/wmmintrin.h b/lib/Headers/wmmintrin.h
index 569a8d838dad..f932ca81089c 100644
--- a/lib/Headers/wmmintrin.h
+++ b/lib/Headers/wmmintrin.h
@@ -1,22 +1,8 @@
/*===---- wmmintrin.h - AES intrinsics ------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/x86intrin.h b/lib/Headers/x86intrin.h
index 728c58c3ebbc..a8b36622d410 100644
--- a/lib/Headers/x86intrin.h
+++ b/lib/Headers/x86intrin.h
@@ -1,22 +1,8 @@
/*===---- x86intrin.h - X86 intrinsics -------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/xmmintrin.h b/lib/Headers/xmmintrin.h
index 17af17267c83..75ff37655bda 100644
--- a/lib/Headers/xmmintrin.h
+++ b/lib/Headers/xmmintrin.h
@@ -1,22 +1,8 @@
/*===---- xmmintrin.h - SSE intrinsics -------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -28,7 +14,9 @@
typedef int __v4si __attribute__((__vector_size__(16)));
typedef float __v4sf __attribute__((__vector_size__(16)));
-typedef float __m128 __attribute__((__vector_size__(16)));
+typedef float __m128 __attribute__((__vector_size__(16), __aligned__(16)));
+
+typedef float __m128_u __attribute__((__vector_size__(16), __aligned__(1)));
/* Unsigned types */
typedef unsigned int __v4su __attribute__((__vector_size__(16)));
@@ -1752,7 +1740,7 @@ static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_loadu_ps(const float *__p)
{
struct __loadu_ps {
- __m128 __v;
+ __m128_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_ps*)__p)->__v;
}
@@ -1931,7 +1919,11 @@ _mm_setzero_ps(void)
static __inline__ void __DEFAULT_FN_ATTRS
_mm_storeh_pi(__m64 *__p, __m128 __a)
{
- __builtin_ia32_storehps((__v2si *)__p, (__v4sf)__a);
+ typedef float __mm_storeh_pi_v2f32 __attribute__((__vector_size__(8)));
+ struct __mm_storeh_pi_struct {
+ __mm_storeh_pi_v2f32 __u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_storeh_pi_struct*)__p)->__u = __builtin_shufflevector(__a, __a, 2, 3);
}
/// Stores the lower 64 bits of a 128-bit vector of [4 x float] to a
@@ -1948,7 +1940,11 @@ _mm_storeh_pi(__m64 *__p, __m128 __a)
static __inline__ void __DEFAULT_FN_ATTRS
_mm_storel_pi(__m64 *__p, __m128 __a)
{
- __builtin_ia32_storelps((__v2si *)__p, (__v4sf)__a);
+ typedef float __mm_storeh_pi_v2f32 __attribute__((__vector_size__(8)));
+ struct __mm_storeh_pi_struct {
+ __mm_storeh_pi_v2f32 __u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_storeh_pi_struct*)__p)->__u = __builtin_shufflevector(__a, __a, 0, 1);
}
/// Stores the lower 32 bits of a 128-bit vector of [4 x float] to a
@@ -1987,7 +1983,7 @@ static __inline__ void __DEFAULT_FN_ATTRS
_mm_storeu_ps(float *__p, __m128 __a)
{
struct __storeu_ps {
- __m128 __v;
+ __m128_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_ps*)__p)->__v = __a;
}
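Contextual note: the xmmintrin.h hunks above introduce __m128_u, an alignment-1 variant of __m128, and rewrite _mm_storeh_pi/_mm_storel_pi in terms of __builtin_shufflevector plus a packed, may_alias wrapper struct instead of the old storehps/storelps builtins. A minimal standalone sketch of that unaligned-access idiom (the names below are invented for illustration, not taken from the header):

/* Illustrative sketch of the packed + may_alias unaligned-load idiom used above. */
typedef float v4sf   __attribute__((__vector_size__(16), __aligned__(16)));
typedef float v4sf_u __attribute__((__vector_size__(16), __aligned__(1)));

static inline v4sf load_unaligned_ps(const float *p) {
  struct loader { v4sf_u v; } __attribute__((__packed__, __may_alias__));
  return ((const struct loader *)p)->v;  /* compiles to an unaligned 16-byte load */
}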
diff --git a/lib/Headers/xopintrin.h b/lib/Headers/xopintrin.h
index 9d540a2abdbe..5cedde41b625 100644
--- a/lib/Headers/xopintrin.h
+++ b/lib/Headers/xopintrin.h
@@ -1,22 +1,8 @@
/*===---- xopintrin.h - XOP intrinsics -------------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/xsavecintrin.h b/lib/Headers/xsavecintrin.h
index 25577a95fc9a..5524947fa98e 100644
--- a/lib/Headers/xsavecintrin.h
+++ b/lib/Headers/xsavecintrin.h
@@ -1,22 +1,8 @@
/*===---- xsavecintrin.h - XSAVEC intrinsic --------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/xsaveintrin.h b/lib/Headers/xsaveintrin.h
index 16f3a78d3f5b..9429db6dde56 100644
--- a/lib/Headers/xsaveintrin.h
+++ b/lib/Headers/xsaveintrin.h
@@ -1,22 +1,8 @@
/*===---- xsaveintrin.h - XSAVE intrinsic ----------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
@@ -28,6 +14,10 @@
#ifndef __XSAVEINTRIN_H
#define __XSAVEINTRIN_H
+#ifdef _MSC_VER
+#define _XCR_XFEATURE_ENABLED_MASK 0
+#endif
+
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsave")))
@@ -41,6 +31,20 @@ _xrstor(void *__p, unsigned long long __m) {
__builtin_ia32_xrstor(__p, __m);
}
+#ifndef _MSC_VER
+#define _xgetbv(A) __builtin_ia32_xgetbv((long long)(A))
+#define _xsetbv(A, B) __builtin_ia32_xsetbv((unsigned int)(A), (unsigned long long)(B))
+#else
+#ifdef __cplusplus
+extern "C" {
+#endif
+unsigned __int64 __cdecl _xgetbv(unsigned int);
+void __cdecl _xsetbv(unsigned int, unsigned __int64);
+#ifdef __cplusplus
+}
+#endif
+#endif /* _MSC_VER */
+
#ifdef __x86_64__
static __inline__ void __DEFAULT_FN_ATTRS
_xsave64(void *__p, unsigned long long __m) {
@@ -51,6 +55,7 @@ static __inline__ void __DEFAULT_FN_ATTRS
_xrstor64(void *__p, unsigned long long __m) {
__builtin_ia32_xrstor64(__p, __m);
}
+
#endif
#undef __DEFAULT_FN_ATTRS
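The xsaveintrin.h hunk above adds _xgetbv/_xsetbv: under MSVC they are declared as the compiler-provided functions (with _XCR_XFEATURE_ENABLED_MASK defined to 0), otherwise they expand to the __builtin_ia32_xgetbv/__builtin_ia32_xsetbv builtins. A hedged sketch of the common use, reading XCR0 to see whether the OS saves SSE and AVX state; it assumes the translation unit is built with XSAVE support (e.g. -mxsave) and that CPUID.OSXSAVE has already been verified:

/* Sketch: query XCR0 for OS-managed SSE (bit 1) and AVX (bit 2) state. */
#include <immintrin.h>

static int os_saves_avx_state(void) {
  unsigned long long xcr0 = _xgetbv(0); /* 0 selects XCR0; MSVC names it _XCR_XFEATURE_ENABLED_MASK */
  return (xcr0 & 0x6) == 0x6;
}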
diff --git a/lib/Headers/xsaveoptintrin.h b/lib/Headers/xsaveoptintrin.h
index 792cf92d46e8..89a4c44db5d9 100644
--- a/lib/Headers/xsaveoptintrin.h
+++ b/lib/Headers/xsaveoptintrin.h
@@ -1,22 +1,8 @@
/*===---- xsaveoptintrin.h - XSAVEOPT intrinsic ----------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/xsavesintrin.h b/lib/Headers/xsavesintrin.h
index fe2bc4b93b22..3f99219a2972 100644
--- a/lib/Headers/xsavesintrin.h
+++ b/lib/Headers/xsavesintrin.h
@@ -1,22 +1,8 @@
/*===---- xsavesintrin.h - XSAVES intrinsic --------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Headers/xtestintrin.h b/lib/Headers/xtestintrin.h
index 924424386b94..7d19e3733d7e 100644
--- a/lib/Headers/xtestintrin.h
+++ b/lib/Headers/xtestintrin.h
@@ -1,22 +1,8 @@
/*===---- xtestintrin.h - XTEST intrinsic ----------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
diff --git a/lib/Index/CodegenNameGenerator.cpp b/lib/Index/CodegenNameGenerator.cpp
index bf52e2108baa..56d3d0603486 100644
--- a/lib/Index/CodegenNameGenerator.cpp
+++ b/lib/Index/CodegenNameGenerator.cpp
@@ -1,9 +1,8 @@
//===- CodegenNameGenerator.cpp - Codegen name generation -----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -13,203 +12,12 @@
#include "clang/Index/CodegenNameGenerator.h"
#include "clang/AST/ASTContext.h"
-#include "clang/AST/DeclCXX.h"
-#include "clang/AST/DeclObjC.h"
-#include "clang/AST/Mangle.h"
-#include "clang/AST/VTableBuilder.h"
-#include "clang/Basic/TargetInfo.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/Mangler.h"
-#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace clang::index;
-struct CodegenNameGenerator::Implementation {
- std::unique_ptr<MangleContext> MC;
- llvm::DataLayout DL;
-
- Implementation(ASTContext &Ctx)
- : MC(Ctx.createMangleContext()),
- DL(Ctx.getTargetInfo().getDataLayout()) {}
-
- bool writeName(const Decl *D, raw_ostream &OS) {
- // First apply frontend mangling.
- SmallString<128> FrontendBuf;
- llvm::raw_svector_ostream FrontendBufOS(FrontendBuf);
- if (auto *FD = dyn_cast<FunctionDecl>(D)) {
- if (FD->isDependentContext())
- return true;
- if (writeFuncOrVarName(FD, FrontendBufOS))
- return true;
- } else if (auto *VD = dyn_cast<VarDecl>(D)) {
- if (writeFuncOrVarName(VD, FrontendBufOS))
- return true;
- } else if (auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
- MC->mangleObjCMethodNameWithoutSize(MD, OS);
- return false;
- } else if (auto *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
- writeObjCClassName(ID, FrontendBufOS);
- } else {
- return true;
- }
-
- // Now apply backend mangling.
- llvm::Mangler::getNameWithPrefix(OS, FrontendBufOS.str(), DL);
- return false;
- }
-
- std::string getName(const Decl *D) {
- std::string Name;
- {
- llvm::raw_string_ostream OS(Name);
- writeName(D, OS);
- }
- return Name;
- }
-
- enum ObjCKind {
- ObjCClass,
- ObjCMetaclass,
- };
-
- std::vector<std::string> getAllManglings(const ObjCContainerDecl *OCD) {
- StringRef ClassName;
- if (const auto *OID = dyn_cast<ObjCInterfaceDecl>(OCD))
- ClassName = OID->getObjCRuntimeNameAsString();
- else if (const auto *OID = dyn_cast<ObjCImplementationDecl>(OCD))
- ClassName = OID->getObjCRuntimeNameAsString();
-
- if (ClassName.empty())
- return {};
-
- auto Mangle = [&](ObjCKind Kind, StringRef ClassName) -> std::string {
- SmallString<40> Mangled;
- auto Prefix = getClassSymbolPrefix(Kind, OCD->getASTContext());
- llvm::Mangler::getNameWithPrefix(Mangled, Prefix + ClassName, DL);
- return Mangled.str();
- };
-
- return {
- Mangle(ObjCClass, ClassName),
- Mangle(ObjCMetaclass, ClassName),
- };
- }
-
- std::vector<std::string> getAllManglings(const Decl *D) {
- if (const auto *OCD = dyn_cast<ObjCContainerDecl>(D))
- return getAllManglings(OCD);
-
- if (!(isa<CXXRecordDecl>(D) || isa<CXXMethodDecl>(D)))
- return {};
-
- const NamedDecl *ND = cast<NamedDecl>(D);
-
- ASTContext &Ctx = ND->getASTContext();
- std::unique_ptr<MangleContext> M(Ctx.createMangleContext());
-
- std::vector<std::string> Manglings;
-
- auto hasDefaultCXXMethodCC = [](ASTContext &C, const CXXMethodDecl *MD) {
- auto DefaultCC = C.getDefaultCallingConvention(/*IsVariadic=*/false,
- /*IsCSSMethod=*/true);
- auto CC = MD->getType()->getAs<FunctionProtoType>()->getCallConv();
- return CC == DefaultCC;
- };
-
- if (const auto *CD = dyn_cast_or_null<CXXConstructorDecl>(ND)) {
- Manglings.emplace_back(getMangledStructor(CD, Ctor_Base));
-
- if (Ctx.getTargetInfo().getCXXABI().isItaniumFamily())
- if (!CD->getParent()->isAbstract())
- Manglings.emplace_back(getMangledStructor(CD, Ctor_Complete));
-
- if (Ctx.getTargetInfo().getCXXABI().isMicrosoft())
- if (CD->hasAttr<DLLExportAttr>() && CD->isDefaultConstructor())
- if (!(hasDefaultCXXMethodCC(Ctx, CD) && CD->getNumParams() == 0))
- Manglings.emplace_back(getMangledStructor(CD, Ctor_DefaultClosure));
- } else if (const auto *DD = dyn_cast_or_null<CXXDestructorDecl>(ND)) {
- Manglings.emplace_back(getMangledStructor(DD, Dtor_Base));
- if (Ctx.getTargetInfo().getCXXABI().isItaniumFamily()) {
- Manglings.emplace_back(getMangledStructor(DD, Dtor_Complete));
- if (DD->isVirtual())
- Manglings.emplace_back(getMangledStructor(DD, Dtor_Deleting));
- }
- } else if (const auto *MD = dyn_cast_or_null<CXXMethodDecl>(ND)) {
- Manglings.emplace_back(getName(ND));
- if (MD->isVirtual())
- if (const auto *TIV = Ctx.getVTableContext()->getThunkInfo(MD))
- for (const auto &T : *TIV)
- Manglings.emplace_back(getMangledThunk(MD, T));
- }
-
- return Manglings;
- }
-
-private:
- bool writeFuncOrVarName(const NamedDecl *D, raw_ostream &OS) {
- if (MC->shouldMangleDeclName(D)) {
- if (const auto *CtorD = dyn_cast<CXXConstructorDecl>(D))
- MC->mangleCXXCtor(CtorD, Ctor_Complete, OS);
- else if (const auto *DtorD = dyn_cast<CXXDestructorDecl>(D))
- MC->mangleCXXDtor(DtorD, Dtor_Complete, OS);
- else
- MC->mangleName(D, OS);
- return false;
- } else {
- IdentifierInfo *II = D->getIdentifier();
- if (!II)
- return true;
- OS << II->getName();
- return false;
- }
- }
-
- void writeObjCClassName(const ObjCInterfaceDecl *D, raw_ostream &OS) {
- OS << getClassSymbolPrefix(ObjCClass, D->getASTContext());
- OS << D->getObjCRuntimeNameAsString();
- }
-
- static StringRef getClassSymbolPrefix(ObjCKind Kind, const ASTContext &Context) {
- if (Context.getLangOpts().ObjCRuntime.isGNUFamily())
- return Kind == ObjCMetaclass ? "_OBJC_METACLASS_" : "_OBJC_CLASS_";
- return Kind == ObjCMetaclass ? "OBJC_METACLASS_$_" : "OBJC_CLASS_$_";
- }
-
- std::string getMangledStructor(const NamedDecl *ND, unsigned StructorType) {
- std::string FrontendBuf;
- llvm::raw_string_ostream FOS(FrontendBuf);
-
- if (const auto *CD = dyn_cast_or_null<CXXConstructorDecl>(ND))
- MC->mangleCXXCtor(CD, static_cast<CXXCtorType>(StructorType), FOS);
- else if (const auto *DD = dyn_cast_or_null<CXXDestructorDecl>(ND))
- MC->mangleCXXDtor(DD, static_cast<CXXDtorType>(StructorType), FOS);
-
- std::string BackendBuf;
- llvm::raw_string_ostream BOS(BackendBuf);
-
- llvm::Mangler::getNameWithPrefix(BOS, FOS.str(), DL);
-
- return BOS.str();
- }
-
- std::string getMangledThunk(const CXXMethodDecl *MD, const ThunkInfo &T) {
- std::string FrontendBuf;
- llvm::raw_string_ostream FOS(FrontendBuf);
-
- MC->mangleThunk(MD, T, FOS);
-
- std::string BackendBuf;
- llvm::raw_string_ostream BOS(BackendBuf);
-
- llvm::Mangler::getNameWithPrefix(BOS, FOS.str(), DL);
-
- return BOS.str();
- }
-};
-
CodegenNameGenerator::CodegenNameGenerator(ASTContext &Ctx)
- : Impl(new Implementation(Ctx)) {
+ : Impl(new ASTNameGenerator(Ctx)) {
}
CodegenNameGenerator::~CodegenNameGenerator() {
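In the CodegenNameGenerator.cpp hunk above, the private Implementation (frontend plus backend mangling) is removed and the wrapper now forwards to clang's ASTNameGenerator. A hedged sketch of how the wrapper is typically driven; it assumes the public getName interface, which is not shown in this diff:

// Sketch: obtain a codegen-level (mangled) name for a declaration.
#include "clang/Index/CodegenNameGenerator.h"

std::string mangledName(clang::ASTContext &Ctx, const clang::Decl *D) {
  clang::index::CodegenNameGenerator Gen(Ctx); // now backed by ASTNameGenerator
  return Gen.getName(D);                       // empty result if no name can be produced
}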
diff --git a/lib/Index/CommentToXML.cpp b/lib/Index/CommentToXML.cpp
index a2659119a2ff..55923d679fea 100644
--- a/lib/Index/CommentToXML.cpp
+++ b/lib/Index/CommentToXML.cpp
@@ -1,9 +1,8 @@
//===--- CommentToXML.cpp - Convert comments to XML representation --------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -189,11 +188,8 @@ FullCommentParts::FullCommentParts(const FullComment *C,
// Sort params in order they are declared in the function prototype.
// Unresolved parameters are put at the end of the list in the same order
// they were seen in the comment.
- std::stable_sort(Params.begin(), Params.end(),
- ParamCommandCommentCompareIndex());
-
- std::stable_sort(TParams.begin(), TParams.end(),
- TParamCommandCommentComparePosition());
+ llvm::stable_sort(Params, ParamCommandCommentCompareIndex());
+ llvm::stable_sort(TParams, TParamCommandCommentComparePosition());
}
void printHTMLStartTagComment(const HTMLStartTagComment *C,
diff --git a/lib/Index/FileIndexRecord.cpp b/lib/Index/FileIndexRecord.cpp
new file mode 100644
index 000000000000..c9dcb0f5377d
--- /dev/null
+++ b/lib/Index/FileIndexRecord.cpp
@@ -0,0 +1,60 @@
+//===--- FileIndexRecord.cpp - Index data per file --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "FileIndexRecord.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclTemplate.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Path.h"
+
+using namespace clang;
+using namespace clang::index;
+
+void FileIndexRecord::addDeclOccurence(SymbolRoleSet Roles, unsigned Offset,
+ const Decl *D,
+ ArrayRef<SymbolRelation> Relations) {
+ assert(D->isCanonicalDecl() &&
+ "Occurrences should be associated with their canonical decl");
+
+ auto IsNextOccurence = [&]() -> bool {
+ if (Decls.empty())
+ return true;
+ auto &Last = Decls.back();
+ return Last.Offset < Offset;
+ };
+
+ if (IsNextOccurence()) {
+ Decls.emplace_back(Roles, Offset, D, Relations);
+ return;
+ }
+
+ DeclOccurrence NewInfo(Roles, Offset, D, Relations);
+ // We keep Decls in order as we need to access them in this order in all cases.
+ auto It = llvm::upper_bound(Decls, NewInfo);
+ Decls.insert(It, std::move(NewInfo));
+}
+
+void FileIndexRecord::print(llvm::raw_ostream &OS) const {
+ OS << "DECLS BEGIN ---\n";
+ for (auto &DclInfo : Decls) {
+ const Decl *D = DclInfo.Dcl;
+ SourceManager &SM = D->getASTContext().getSourceManager();
+ SourceLocation Loc = SM.getFileLoc(D->getLocation());
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ OS << llvm::sys::path::filename(PLoc.getFilename()) << ':' << PLoc.getLine()
+ << ':' << PLoc.getColumn();
+
+ if (auto ND = dyn_cast<NamedDecl>(D)) {
+ OS << ' ' << ND->getNameAsString();
+ }
+
+ OS << '\n';
+ }
+ OS << "DECLS END ---\n";
+}
diff --git a/lib/Index/FileIndexRecord.h b/lib/Index/FileIndexRecord.h
new file mode 100644
index 000000000000..37bf96a71964
--- /dev/null
+++ b/lib/Index/FileIndexRecord.h
@@ -0,0 +1,57 @@
+//===--- FileIndexRecord.h - Index data per file ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_INDEX_FILEINDEXRECORD_H
+#define LLVM_CLANG_LIB_INDEX_FILEINDEXRECORD_H
+
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Index/DeclOccurrence.h"
+#include "clang/Index/IndexSymbol.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include <vector>
+
+namespace clang {
+class IdentifierInfo;
+
+namespace index {
+
+/// Stores the declaration occurrences seen in a particular source or header
+/// file of a translation unit.
+class FileIndexRecord {
+private:
+ FileID FID;
+ bool IsSystem;
+ std::vector<DeclOccurrence> Decls;
+
+public:
+ FileIndexRecord(FileID FID, bool IsSystem) : FID(FID), IsSystem(IsSystem) {}
+
+ ArrayRef<DeclOccurrence> getDeclOccurrencesSortedByOffset() const {
+ return Decls;
+ }
+
+ FileID getFileID() const { return FID; }
+ bool isSystem() const { return IsSystem; }
+
+ /// Adds an occurrence of the canonical declaration \c D at the supplied
+ /// \c Offset
+ ///
+ /// \param Roles the roles the occurrence fulfills in this position.
+ /// \param Offset the offset in the file of this occurrence.
+ /// \param D the canonical declaration this is an occurrence of.
+ /// \param Relations the set of symbols related to this occurrence.
+ void addDeclOccurence(SymbolRoleSet Roles, unsigned Offset, const Decl *D,
+ ArrayRef<SymbolRelation> Relations);
+ void print(llvm::raw_ostream &OS) const;
+};
+
+} // end namespace index
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_INDEX_FILEINDEXRECORD_H
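FileIndexRecord (added above) accumulates DeclOccurrence entries for one file, kept sorted by offset: addDeclOccurence appends when occurrences arrive in increasing offset order and otherwise inserts at the position found by llvm::upper_bound. A hypothetical sketch of feeding it one occurrence; the role and offset are invented for illustration:

// Hypothetical usage sketch; FileIndexRecord is internal to lib/Index.
using namespace clang;
using namespace clang::index;

void noteReference(FileIndexRecord &Rec, const Decl *D, unsigned Offset) {
  // Occurrences must be keyed by the canonical declaration (asserted in addDeclOccurence).
  Rec.addDeclOccurence((SymbolRoleSet)SymbolRole::Reference, Offset,
                       D->getCanonicalDecl(), /*Relations=*/{});
}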
diff --git a/lib/Index/IndexBody.cpp b/lib/Index/IndexBody.cpp
index 54a6df2496a9..07a94f30c883 100644
--- a/lib/Index/IndexBody.cpp
+++ b/lib/Index/IndexBody.cpp
@@ -1,9 +1,8 @@
//===- IndexBody.cpp - Indexing statements --------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Index/IndexDecl.cpp b/lib/Index/IndexDecl.cpp
index a7725f9dd97f..5bbbb0d32bf4 100644
--- a/lib/Index/IndexDecl.cpp
+++ b/lib/Index/IndexDecl.cpp
@@ -1,9 +1,8 @@
//===- IndexDecl.cpp - Indexing declarations ------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -89,12 +88,11 @@ public:
/*isBase=*/false, isIBType);
IndexCtx.indexNestedNameSpecifierLoc(D->getQualifierLoc(), Parent);
if (IndexCtx.shouldIndexFunctionLocalSymbols()) {
- // Only index parameters in definitions, parameters in declarations are
- // not useful.
if (const ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(D)) {
auto *DC = Parm->getDeclContext();
if (auto *FD = dyn_cast<FunctionDecl>(DC)) {
- if (FD->isThisDeclarationADefinition())
+ if (IndexCtx.shouldIndexParametersInDeclarations() ||
+ FD->isThisDeclarationADefinition())
IndexCtx.handleDecl(Parm);
} else if (auto *MD = dyn_cast<ObjCMethodDecl>(DC)) {
if (MD->isThisDeclarationADefinition())
@@ -103,7 +101,8 @@ public:
IndexCtx.handleDecl(Parm);
}
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- if (FD->isThisDeclarationADefinition()) {
+ if (IndexCtx.shouldIndexParametersInDeclarations() ||
+ FD->isThisDeclarationADefinition()) {
for (auto PI : FD->parameters()) {
IndexCtx.handleDecl(PI);
}
@@ -248,7 +247,8 @@ public:
if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(D)) {
IndexCtx.handleReference(Ctor->getParent(), Ctor->getLocation(),
- Ctor->getParent(), Ctor->getDeclContext());
+ Ctor->getParent(), Ctor->getDeclContext(),
+ (unsigned)SymbolRole::NameReference);
// Constructor initializers.
for (const auto *Init : Ctor->inits()) {
@@ -264,7 +264,8 @@ public:
if (auto TypeNameInfo = Dtor->getNameInfo().getNamedTypeInfo()) {
IndexCtx.handleReference(Dtor->getParent(),
TypeNameInfo->getTypeLoc().getBeginLoc(),
- Dtor->getParent(), Dtor->getDeclContext());
+ Dtor->getParent(), Dtor->getDeclContext(),
+ (unsigned)SymbolRole::NameReference);
}
} else if (const auto *Guide = dyn_cast<CXXDeductionGuideDecl>(D)) {
IndexCtx.handleReference(Guide->getDeducedTemplate()->getTemplatedDecl(),
@@ -325,6 +326,7 @@ public:
}
bool VisitMSPropertyDecl(const MSPropertyDecl *D) {
+ TRY_DECL(D, IndexCtx.handleDecl(D));
handleDeclarator(D);
return true;
}
@@ -414,7 +416,7 @@ public:
if (D->isThisDeclarationADefinition()) {
TRY_DECL(D, IndexCtx.handleDecl(D));
TRY_TO(handleReferencedProtocols(D->getReferencedProtocols(), D,
- /*superLoc=*/SourceLocation()));
+ /*SuperLoc=*/SourceLocation()));
TRY_TO(IndexCtx.indexDeclContext(D));
} else {
return IndexCtx.handleReference(D, D->getLocation(), nullptr,
@@ -464,7 +466,7 @@ public:
CategoryLoc = D->getLocation();
TRY_TO(IndexCtx.handleDecl(D, CategoryLoc));
TRY_TO(handleReferencedProtocols(D->getReferencedProtocols(), D,
- /*superLoc=*/SourceLocation()));
+ /*SuperLoc=*/SourceLocation()));
TRY_TO(IndexCtx.indexDeclContext(D));
return true;
}
@@ -581,9 +583,10 @@ public:
}
bool VisitUsingDecl(const UsingDecl *D) {
+ IndexCtx.handleDecl(D);
+
const DeclContext *DC = D->getDeclContext()->getRedeclContext();
const NamedDecl *Parent = dyn_cast<NamedDecl>(DC);
-
IndexCtx.indexNestedNameSpecifierLoc(D->getQualifierLoc(), Parent,
D->getLexicalDeclContext());
for (const auto *I : D->shadows())
@@ -649,10 +652,10 @@ public:
}
static bool shouldIndexTemplateParameterDefaultValue(const NamedDecl *D) {
- if (!D)
- return false;
// We want to index the template parameters only once when indexing the
// canonical declaration.
+ if (!D)
+ return false;
if (const auto *FD = dyn_cast<FunctionDecl>(D))
return FD->getCanonicalDecl() == FD;
else if (const auto *TD = dyn_cast<TagDecl>(D))
@@ -673,6 +676,8 @@ public:
shouldIndexTemplateParameterDefaultValue(Parent)) {
const TemplateParameterList *Params = D->getTemplateParameters();
for (const NamedDecl *TP : *Params) {
+ if (IndexCtx.shouldIndexTemplateParameters())
+ IndexCtx.handleDecl(TP);
if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(TP)) {
if (TTP->hasDefaultArgument())
IndexCtx.indexTypeSourceInfo(TTP->getDefaultArgumentInfo(), Parent);
diff --git a/lib/Index/IndexSymbol.cpp b/lib/Index/IndexSymbol.cpp
index 1cdc0984f780..064f3ae32f9e 100644
--- a/lib/Index/IndexSymbol.cpp
+++ b/lib/Index/IndexSymbol.cpp
@@ -1,9 +1,8 @@
//===--- IndexSymbol.cpp - Types and functions for indexing symbols -------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -56,9 +55,6 @@ bool index::isFunctionLocalSymbol(const Decl *D) {
if (isa<ParmVarDecl>(D))
return true;
- if (isa<TemplateTemplateParmDecl>(D))
- return true;
-
if (isa<ObjCTypeParamDecl>(D))
return true;
@@ -100,6 +96,13 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
Info.Properties |= (SymbolPropertySet)SymbolProperty::ProtocolInterface;
}
+ if (auto *VT = dyn_cast<VarTemplateDecl>(D)) {
+ Info.Properties |= (SymbolPropertySet)SymbolProperty::Generic;
+ Info.Lang = SymbolLanguage::CXX;
+ // All other fields are filled from the templated decl.
+ D = VT->getTemplatedDecl();
+ }
+
if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
switch (TD->getTagKind()) {
case TTK_Struct:
@@ -172,6 +175,7 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
Info.Kind = SymbolKind::Function;
break;
case Decl::Field:
+ case Decl::IndirectField:
Info.Kind = SymbolKind::Field;
if (const CXXRecordDecl *
CXXRec = dyn_cast<CXXRecordDecl>(D->getDeclContext())) {
@@ -320,10 +324,39 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
Info.Lang = SymbolLanguage::CXX;
Info.Properties |= (SymbolPropertySet)SymbolProperty::Generic;
break;
+ case Decl::Using:
+ Info.Kind = SymbolKind::Using;
+ Info.Lang = SymbolLanguage::CXX;
+ break;
case Decl::Binding:
Info.Kind = SymbolKind::Variable;
Info.Lang = SymbolLanguage::CXX;
break;
+ case Decl::MSProperty:
+ Info.Kind = SymbolKind::InstanceProperty;
+ if (const CXXRecordDecl *CXXRec =
+ dyn_cast<CXXRecordDecl>(D->getDeclContext())) {
+ if (!CXXRec->isCLike())
+ Info.Lang = SymbolLanguage::CXX;
+ }
+ break;
+ case Decl::ClassTemplatePartialSpecialization:
+ case Decl::ClassScopeFunctionSpecialization:
+ case Decl::ClassTemplateSpecialization:
+ case Decl::CXXRecord:
+ case Decl::Enum:
+ case Decl::Record:
+ llvm_unreachable("records handled before");
+ break;
+ case Decl::VarTemplateSpecialization:
+ case Decl::VarTemplatePartialSpecialization:
+ case Decl::ImplicitParam:
+ case Decl::ParmVar:
+ case Decl::Var:
+ case Decl::VarTemplate:
+ llvm_unreachable("variables handled before");
+ break;
+ // Other decls get the 'unknown' kind.
default:
break;
}
@@ -388,6 +421,7 @@ bool index::applyForEachSymbolRoleInterruptible(SymbolRoleSet Roles,
APPLY_FOR_ROLE(RelationContainedBy);
APPLY_FOR_ROLE(RelationIBTypeOf);
APPLY_FOR_ROLE(RelationSpecializationOf);
+ APPLY_FOR_ROLE(NameReference);
#undef APPLY_FOR_ROLE
@@ -430,6 +464,7 @@ void index::printSymbolRoles(SymbolRoleSet Roles, raw_ostream &OS) {
case SymbolRole::RelationContainedBy: OS << "RelCont"; break;
case SymbolRole::RelationIBTypeOf: OS << "RelIBType"; break;
case SymbolRole::RelationSpecializationOf: OS << "RelSpecialization"; break;
+ case SymbolRole::NameReference: OS << "NameReference"; break;
}
});
}
diff --git a/lib/Index/IndexTypeSourceInfo.cpp b/lib/Index/IndexTypeSourceInfo.cpp
index 85afc6345053..959d5f1197fe 100644
--- a/lib/Index/IndexTypeSourceInfo.cpp
+++ b/lib/Index/IndexTypeSourceInfo.cpp
@@ -1,9 +1,8 @@
//===- IndexTypeSourceInfo.cpp - Indexing types ---------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -46,6 +45,13 @@ public:
return false; \
} while (0)
+ bool VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TTPL) {
+ SourceLocation Loc = TTPL.getNameLoc();
+ TemplateTypeParmDecl *TTPD = TTPL.getDecl();
+ return IndexCtx.handleReference(TTPD, Loc, Parent, ParentDC,
+ SymbolRoleSet());
+ }
+
bool VisitTypedefTypeLoc(TypedefTypeLoc TL) {
SourceLocation Loc = TL.getNameLoc();
TypedefNameDecl *ND = TL.getTypedefNameDecl();
@@ -127,29 +133,41 @@ public:
return true;
}
- template<typename TypeLocType>
- bool HandleTemplateSpecializationTypeLoc(TypeLocType TL) {
- if (const auto *T = TL.getTypePtr()) {
- if (IndexCtx.shouldIndexImplicitInstantiation()) {
- if (CXXRecordDecl *RD = T->getAsCXXRecordDecl()) {
- IndexCtx.handleReference(RD, TL.getTemplateNameLoc(),
- Parent, ParentDC, SymbolRoleSet(), Relations);
- return true;
- }
- }
- if (const TemplateDecl *D = T->getTemplateName().getAsTemplateDecl())
- IndexCtx.handleReference(D, TL.getTemplateNameLoc(), Parent, ParentDC,
- SymbolRoleSet(), Relations);
+ void HandleTemplateSpecializationTypeLoc(TemplateName TemplName,
+ SourceLocation TemplNameLoc,
+ CXXRecordDecl *ResolvedClass,
+ bool IsTypeAlias) {
+ // In presence of type aliases, the resolved class was never written in
+ // the code so don't report it.
+ if (!IsTypeAlias && ResolvedClass &&
+ (!ResolvedClass->isImplicit() ||
+ IndexCtx.shouldIndexImplicitInstantiation())) {
+ IndexCtx.handleReference(ResolvedClass, TemplNameLoc, Parent, ParentDC,
+ SymbolRoleSet(), Relations);
+ } else if (const TemplateDecl *D = TemplName.getAsTemplateDecl()) {
+ IndexCtx.handleReference(D, TemplNameLoc, Parent, ParentDC,
+ SymbolRoleSet(), Relations);
}
- return true;
}
bool VisitTemplateSpecializationTypeLoc(TemplateSpecializationTypeLoc TL) {
- return HandleTemplateSpecializationTypeLoc(TL);
+ auto *T = TL.getTypePtr();
+ if (!T)
+ return true;
+ HandleTemplateSpecializationTypeLoc(
+ T->getTemplateName(), TL.getTemplateNameLoc(), T->getAsCXXRecordDecl(),
+ T->isTypeAlias());
+ return true;
}
bool VisitDeducedTemplateSpecializationTypeLoc(DeducedTemplateSpecializationTypeLoc TL) {
- return HandleTemplateSpecializationTypeLoc(TL);
+ auto *T = TL.getTypePtr();
+ if (!T)
+ return true;
+ HandleTemplateSpecializationTypeLoc(
+ T->getTemplateName(), TL.getTemplateNameLoc(), T->getAsCXXRecordDecl(),
+ /*IsTypeAlias=*/false);
+ return true;
}
bool VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
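A hedged sketch of the distinction the rewritten HandleTemplateSpecializationTypeLoc draws; the declarations below are illustrative only:

  template <class T> struct Vec {};
  template <class T> using AliasVec = Vec<T>;  // alias template
  Vec<int> a;       // the template name 'Vec' (or the Vec<int> record) is reported
  AliasVec<int> b;  // only 'AliasVec' is reported; the resolved Vec<int> record was
                    // never written in the source here, so it is intentionally skipped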
diff --git a/lib/Index/IndexingAction.cpp b/lib/Index/IndexingAction.cpp
index 5cdec4b4528e..5a805c4abcd6 100644
--- a/lib/Index/IndexingAction.cpp
+++ b/lib/Index/IndexingAction.cpp
@@ -1,9 +1,8 @@
//===- IndexingAction.cpp - Frontend index action -------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Index/IndexingContext.cpp b/lib/Index/IndexingContext.cpp
index bba6c8390b56..e29856007149 100644
--- a/lib/Index/IndexingContext.cpp
+++ b/lib/Index/IndexingContext.cpp
@@ -1,9 +1,8 @@
//===- IndexingContext.cpp - Indexing context data ------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -41,6 +40,14 @@ bool IndexingContext::shouldIndexImplicitInstantiation() const {
return IndexOpts.IndexImplicitInstantiation;
}
+bool IndexingContext::shouldIndexParametersInDeclarations() const {
+ return IndexOpts.IndexParametersInDeclarations;
+}
+
+bool IndexingContext::shouldIndexTemplateParameters() const {
+ return IndexOpts.IndexTemplateParameters;
+}
+
bool IndexingContext::handleDecl(const Decl *D,
SymbolRoleSet Roles,
ArrayRef<SymbolRelation> Relations) {
@@ -73,8 +80,11 @@ bool IndexingContext::handleReference(const NamedDecl *D, SourceLocation Loc,
if (!shouldIndexFunctionLocalSymbols() && isFunctionLocalSymbol(D))
return true;
- if (isa<NonTypeTemplateParmDecl>(D) || isa<TemplateTypeParmDecl>(D))
+ if (!shouldIndexTemplateParameters() &&
+ (isa<NonTypeTemplateParmDecl>(D) || isa<TemplateTypeParmDecl>(D) ||
+ isa<TemplateTemplateParmDecl>(D))) {
return true;
+ }
return handleDeclOccurrence(D, Loc, /*IsRef=*/true, Parent, Roles, Relations,
RefE, RefD, DC);
@@ -322,6 +332,7 @@ static bool shouldReportOccurrenceForSystemDeclOnlyMode(
case SymbolRole::RelationCalledBy:
case SymbolRole::RelationContainedBy:
case SymbolRole::RelationSpecializationOf:
+ case SymbolRole::NameReference:
return true;
}
llvm_unreachable("Unsupported SymbolRole value!");
@@ -400,10 +411,9 @@ bool IndexingContext::handleDeclOccurrence(const Decl *D, SourceLocation Loc,
FinalRelations.reserve(Relations.size()+1);
auto addRelation = [&](SymbolRelation Rel) {
- auto It = std::find_if(FinalRelations.begin(), FinalRelations.end(),
- [&](SymbolRelation Elem)->bool {
- return Elem.RelatedSymbol == Rel.RelatedSymbol;
- });
+ auto It = llvm::find_if(FinalRelations, [&](SymbolRelation Elem) -> bool {
+ return Elem.RelatedSymbol == Rel.RelatedSymbol;
+ });
if (It != FinalRelations.end()) {
It->Roles |= Rel.Roles;
} else {
diff --git a/lib/Index/IndexingContext.h b/lib/Index/IndexingContext.h
index 04960086d092..3136878c080c 100644
--- a/lib/Index/IndexingContext.h
+++ b/lib/Index/IndexingContext.h
@@ -1,9 +1,8 @@
//===- IndexingContext.h - Indexing context data ----------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -62,6 +61,10 @@ public:
bool shouldIndexImplicitInstantiation() const;
+ bool shouldIndexParametersInDeclarations() const;
+
+ bool shouldIndexTemplateParameters() const;
+
static bool isTemplateImplicitInstantiation(const Decl *D);
bool handleDecl(const Decl *D, SymbolRoleSet Roles = SymbolRoleSet(),
diff --git a/lib/Index/SimpleFormatContext.h b/lib/Index/SimpleFormatContext.h
index 24adcac60201..17793154a3ae 100644
--- a/lib/Index/SimpleFormatContext.h
+++ b/lib/Index/SimpleFormatContext.h
@@ -1,9 +1,8 @@
//===--- SimpleFormatContext.h ----------------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Index/USRGeneration.cpp b/lib/Index/USRGeneration.cpp
index 84ca753bf840..228651de928e 100644
--- a/lib/Index/USRGeneration.cpp
+++ b/lib/Index/USRGeneration.cpp
@@ -1,9 +1,8 @@
//===- USRGeneration.cpp - Routines for USR generation --------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -112,7 +111,12 @@ public:
}
void VisitUsingDecl(const UsingDecl *D) {
- IgnoreResults = true;
+ VisitDeclContext(D->getDeclContext());
+ Out << "@UD@";
+
+ bool EmittedDeclName = !EmitDeclName(D);
+ assert(EmittedDeclName && "EmitDeclName can not fail for UsingDecls");
+ (void)EmittedDeclName;
}
bool ShouldGenerateLocation(const NamedDecl *D);
@@ -271,7 +275,7 @@ void USRGenerator::VisitFunctionDecl(const FunctionDecl *D) {
if (MD->isStatic())
Out << 'S';
// FIXME: OpenCL: Need to consider address spaces
- if (unsigned quals = MD->getTypeQualifiers().getCVRUQualifiers())
+ if (unsigned quals = MD->getMethodQualifiers().getCVRUQualifiers())
Out << (char)('0' + quals);
switch (MD->getRefQualifier()) {
case RQ_None: break;
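A hedged example of the effect of the VisitUsingDecl change above; the exact USR spelling beyond the "@UD@" piece shown in the hunk is an assumption:

  namespace impl { struct X; }
  namespace api {
    using impl::X;  // previously dropped (IgnoreResults); now the USR becomes the
                    // USR of the containing context 'api' followed by "@UD@X"
  }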
diff --git a/lib/Lex/DependencyDirectivesSourceMinimizer.cpp b/lib/Lex/DependencyDirectivesSourceMinimizer.cpp
new file mode 100644
index 000000000000..cfc37c5d3c62
--- /dev/null
+++ b/lib/Lex/DependencyDirectivesSourceMinimizer.cpp
@@ -0,0 +1,763 @@
+//===- DependencyDirectivesSourceMinimizer.cpp - -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This is the implementation for minimizing header and source files to the
+/// minimum necessary preprocessor directives for evaluating includes. It
+/// reduces the source down to #define, #include, #import, @import, and any
+/// conditional preprocessor logic that contains one of those.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/DependencyDirectivesSourceMinimizer.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+using namespace llvm;
+using namespace clang;
+using namespace clang::minimize_source_to_dependency_directives;
+
+namespace {
+
+struct Minimizer {
+ /// Minimized output.
+ SmallVectorImpl<char> &Out;
+ /// The known tokens encountered during the minimization.
+ SmallVectorImpl<Token> &Tokens;
+
+ Minimizer(SmallVectorImpl<char> &Out, SmallVectorImpl<Token> &Tokens,
+ StringRef Input, DiagnosticsEngine *Diags,
+ SourceLocation InputSourceLoc)
+ : Out(Out), Tokens(Tokens), Input(Input), Diags(Diags),
+ InputSourceLoc(InputSourceLoc) {}
+
+ /// Lex the provided source and emit the minimized output.
+ ///
+ /// \returns True on error.
+ bool minimize();
+
+private:
+ struct IdInfo {
+ const char *Last;
+ StringRef Name;
+ };
+
+ /// Lex an identifier.
+ ///
+ /// \pre First points at a valid identifier head.
+ LLVM_NODISCARD IdInfo lexIdentifier(const char *First, const char *const End);
+ LLVM_NODISCARD bool isNextIdentifier(StringRef Id, const char *&First,
+ const char *const End);
+ LLVM_NODISCARD bool minimizeImpl(const char *First, const char *const End);
+ LLVM_NODISCARD bool lexPPLine(const char *&First, const char *const End);
+ LLVM_NODISCARD bool lexAt(const char *&First, const char *const End);
+ LLVM_NODISCARD bool lexDefine(const char *&First, const char *const End);
+ LLVM_NODISCARD bool lexPragma(const char *&First, const char *const End);
+ LLVM_NODISCARD bool lexEndif(const char *&First, const char *const End);
+ LLVM_NODISCARD bool lexDefault(TokenKind Kind, StringRef Directive,
+ const char *&First, const char *const End);
+ Token &makeToken(TokenKind K) {
+ Tokens.emplace_back(K, Out.size());
+ return Tokens.back();
+ }
+ void popToken() {
+ Out.resize(Tokens.back().Offset);
+ Tokens.pop_back();
+ }
+ TokenKind top() const { return Tokens.empty() ? pp_none : Tokens.back().K; }
+
+ Minimizer &put(char Byte) {
+ Out.push_back(Byte);
+ return *this;
+ }
+ Minimizer &append(StringRef S) { return append(S.begin(), S.end()); }
+ Minimizer &append(const char *First, const char *Last) {
+ Out.append(First, Last);
+ return *this;
+ }
+
+ void printToNewline(const char *&First, const char *const End);
+ void printAdjacentModuleNameParts(const char *&First, const char *const End);
+ LLVM_NODISCARD bool printAtImportBody(const char *&First,
+ const char *const End);
+ void printDirectiveBody(const char *&First, const char *const End);
+ void printAdjacentMacroArgs(const char *&First, const char *const End);
+ LLVM_NODISCARD bool printMacroArgs(const char *&First, const char *const End);
+
+ /// Reports a diagnostic if the diagnostic engine is provided. Always returns
+ /// true at the end.
+ bool reportError(const char *CurPtr, unsigned Err);
+
+ StringMap<char> SplitIds;
+ StringRef Input;
+ DiagnosticsEngine *Diags;
+ SourceLocation InputSourceLoc;
+};
+
+} // end anonymous namespace
+
+bool Minimizer::reportError(const char *CurPtr, unsigned Err) {
+ if (!Diags)
+ return true;
+ assert(CurPtr >= Input.data() && "invalid buffer ptr");
+ Diags->Report(InputSourceLoc.getLocWithOffset(CurPtr - Input.data()), Err);
+ return true;
+}
+
+static void skipOverSpaces(const char *&First, const char *const End) {
+ while (First != End && isHorizontalWhitespace(*First))
+ ++First;
+}
+
+LLVM_NODISCARD static bool isRawStringLiteral(const char *First,
+ const char *Current) {
+ assert(First <= Current);
+
+ // Check if we can even back up.
+ if (*Current != '"' || First == Current)
+ return false;
+
+ // Check for an "R".
+ --Current;
+ if (*Current != 'R')
+ return false;
+ if (First == Current || !isIdentifierBody(*--Current))
+ return true;
+
+ // Check for a prefix of "u", "U", or "L".
+ if (*Current == 'u' || *Current == 'U' || *Current == 'L')
+ return First == Current || !isIdentifierBody(*--Current);
+
+ // Check for a prefix of "u8".
+ if (*Current != '8' || First == Current || *Current-- != 'u')
+ return false;
+ return First == Current || !isIdentifierBody(*--Current);
+}
+
+static void skipRawString(const char *&First, const char *const End) {
+ assert(First[0] == '"');
+ assert(First[-1] == 'R');
+
+ const char *Last = ++First;
+ while (Last != End && *Last != '(')
+ ++Last;
+ if (Last == End) {
+ First = Last; // Hit the end... just give up.
+ return;
+ }
+
+ StringRef Terminator(First, Last - First);
+ for (;;) {
+ // Move First to just past the next ")".
+ First = Last;
+ while (First != End && *First != ')')
+ ++First;
+ if (First == End)
+ return;
+ ++First;
+
+ // Look ahead for the terminator sequence.
+ Last = First;
+ while (Last != End && size_t(Last - First) < Terminator.size() &&
+ Terminator[Last - First] == *Last)
+ ++Last;
+
+ // Check if we hit it (or the end of the file).
+ if (Last == End) {
+ First = Last;
+ return;
+ }
+ if (size_t(Last - First) < Terminator.size())
+ continue;
+ if (*Last != '"')
+ continue;
+ First = Last + 1;
+ return;
+ }
+}
+
+static void skipString(const char *&First, const char *const End) {
+ assert(*First == '\'' || *First == '"');
+ const char Terminator = *First;
+ for (++First; First != End && *First != Terminator; ++First)
+ if (*First == '\\')
+ if (++First == End)
+ return;
+ if (First != End)
+ ++First; // Finish off the string.
+}
+
+static void skipNewline(const char *&First, const char *End) {
+ assert(isVerticalWhitespace(*First));
+ ++First;
+ if (First == End)
+ return;
+
+ // Check for "\n\r" and "\r\n".
+ if (LLVM_UNLIKELY(isVerticalWhitespace(*First) && First[-1] != First[0]))
+ ++First;
+}
+
+static void skipToNewlineRaw(const char *&First, const char *const End) {
+ for (;;) {
+ if (First == End)
+ return;
+
+ if (isVerticalWhitespace(*First))
+ return;
+
+ while (!isVerticalWhitespace(*First))
+ if (++First == End)
+ return;
+
+ if (First[-1] != '\\')
+ return;
+
+ ++First; // Keep going...
+ }
+}
+
+static const char *reverseOverSpaces(const char *First, const char *Last) {
+ assert(First <= Last);
+ while (First != Last && isHorizontalWhitespace(Last[-1]))
+ --Last;
+ return Last;
+}
+
+static void skipLineComment(const char *&First, const char *const End) {
+ assert(First[0] == '/' && First[1] == '/');
+ First += 2;
+ skipToNewlineRaw(First, End);
+}
+
+static void skipBlockComment(const char *&First, const char *const End) {
+ assert(First[0] == '/' && First[1] == '*');
+ if (End - First < 4) {
+ First = End;
+ return;
+ }
+ for (First += 3; First != End; ++First)
+ if (First[-1] == '*' && First[0] == '/') {
+ ++First;
+ return;
+ }
+}
+
+/// \returns True if the current single quotation mark character is a C++ 14
+/// digit separator.
+static bool isQuoteCppDigitSeparator(const char *const Start,
+ const char *const Cur,
+ const char *const End) {
+ assert(*Cur == '\'' && "expected quotation character");
+ // skipLine is called in places where we don't expect a valid number body
+ // before `Start` on the same line, so always return false at the start.
+ if (Start == Cur)
+ return false;
+ // The previous character must be a valid PP number character.
+ // Make sure that the L, u, U, u8 prefixes don't get marked as a
+ // separator though.
+ char Prev = *(Cur - 1);
+ if (Prev == 'L' || Prev == 'U' || Prev == 'u')
+ return false;
+ if (Prev == '8' && (Cur - 1 != Start) && *(Cur - 2) == 'u')
+ return false;
+ if (!isPreprocessingNumberBody(Prev))
+ return false;
+ // The next character should be a valid identifier body character.
+ return (Cur + 1) < End && isIdentifierBody(*(Cur + 1));
+}
+
+static void skipLine(const char *&First, const char *const End) {
+ do {
+ assert(First <= End);
+ if (First == End)
+ return;
+
+ if (isVerticalWhitespace(*First)) {
+ skipNewline(First, End);
+ return;
+ }
+ const char *Start = First;
+ while (First != End && !isVerticalWhitespace(*First)) {
+ // Iterate over strings correctly to avoid comments and newlines.
+ if (*First == '"' ||
+ (*First == '\'' && !isQuoteCppDigitSeparator(Start, First, End))) {
+ if (isRawStringLiteral(Start, First))
+ skipRawString(First, End);
+ else
+ skipString(First, End);
+ continue;
+ }
+
+ // Iterate over comments correctly.
+ if (*First != '/' || End - First < 2) {
+ ++First;
+ continue;
+ }
+
+ if (First[1] == '/') {
+ // "//...".
+ skipLineComment(First, End);
+ continue;
+ }
+
+ if (First[1] != '*') {
+ ++First;
+ continue;
+ }
+
+ // "/*...*/".
+ skipBlockComment(First, End);
+ }
+ if (First == End)
+ return;
+
+ // Skip over the newline.
+ assert(isVerticalWhitespace(*First));
+ skipNewline(First, End);
+ } while (First[-2] == '\\'); // Continue past line-continuations.
+}
+
+static void skipDirective(StringRef Name, const char *&First,
+ const char *const End) {
+ if (llvm::StringSwitch<bool>(Name)
+ .Case("warning", true)
+ .Case("error", true)
+ .Default(false))
+ // Do not process quotes or comments.
+ skipToNewlineRaw(First, End);
+ else
+ skipLine(First, End);
+}
+
+void Minimizer::printToNewline(const char *&First, const char *const End) {
+ while (First != End && !isVerticalWhitespace(*First)) {
+ const char *Last = First;
+ do {
+ // Iterate over strings correctly to avoid comments and newlines.
+ if (*Last == '"' || *Last == '\'') {
+ if (LLVM_UNLIKELY(isRawStringLiteral(First, Last)))
+ skipRawString(Last, End);
+ else
+ skipString(Last, End);
+ continue;
+ }
+ if (*Last != '/' || End - Last < 2) {
+ ++Last;
+ continue; // Gather the rest up to print verbatim.
+ }
+
+ if (Last[1] != '/' && Last[1] != '*') {
+ ++Last;
+ continue;
+ }
+
+ // Deal with "//..." and "/*...*/".
+ append(First, reverseOverSpaces(First, Last));
+ First = Last;
+
+ if (Last[1] == '/') {
+ skipLineComment(First, End);
+ return;
+ }
+
+ put(' ');
+ skipBlockComment(First, End);
+ skipOverSpaces(First, End);
+ Last = First;
+ } while (Last != End && !isVerticalWhitespace(*Last));
+
+ // Print out the string.
+ if (Last == End || Last == First || Last[-1] != '\\') {
+ append(First, reverseOverSpaces(First, Last));
+ return;
+ }
+
+ // Print up to the backslash, backing up over spaces.
+ append(First, reverseOverSpaces(First, Last - 1));
+
+ First = Last;
+ skipNewline(First, End);
+ skipOverSpaces(First, End);
+ }
+}
+
+static void skipWhitespace(const char *&First, const char *const End) {
+ for (;;) {
+ assert(First <= End);
+ skipOverSpaces(First, End);
+
+ if (End - First < 2)
+ return;
+
+ if (First[0] == '\\' && isVerticalWhitespace(First[1])) {
+ skipNewline(++First, End);
+ continue;
+ }
+
+ // Check for a non-comment character.
+ if (First[0] != '/')
+ return;
+
+ // "// ...".
+ if (First[1] == '/') {
+ skipLineComment(First, End);
+ return;
+ }
+
+ // Cannot be a comment.
+ if (First[1] != '*')
+ return;
+
+ // "/*...*/".
+ skipBlockComment(First, End);
+ }
+}
+
+void Minimizer::printAdjacentModuleNameParts(const char *&First,
+ const char *const End) {
+ // Skip over parts of the body.
+ const char *Last = First;
+ do
+ ++Last;
+ while (Last != End && (isIdentifierBody(*Last) || *Last == '.'));
+ append(First, Last);
+ First = Last;
+}
+
+bool Minimizer::printAtImportBody(const char *&First, const char *const End) {
+ for (;;) {
+ skipWhitespace(First, End);
+ if (First == End)
+ return true;
+
+ if (isVerticalWhitespace(*First)) {
+ skipNewline(First, End);
+ continue;
+ }
+
+ // Found a semicolon.
+ if (*First == ';') {
+ put(*First++).put('\n');
+ return false;
+ }
+
+ // Don't handle macro expansions inside @import for now.
+ if (!isIdentifierBody(*First) && *First != '.')
+ return true;
+
+ printAdjacentModuleNameParts(First, End);
+ }
+}
+
+void Minimizer::printDirectiveBody(const char *&First, const char *const End) {
+ skipWhitespace(First, End); // Skip initial whitespace.
+ printToNewline(First, End);
+ while (Out.back() == ' ')
+ Out.pop_back();
+ put('\n');
+}
+
+LLVM_NODISCARD static const char *lexRawIdentifier(const char *First,
+ const char *const End) {
+ assert(isIdentifierBody(*First) && "invalid identifier");
+ const char *Last = First + 1;
+ while (Last != End && isIdentifierBody(*Last))
+ ++Last;
+ return Last;
+}
+
+LLVM_NODISCARD static const char *
+getIdentifierContinuation(const char *First, const char *const End) {
+ if (End - First < 3 || First[0] != '\\' || !isVerticalWhitespace(First[1]))
+ return nullptr;
+
+ ++First;
+ skipNewline(First, End);
+ if (First == End)
+ return nullptr;
+ return isIdentifierBody(First[0]) ? First : nullptr;
+}
+
+Minimizer::IdInfo Minimizer::lexIdentifier(const char *First,
+ const char *const End) {
+ const char *Last = lexRawIdentifier(First, End);
+ const char *Next = getIdentifierContinuation(Last, End);
+ if (LLVM_LIKELY(!Next))
+ return IdInfo{Last, StringRef(First, Last - First)};
+
+ // Slow path, where identifiers are split over lines.
+ SmallVector<char, 64> Id(First, Last);
+ while (Next) {
+ Last = lexRawIdentifier(Next, End);
+ Id.append(Next, Last);
+ Next = getIdentifierContinuation(Last, End);
+ }
+ return IdInfo{
+ Last,
+ SplitIds.try_emplace(StringRef(Id.begin(), Id.size()), 0).first->first()};
+}
+
+void Minimizer::printAdjacentMacroArgs(const char *&First,
+ const char *const End) {
+ // Skip over parts of the body.
+ const char *Last = First;
+ do
+ ++Last;
+ while (Last != End &&
+ (isIdentifierBody(*Last) || *Last == '.' || *Last == ','));
+ append(First, Last);
+ First = Last;
+}
+
+bool Minimizer::printMacroArgs(const char *&First, const char *const End) {
+ assert(*First == '(');
+ put(*First++);
+ for (;;) {
+ skipWhitespace(First, End);
+ if (First == End)
+ return true;
+
+ if (*First == ')') {
+ put(*First++);
+ return false;
+ }
+
+ // This is intentionally fairly liberal.
+ if (!(isIdentifierBody(*First) || *First == '.' || *First == ','))
+ return true;
+
+ printAdjacentMacroArgs(First, End);
+ }
+}
+
+/// Looks for an identifier starting from First.
+///
+/// Updates "First" to just past the next identifier, if any. Returns true iff
+/// the identifier matches "Id".
+bool Minimizer::isNextIdentifier(StringRef Id, const char *&First,
+ const char *const End) {
+ skipWhitespace(First, End);
+ if (First == End || !isIdentifierHead(*First))
+ return false;
+
+ IdInfo FoundId = lexIdentifier(First, End);
+ First = FoundId.Last;
+ return FoundId.Name == Id;
+}
+
+bool Minimizer::lexAt(const char *&First, const char *const End) {
+ // Handle "@import".
+ const char *ImportLoc = First++;
+ if (!isNextIdentifier("import", First, End)) {
+ skipLine(First, End);
+ return false;
+ }
+ makeToken(decl_at_import);
+ append("@import ");
+ if (printAtImportBody(First, End))
+ return reportError(
+ ImportLoc, diag::err_dep_source_minimizer_missing_sema_after_at_import);
+ skipWhitespace(First, End);
+ if (First == End)
+ return false;
+ if (!isVerticalWhitespace(*First))
+ return reportError(
+ ImportLoc, diag::err_dep_source_minimizer_unexpected_tokens_at_import);
+ skipNewline(First, End);
+ return false;
+}
+
+bool Minimizer::lexDefine(const char *&First, const char *const End) {
+ makeToken(pp_define);
+ append("#define ");
+ skipWhitespace(First, End);
+
+ if (!isIdentifierHead(*First))
+ return reportError(First, diag::err_pp_macro_not_identifier);
+
+ IdInfo Id = lexIdentifier(First, End);
+ const char *Last = Id.Last;
+ append(Id.Name);
+ if (Last == End)
+ return false;
+ if (*Last == '(') {
+ size_t Size = Out.size();
+ if (printMacroArgs(Last, End)) {
+ // Be robust to bad macro arguments, since they can show up in disabled
+ // code.
+ Out.resize(Size);
+ append("(/* invalid */\n");
+ skipLine(Last, End);
+ return false;
+ }
+ }
+ skipWhitespace(Last, End);
+ if (Last == End)
+ return false;
+ if (!isVerticalWhitespace(*Last))
+ put(' ');
+ printDirectiveBody(Last, End);
+ First = Last;
+ return false;
+}
+
+bool Minimizer::lexPragma(const char *&First, const char *const End) {
+ // #pragma.
+ if (!isNextIdentifier("clang", First, End)) {
+ skipLine(First, End);
+ return false;
+ }
+
+ // #pragma clang.
+ if (!isNextIdentifier("module", First, End)) {
+ skipLine(First, End);
+ return false;
+ }
+
+ // #pragma clang module.
+ if (!isNextIdentifier("import", First, End)) {
+ skipLine(First, End);
+ return false;
+ }
+
+ // #pragma clang module import.
+ makeToken(pp_pragma_import);
+ append("#pragma clang module import ");
+ printDirectiveBody(First, End);
+ return false;
+}
+
+bool Minimizer::lexEndif(const char *&First, const char *const End) {
+ // Strip out "#else" if it's empty.
+ if (top() == pp_else)
+ popToken();
+
+ // Strip out "#elif" if they're empty.
+ while (top() == pp_elif)
+ popToken();
+
+ // If "#if" is empty, strip it and skip the "#endif".
+ if (top() == pp_if || top() == pp_ifdef || top() == pp_ifndef) {
+ popToken();
+ skipLine(First, End);
+ return false;
+ }
+
+ return lexDefault(pp_endif, "endif", First, End);
+}
+
+bool Minimizer::lexDefault(TokenKind Kind, StringRef Directive,
+ const char *&First, const char *const End) {
+ makeToken(Kind);
+ put('#').append(Directive).put(' ');
+ printDirectiveBody(First, End);
+ return false;
+}
+
+bool Minimizer::lexPPLine(const char *&First, const char *const End) {
+ assert(First != End);
+
+ skipWhitespace(First, End);
+ assert(First <= End);
+ if (First == End)
+ return false;
+
+ if (*First != '#' && *First != '@') {
+ skipLine(First, End);
+ assert(First <= End);
+ return false;
+ }
+
+ // Handle "@import".
+ if (*First == '@')
+ return lexAt(First, End);
+
+ // Handle preprocessing directives.
+ ++First; // Skip over '#'.
+ skipWhitespace(First, End);
+
+ if (First == End)
+ return reportError(First, diag::err_pp_expected_eol);
+
+ if (!isIdentifierHead(*First)) {
+ skipLine(First, End);
+ return false;
+ }
+
+ // Figure out the token.
+ IdInfo Id = lexIdentifier(First, End);
+ First = Id.Last;
+ auto Kind = llvm::StringSwitch<TokenKind>(Id.Name)
+ .Case("include", pp_include)
+ .Case("__include_macros", pp___include_macros)
+ .Case("define", pp_define)
+ .Case("undef", pp_undef)
+ .Case("import", pp_import)
+ .Case("include_next", pp_include_next)
+ .Case("if", pp_if)
+ .Case("ifdef", pp_ifdef)
+ .Case("ifndef", pp_ifndef)
+ .Case("elif", pp_elif)
+ .Case("else", pp_else)
+ .Case("endif", pp_endif)
+ .Case("pragma", pp_pragma_import)
+ .Default(pp_none);
+ if (Kind == pp_none) {
+ skipDirective(Id.Name, First, End);
+ return false;
+ }
+
+ if (Kind == pp_endif)
+ return lexEndif(First, End);
+
+ if (Kind == pp_define)
+ return lexDefine(First, End);
+
+ if (Kind == pp_pragma_import)
+ return lexPragma(First, End);
+
+ // Everything else.
+ return lexDefault(Kind, Id.Name, First, End);
+}
+
+bool Minimizer::minimizeImpl(const char *First, const char *const End) {
+ while (First != End)
+ if (lexPPLine(First, End))
+ return true;
+ return false;
+}
+
+bool Minimizer::minimize() {
+ bool Error = minimizeImpl(Input.begin(), Input.end());
+
+ if (!Error) {
+ // Add a trailing newline and an EOF on success.
+ if (!Out.empty() && Out.back() != '\n')
+ Out.push_back('\n');
+ makeToken(pp_eof);
+ }
+
+ // Null-terminate the output. This way the memory buffer that's passed to
+ // Clang will not have to worry about the terminating '\0'.
+ Out.push_back(0);
+ Out.pop_back();
+ return Error;
+}
+
+bool clang::minimizeSourceToDependencyDirectives(
+ StringRef Input, SmallVectorImpl<char> &Output,
+ SmallVectorImpl<Token> &Tokens, DiagnosticsEngine *Diags,
+ SourceLocation InputSourceLoc) {
+ Output.clear();
+ Tokens.clear();
+ return Minimizer(Output, Tokens, Input, Diags, InputSourceLoc).minimize();
+}
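A minimal usage sketch, assuming only the interface defined above; the helper name and argument choices are illustrative, not part of the imported file:

  #include "clang/Lex/DependencyDirectivesSourceMinimizer.h"
  #include "llvm/ADT/SmallString.h"
  #include "llvm/ADT/SmallVector.h"

  // Writes the minimized form of Source into Minimized; returns true on error.
  static bool minimizeBuffer(llvm::StringRef Source,
                             llvm::SmallString<128> &Minimized) {
    llvm::SmallVector<clang::minimize_source_to_dependency_directives::Token, 32>
        Tokens;
    // On success, Minimized holds only the directives relevant to dependency
    // scanning (#include, #define, @import and the conditionals around them),
    // and Tokens ends with a pp_eof entry.
    return clang::minimizeSourceToDependencyDirectives(
        Source, Minimized, Tokens, /*Diags=*/nullptr, clang::SourceLocation());
  }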
diff --git a/lib/Lex/HeaderMap.cpp b/lib/Lex/HeaderMap.cpp
index 23cb053c2d71..e0bf58b67505 100644
--- a/lib/Lex/HeaderMap.cpp
+++ b/lib/Lex/HeaderMap.cpp
@@ -1,9 +1,8 @@
//===--- HeaderMap.cpp - A file that acts like dir of symlinks ------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Lex/HeaderSearch.cpp b/lib/Lex/HeaderSearch.cpp
index c65fb47c0fe5..108630cc26f6 100644
--- a/lib/Lex/HeaderSearch.cpp
+++ b/lib/Lex/HeaderSearch.cpp
@@ -1,9 +1,8 @@
//===- HeaderSearch.cpp - Resolve Header File Locations -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -335,6 +334,7 @@ const FileEntry *DirectoryLookup::LookupFile(
Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule,
bool &InUserSpecifiedSystemFramework,
+ bool &IsFrameworkFound,
bool &HasBeenMapped,
SmallVectorImpl<char> &MappedName) const {
InUserSpecifiedSystemFramework = false;
@@ -363,7 +363,7 @@ const FileEntry *DirectoryLookup::LookupFile(
if (isFramework())
return DoFrameworkLookup(Filename, HS, SearchPath, RelativePath,
RequestingModule, SuggestedModule,
- InUserSpecifiedSystemFramework);
+ InUserSpecifiedSystemFramework, IsFrameworkFound);
assert(isHeaderMap() && "Unknown directory lookup");
const HeaderMap *HM = getHeaderMap();
@@ -463,7 +463,7 @@ const FileEntry *DirectoryLookup::DoFrameworkLookup(
StringRef Filename, HeaderSearch &HS, SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath, Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule,
- bool &InUserSpecifiedSystemFramework) const {
+ bool &InUserSpecifiedSystemFramework, bool &IsFrameworkFound) const {
FileManager &FileMgr = HS.getFileMgr();
// Framework names must have a '/' in the filename.
@@ -472,7 +472,7 @@ const FileEntry *DirectoryLookup::DoFrameworkLookup(
// Find out if this is the home for the specified framework, by checking
// HeaderSearch. Possible answers are yes/no and unknown.
- HeaderSearch::FrameworkCacheEntry &CacheEntry =
+ FrameworkCacheEntry &CacheEntry =
HS.LookupFrameworkCache(Filename.substr(0, SlashPos));
// If it is known and in some other directory, fail.
@@ -517,8 +517,9 @@ const FileEntry *DirectoryLookup::DoFrameworkLookup(
}
}
- // Set the 'user-specified system framework' flag.
+ // Set the out-parameter flags.
InUserSpecifiedSystemFramework = CacheEntry.IsUserSpecifiedSystemFramework;
+ IsFrameworkFound = CacheEntry.Directory;
if (RelativePath) {
RelativePath->clear();
@@ -538,7 +539,7 @@ const FileEntry *DirectoryLookup::DoFrameworkLookup(
FrameworkName.append(Filename.begin()+SlashPos+1, Filename.end());
const FileEntry *FE = FileMgr.getFile(FrameworkName,
- /*openFile=*/!SuggestedModule);
+ /*OpenFile=*/!SuggestedModule);
if (!FE) {
// Check "/System/Library/Frameworks/Cocoa.framework/PrivateHeaders/file.h"
const char *Private = "Private";
@@ -548,7 +549,7 @@ const FileEntry *DirectoryLookup::DoFrameworkLookup(
SearchPath->insert(SearchPath->begin()+OrigSize, Private,
Private+strlen(Private));
- FE = FileMgr.getFile(FrameworkName, /*openFile=*/!SuggestedModule);
+ FE = FileMgr.getFile(FrameworkName, /*OpenFile=*/!SuggestedModule);
}
// If we found the header and are allowed to suggest a module, do so now.
@@ -697,10 +698,14 @@ const FileEntry *HeaderSearch::LookupFile(
ArrayRef<std::pair<const FileEntry *, const DirectoryEntry *>> Includers,
SmallVectorImpl<char> *SearchPath, SmallVectorImpl<char> *RelativePath,
Module *RequestingModule, ModuleMap::KnownHeader *SuggestedModule,
- bool *IsMapped, bool SkipCache, bool BuildSystemModule) {
+ bool *IsMapped, bool *IsFrameworkFound, bool SkipCache,
+ bool BuildSystemModule) {
if (IsMapped)
*IsMapped = false;
+ if (IsFrameworkFound)
+ *IsFrameworkFound = false;
+
if (SuggestedModule)
*SuggestedModule = ModuleMap::KnownHeader();
@@ -852,16 +857,22 @@ const FileEntry *HeaderSearch::LookupFile(
for (; i != SearchDirs.size(); ++i) {
bool InUserSpecifiedSystemFramework = false;
bool HasBeenMapped = false;
+ bool IsFrameworkFoundInDir = false;
const FileEntry *FE = SearchDirs[i].LookupFile(
Filename, *this, IncludeLoc, SearchPath, RelativePath, RequestingModule,
- SuggestedModule, InUserSpecifiedSystemFramework, HasBeenMapped,
- MappedName);
+ SuggestedModule, InUserSpecifiedSystemFramework, IsFrameworkFoundInDir,
+ HasBeenMapped, MappedName);
if (HasBeenMapped) {
CacheLookup.MappedName =
copyString(Filename, LookupFileCache.getAllocator());
if (IsMapped)
*IsMapped = true;
}
+ if (IsFrameworkFound)
+ // Because the remapped filename persists for subsequent search directory
+ // lookups, ignore IsFrameworkFoundInDir once any remapping has happened,
+ // not just when the remapping occurred in the current search directory.
+ *IsFrameworkFound |= (IsFrameworkFoundInDir && !CacheLookup.MappedName);
if (!FE) continue;
CurDir = &SearchDirs[i];
@@ -927,10 +938,10 @@ const FileEntry *HeaderSearch::LookupFile(
ScratchFilename += '/';
ScratchFilename += Filename;
- const FileEntry *FE =
- LookupFile(ScratchFilename, IncludeLoc, /*isAngled=*/true, FromDir,
- CurDir, Includers.front(), SearchPath, RelativePath,
- RequestingModule, SuggestedModule, IsMapped);
+ const FileEntry *FE = LookupFile(
+ ScratchFilename, IncludeLoc, /*isAngled=*/true, FromDir, CurDir,
+ Includers.front(), SearchPath, RelativePath, RequestingModule,
+ SuggestedModule, IsMapped, /*IsFrameworkFound=*/nullptr);
if (checkMSVCHeaderSearch(Diags, MSFE, FE, IncludeLoc)) {
if (SuggestedModule)
@@ -1036,7 +1047,7 @@ LookupSubframeworkHeader(StringRef Filename,
}
HeadersFilename.append(Filename.begin()+SlashPos+1, Filename.end());
- if (!(FE = FileMgr.getFile(HeadersFilename, /*openFile=*/true))) {
+ if (!(FE = FileMgr.getFile(HeadersFilename, /*OpenFile=*/true))) {
// Check ".../Frameworks/HIToolbox.framework/PrivateHeaders/HIToolbox.h"
HeadersFilename = FrameworkName;
HeadersFilename += "PrivateHeaders/";
@@ -1047,7 +1058,7 @@ LookupSubframeworkHeader(StringRef Filename,
}
HeadersFilename.append(Filename.begin()+SlashPos+1, Filename.end());
- if (!(FE = FileMgr.getFile(HeadersFilename, /*openFile=*/true)))
+ if (!(FE = FileMgr.getFile(HeadersFilename, /*OpenFile=*/true)))
return nullptr;
}
@@ -1571,7 +1582,7 @@ void HeaderSearch::collectAllModules(SmallVectorImpl<Module *> &Modules) {
DirNative);
// Search each of the ".framework" directories to load them as modules.
- llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
for (llvm::vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC),
DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
@@ -1642,7 +1653,7 @@ void HeaderSearch::loadSubdirectoryModuleMaps(DirectoryLookup &SearchDir) {
FileMgr.makeAbsolutePath(Dir);
SmallString<128> DirNative;
llvm::sys::path::native(Dir, DirNative);
- llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
for (llvm::vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
bool IsFramework = llvm::sys::path::extension(Dir->path()) == ".framework";
@@ -1654,34 +1665,30 @@ void HeaderSearch::loadSubdirectoryModuleMaps(DirectoryLookup &SearchDir) {
SearchDir.setSearchedAllModuleMaps(true);
}
-std::string HeaderSearch::suggestPathToFileForDiagnostics(const FileEntry *File,
- bool *IsSystem) {
+std::string HeaderSearch::suggestPathToFileForDiagnostics(
+ const FileEntry *File, llvm::StringRef MainFile, bool *IsSystem) {
// FIXME: We assume that the path name currently cached in the FileEntry is
// the most appropriate one for this analysis (and that it's spelled the
// same way as the corresponding header search path).
- return suggestPathToFileForDiagnostics(File->getName(), /*BuildDir=*/"",
- IsSystem);
+ return suggestPathToFileForDiagnostics(File->getName(), /*WorkingDir=*/"",
+ MainFile, IsSystem);
}
std::string HeaderSearch::suggestPathToFileForDiagnostics(
- llvm::StringRef File, llvm::StringRef WorkingDir, bool *IsSystem) {
+ llvm::StringRef File, llvm::StringRef WorkingDir, llvm::StringRef MainFile,
+ bool *IsSystem) {
using namespace llvm::sys;
unsigned BestPrefixLength = 0;
- unsigned BestSearchDir;
-
- for (unsigned I = 0; I != SearchDirs.size(); ++I) {
- // FIXME: Support this search within frameworks and header maps.
- if (!SearchDirs[I].isNormalDir())
- continue;
-
- StringRef Dir = SearchDirs[I].getDir()->getName();
+ // Checks whether Dir and File share a common prefix; if they do and it is
+ // the longest prefix we've seen so far, returns true and updates
+ // BestPrefixLength accordingly.
+ auto CheckDir = [&](llvm::StringRef Dir) -> bool {
llvm::SmallString<32> DirPath(Dir.begin(), Dir.end());
- if (!WorkingDir.empty() && !path::is_absolute(Dir)) {
+ if (!WorkingDir.empty() && !path::is_absolute(Dir))
fs::make_absolute(WorkingDir, DirPath);
- path::remove_dots(DirPath, /*remove_dot_dot=*/true);
- Dir = DirPath;
- }
+ path::remove_dots(DirPath, /*remove_dot_dot=*/true);
+ Dir = DirPath;
for (auto NI = path::begin(File), NE = path::end(File),
DI = path::begin(Dir), DE = path::end(Dir);
/*termination condition in loop*/; ++NI, ++DI) {
@@ -1700,17 +1707,37 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(
unsigned PrefixLength = NI - path::begin(File);
if (PrefixLength > BestPrefixLength) {
BestPrefixLength = PrefixLength;
- BestSearchDir = I;
+ return true;
}
break;
}
+ // Consider all path separators equal.
+ if (NI->size() == 1 && DI->size() == 1 &&
+ path::is_separator(NI->front()) && path::is_separator(DI->front()))
+ continue;
+
if (*NI != *DI)
break;
}
+ return false;
+ };
+
+ for (unsigned I = 0; I != SearchDirs.size(); ++I) {
+ // FIXME: Support this search within frameworks and header maps.
+ if (!SearchDirs[I].isNormalDir())
+ continue;
+
+ StringRef Dir = SearchDirs[I].getDir()->getName();
+ if (CheckDir(Dir) && IsSystem)
+ *IsSystem = BestPrefixLength ? I >= SystemDirIdx : false;
}
- if (IsSystem)
- *IsSystem = BestPrefixLength ? BestSearchDir >= SystemDirIdx : false;
- return File.drop_front(BestPrefixLength);
+ // Try to shorten the include path using the TU's directory if we couldn't
+ // find any suitable prefix in the include search paths.
+ if (!BestPrefixLength && CheckDir(path::parent_path(MainFile)) && IsSystem)
+ *IsSystem = false;
+
+
+ return path::convert_to_slash(File.drop_front(BestPrefixLength));
}
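A hedged sketch of the reworked suggestPathToFileForDiagnostics, assuming HS is a configured HeaderSearch; paths and the exact shortened spelling are illustrative assumptions:

  bool IsSystem = false;
  // If no search directory shares a prefix with the header, the main file's
  // directory is now tried as a fallback, and the result is returned with
  // forward slashes on every host.
  std::string Spelling = HS.suggestPathToFileForDiagnostics(
      "/proj/src/util/helpers.h", /*WorkingDir=*/"",
      /*MainFile=*/"/proj/src/main.cpp", &IsSystem);
  // Expected to come back roughly as "util/helpers.h" rather than as the
  // absolute path, with IsSystem left false.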
diff --git a/lib/Lex/Lexer.cpp b/lib/Lex/Lexer.cpp
index d4723091114a..db53e6bec044 100644
--- a/lib/Lex/Lexer.cpp
+++ b/lib/Lex/Lexer.cpp
@@ -1,9 +1,8 @@
//===- Lexer.cpp - C Language Family Lexer --------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -688,7 +687,6 @@ PreambleBounds Lexer::ComputePreamble(StringRef Buffer,
// We only end up here if we didn't recognize the preprocessor
// directive or it was one that can't occur in the preamble at this
// point. Roll back the current token to the location of the '#'.
- InPreprocessorDirective = false;
TheTok = HashTok;
}
@@ -2073,7 +2071,7 @@ bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
// Update the location of token as well as BufferPtr.
const char *TokStart = BufferPtr;
- FormTokenWithChars(Result, CurPtr, tok::angle_string_literal);
+ FormTokenWithChars(Result, CurPtr, tok::header_name);
Result.setLiteralData(TokStart);
return true;
}
@@ -3233,7 +3231,7 @@ LexNextToken:
case '\r':
if (CurPtr[0] == '\n')
- Char = getAndAdvanceChar(CurPtr, Result);
+ (void)getAndAdvanceChar(CurPtr, Result);
LLVM_FALLTHROUGH;
case '\n':
// If we are inside a preprocessor directive and we see the end of line,
@@ -3466,7 +3464,9 @@ LexNextToken:
case '"':
// Notify MIOpt that we read a non-whitespace/non-comment token.
MIOpt.ReadToken();
- return LexStringLiteral(Result, CurPtr, tok::string_literal);
+ return LexStringLiteral(Result, CurPtr,
+ ParsingFilename ? tok::header_name
+ : tok::string_literal);
// C99 6.4.6: Punctuators.
case '?':
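Illustrative only: with the Lexer changes above, the filename in an include-style directive is produced as a single tok::header_name token (while the preprocessor is parsing a filename) instead of tok::angle_string_literal / tok::string_literal:

  #include <vector>     // <vector>   is lexed as tok::header_name
  #include "config.h"   // "config.h" likewise becomes tok::header_name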
diff --git a/lib/Lex/LiteralSupport.cpp b/lib/Lex/LiteralSupport.cpp
index fa0815eb9c6c..2108408377fb 100644
--- a/lib/Lex/LiteralSupport.cpp
+++ b/lib/Lex/LiteralSupport.cpp
@@ -1,9 +1,8 @@
//===--- LiteralSupport.cpp - Code to parse and process literals ----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -617,10 +616,14 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
if (isHalf || isFloat || isLong || isFloat128)
break; // HF, FF, LF, QF invalid.
- if (s + 2 < ThisTokEnd && s[1] == '1' && s[2] == '6') {
- s += 2; // success, eat up 2 characters.
- isFloat16 = true;
- continue;
+ // CUDA host and device may have different _Float16 support, therefore
+ // allow f16 literals to avoid false alarms.
+ // ToDo: more precise check for CUDA.
+ if ((PP.getTargetInfo().hasFloat16Type() || PP.getLangOpts().CUDA) &&
+ s + 2 < ThisTokEnd && s[1] == '1' && s[2] == '6') {
+ s += 2; // success, eat up 2 characters.
+ isFloat16 = true;
+ continue;
}
isFloat = true;
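A hedged example of the LiteralSupport change above; whether the literal is accepted now depends on the target:

  // Accepted only when the target reports _Float16 support, or under CUDA
  // (where host and device support may differ); previously the f16 suffix was
  // always recognized.
  _Float16 h = 1.0f16;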
diff --git a/lib/Lex/MacroArgs.cpp b/lib/Lex/MacroArgs.cpp
index dc2ba3074a8b..5aa4679fad46 100644
--- a/lib/Lex/MacroArgs.cpp
+++ b/lib/Lex/MacroArgs.cpp
@@ -1,9 +1,8 @@
//===--- MacroArgs.cpp - Formal argument info for Macros ------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -136,15 +135,12 @@ const Token *MacroArgs::getUnexpArgument(unsigned Arg) const {
return Result;
}
-// This function assumes that the variadic arguments are the tokens
-// corresponding to the last parameter (ellipsis) - and since tokens are
-// separated by the 'eof' token, if that is the only token corresponding to that
-// last parameter, we know no variadic arguments were supplied.
-bool MacroArgs::invokedWithVariadicArgument(const MacroInfo *const MI) const {
+bool MacroArgs::invokedWithVariadicArgument(const MacroInfo *const MI,
+ Preprocessor &PP) {
if (!MI->isVariadic())
return false;
const int VariadicArgIndex = getNumMacroArguments() - 1;
- return getUnexpArgument(VariadicArgIndex)->isNot(tok::eof);
+ return getPreExpArgument(VariadicArgIndex, PP).front().isNot(tok::eof);
}
/// ArgNeedsPreexpansion - If we can prove that the argument won't be affected
@@ -185,7 +181,7 @@ const std::vector<Token> &MacroArgs::getPreExpArgument(unsigned Arg,
// list. With this installed, we lex expanded tokens until we hit the EOF
// token at the end of the unexp list.
PP.EnterTokenStream(AT, NumToks, false /*disable expand*/,
- false /*owns tokens*/);
+ false /*owns tokens*/, false /*is reinject*/);
// Lex all of the macro-expanded tokens into Result.
do {
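Illustrative only: invokedWithVariadicArgument now inspects the pre-expanded argument, so a trailing argument that macro-expands to nothing is treated as if no variadic tokens were supplied:

  #define EMPTY
  #define WRAP(x, ...) x
  WRAP(1, EMPTY)  // the variadic slot expands to zero tokens, so the check now
                  // reports that no variadic argument was supplied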
diff --git a/lib/Lex/MacroInfo.cpp b/lib/Lex/MacroInfo.cpp
index 434c12007596..1ccd140364ae 100644
--- a/lib/Lex/MacroInfo.cpp
+++ b/lib/Lex/MacroInfo.cpp
@@ -1,9 +1,8 @@
//===- MacroInfo.cpp - Information about #defined identifiers -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Lex/ModuleMap.cpp b/lib/Lex/ModuleMap.cpp
index cff950b703a6..5e0be1a57da4 100644
--- a/lib/Lex/ModuleMap.cpp
+++ b/lib/Lex/ModuleMap.cpp
@@ -1,9 +1,8 @@
//===- ModuleMap.cpp - Describe the layout of modules ---------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -807,7 +806,7 @@ std::pair<Module *, bool> ModuleMap::findOrCreateModule(StringRef Name,
return std::make_pair(Result, true);
}
-Module *ModuleMap::createGlobalModuleForInterfaceUnit(SourceLocation Loc) {
+Module *ModuleMap::createGlobalModuleFragmentForModuleUnit(SourceLocation Loc) {
PendingSubmodules.emplace_back(
new Module("<global>", Loc, nullptr, /*IsFramework*/ false,
/*IsExplicit*/ true, NumCreatedModules++));
@@ -815,6 +814,16 @@ Module *ModuleMap::createGlobalModuleForInterfaceUnit(SourceLocation Loc) {
return PendingSubmodules.back().get();
}
+Module *
+ModuleMap::createPrivateModuleFragmentForInterfaceUnit(Module *Parent,
+ SourceLocation Loc) {
+ auto *Result =
+ new Module("<private>", Loc, Parent, /*IsFramework*/ false,
+ /*IsExplicit*/ true, NumCreatedModules++);
+ Result->Kind = Module::PrivateModuleFragment;
+ return Result;
+}
+
Module *ModuleMap::createModuleForInterfaceUnit(SourceLocation Loc,
StringRef Name,
Module *GlobalModule) {
@@ -1022,7 +1031,7 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
= StringRef(FrameworkDir->getName());
llvm::sys::path::append(SubframeworksDirName, "Frameworks");
llvm::sys::path::native(SubframeworksDirName);
- llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
for (llvm::vfs::directory_iterator
Dir = FS.dir_begin(SubframeworksDirName, EC),
DirEnd;
@@ -2398,7 +2407,7 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
std::error_code EC;
SmallVector<Module::Header, 6> Headers;
llvm::vfs::FileSystem &FS =
- *SourceMgr.getFileManager().getVirtualFileSystem();
+ SourceMgr.getFileManager().getVirtualFileSystem();
for (llvm::vfs::recursive_directory_iterator I(FS, Dir->getName(), EC), E;
I != E && !EC; I.increment(EC)) {
if (const FileEntry *FE = SourceMgr.getFileManager().getFile(I->path())) {
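For context, a hedged C++20 module-unit skeleton showing the fragments that the renamed createGlobalModuleFragmentForModuleUnit and the new createPrivateModuleFragmentForInterfaceUnit model:

  module;                 // global module fragment
  #include <cstddef>
  export module demo;     // module interface unit
  export int answer();
  module :private;        // private module fragment
  int answer() { return 42; }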
diff --git a/lib/Lex/PPCaching.cpp b/lib/Lex/PPCaching.cpp
index 9758557d7b44..31548d246d5a 100644
--- a/lib/Lex/PPCaching.cpp
+++ b/lib/Lex/PPCaching.cpp
@@ -1,9 +1,8 @@
//===--- PPCaching.cpp - Handle caching lexed tokens ----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -24,6 +23,7 @@ using namespace clang;
// be called multiple times and CommitBacktrackedTokens/Backtrack calls will
// be combined with the EnableBacktrackAtThisPos calls in reverse order.
void Preprocessor::EnableBacktrackAtThisPos() {
+ assert(LexLevel == 0 && "cannot use lookahead while lexing");
BacktrackPositions.push_back(CachedLexPos);
EnterCachingLexMode();
}
@@ -35,29 +35,6 @@ void Preprocessor::CommitBacktrackedTokens() {
BacktrackPositions.pop_back();
}
-Preprocessor::CachedTokensRange Preprocessor::LastCachedTokenRange() {
- assert(isBacktrackEnabled());
- auto PrevCachedLexPos = BacktrackPositions.back();
- return CachedTokensRange{PrevCachedLexPos, CachedLexPos};
-}
-
-void Preprocessor::EraseCachedTokens(CachedTokensRange TokenRange) {
- assert(TokenRange.Begin <= TokenRange.End);
- if (CachedLexPos == TokenRange.Begin && TokenRange.Begin != TokenRange.End) {
- // We have backtracked to the start of the token range as we want to consume
- // them again. Erase the tokens only after consuming then.
- assert(!CachedTokenRangeToErase);
- CachedTokenRangeToErase = TokenRange;
- return;
- }
- // The cached tokens were committed, so they should be erased now.
- assert(TokenRange.End == CachedLexPos);
- CachedTokens.erase(CachedTokens.begin() + TokenRange.Begin,
- CachedTokens.begin() + TokenRange.End);
- CachedLexPos = TokenRange.Begin;
- ExitCachingLexMode();
-}
-
// Make Preprocessor re-lex the tokens that were lexed since
// EnableBacktrackAtThisPos() was previously called.
void Preprocessor::Backtrack() {
@@ -72,15 +49,13 @@ void Preprocessor::CachingLex(Token &Result) {
if (!InCachingLexMode())
return;
+ // The assert in EnterCachingLexMode should prevent this from happening.
+ assert(LexLevel == 1 &&
+ "should not use token caching within the preprocessor");
+
if (CachedLexPos < CachedTokens.size()) {
Result = CachedTokens[CachedLexPos++];
- // Erase the some of the cached tokens after they are consumed when
- // asked to do so.
- if (CachedTokenRangeToErase &&
- CachedTokenRangeToErase->End == CachedLexPos) {
- EraseCachedTokens(*CachedTokenRangeToErase);
- CachedTokenRangeToErase = None;
- }
+ Result.setFlag(Token::IsReinjected);
return;
}
@@ -89,14 +64,14 @@ void Preprocessor::CachingLex(Token &Result) {
if (isBacktrackEnabled()) {
// Cache the lexed token.
- EnterCachingLexMode();
+ EnterCachingLexModeUnchecked();
CachedTokens.push_back(Result);
++CachedLexPos;
return;
}
if (CachedLexPos < CachedTokens.size()) {
- EnterCachingLexMode();
+ EnterCachingLexModeUnchecked();
} else {
// All cached tokens were consumed.
CachedTokens.clear();
@@ -105,11 +80,23 @@ void Preprocessor::CachingLex(Token &Result) {
}
void Preprocessor::EnterCachingLexMode() {
+ // The caching layer sits on top of all the other lexers, so it's incorrect
+ // to cache tokens while inside a nested lex action. The cached tokens would
+ // be retained after returning to the enclosing lex action and, at best,
+ // would appear at the wrong position in the token stream.
+ assert(LexLevel == 0 &&
+ "entered caching lex mode while lexing something else");
+
if (InCachingLexMode()) {
assert(CurLexerKind == CLK_CachingLexer && "Unexpected lexer kind");
return;
}
+ EnterCachingLexModeUnchecked();
+}
+
+void Preprocessor::EnterCachingLexModeUnchecked() {
+ assert(CurLexerKind != CLK_CachingLexer && "already in caching lex mode");
PushIncludeMacroStack();
CurLexerKind = CLK_CachingLexer;
}
diff --git a/lib/Lex/PPCallbacks.cpp b/lib/Lex/PPCallbacks.cpp
index 952b926005b0..cd8b04b20d24 100644
--- a/lib/Lex/PPCallbacks.cpp
+++ b/lib/Lex/PPCallbacks.cpp
@@ -1,9 +1,8 @@
//===--- PPCallbacks.cpp - Callbacks for Preprocessor actions ---*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Lex/PPConditionalDirectiveRecord.cpp b/lib/Lex/PPConditionalDirectiveRecord.cpp
index 12a77849b8b3..facee28007c7 100644
--- a/lib/Lex/PPConditionalDirectiveRecord.cpp
+++ b/lib/Lex/PPConditionalDirectiveRecord.cpp
@@ -1,9 +1,8 @@
//===--- PPConditionalDirectiveRecord.h - Preprocessing Directives-*- C++ -*-=//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -26,9 +25,8 @@ bool PPConditionalDirectiveRecord::rangeIntersectsConditionalDirective(
if (Range.isInvalid())
return false;
- CondDirectiveLocsTy::const_iterator
- low = std::lower_bound(CondDirectiveLocs.begin(), CondDirectiveLocs.end(),
- Range.getBegin(), CondDirectiveLoc::Comp(SourceMgr));
+ CondDirectiveLocsTy::const_iterator low = llvm::lower_bound(
+ CondDirectiveLocs, Range.getBegin(), CondDirectiveLoc::Comp(SourceMgr));
if (low == CondDirectiveLocs.end())
return false;
@@ -56,9 +54,8 @@ SourceLocation PPConditionalDirectiveRecord::findConditionalDirectiveRegionLoc(
Loc))
return CondDirectiveStack.back();
- CondDirectiveLocsTy::const_iterator
- low = std::lower_bound(CondDirectiveLocs.begin(), CondDirectiveLocs.end(),
- Loc, CondDirectiveLoc::Comp(SourceMgr));
+ CondDirectiveLocsTy::const_iterator low = llvm::lower_bound(
+ CondDirectiveLocs, Loc, CondDirectiveLoc::Comp(SourceMgr));
assert(low != CondDirectiveLocs.end());
return low->getRegionLoc();
}
diff --git a/lib/Lex/PPDirectives.cpp b/lib/Lex/PPDirectives.cpp
index d62a3513c777..2756042f23eb 100644
--- a/lib/Lex/PPDirectives.cpp
+++ b/lib/Lex/PPDirectives.cpp
@@ -1,9 +1,8 @@
//===--- PPDirectives.cpp - Directive Handling for Preprocessor -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -79,12 +78,18 @@ Preprocessor::AllocateVisibilityMacroDirective(SourceLocation Loc,
/// Read and discard all tokens remaining on the current line until
/// the tok::eod token is found.
-void Preprocessor::DiscardUntilEndOfDirective() {
+SourceRange Preprocessor::DiscardUntilEndOfDirective() {
Token Tmp;
- do {
- LexUnexpandedToken(Tmp);
+ SourceRange Res;
+
+ LexUnexpandedToken(Tmp);
+ Res.setBegin(Tmp.getLocation());
+ while (Tmp.isNot(tok::eod)) {
assert(Tmp.isNot(tok::eof) && "EOF seen while discarding directive tokens");
- } while (Tmp.isNot(tok::eod));
+ LexUnexpandedToken(Tmp);
+ }
+ Res.setEnd(Tmp.getLocation());
+ return Res;
}
/// Enumerates possible cases of #define/#undef a reserved identifier.
@@ -331,7 +336,10 @@ void Preprocessor::ReadMacroName(Token &MacroNameTok, MacroUse isDefineUndef,
///
/// If not, emit a diagnostic and consume up until the eod. If EnableMacros is
/// true, then we consider macros that expand to zero tokens as being ok.
-void Preprocessor::CheckEndOfDirective(const char *DirType, bool EnableMacros) {
+///
+/// Returns the location of the end of the directive.
+SourceLocation Preprocessor::CheckEndOfDirective(const char *DirType,
+ bool EnableMacros) {
Token Tmp;
// Lex unexpanded tokens for most directives: macros might expand to zero
// tokens, causing us to miss diagnosing invalid lines. Some directives (like
@@ -346,18 +354,19 @@ void Preprocessor::CheckEndOfDirective(const char *DirType, bool EnableMacros) {
while (Tmp.is(tok::comment)) // Skip comments in -C mode.
LexUnexpandedToken(Tmp);
- if (Tmp.isNot(tok::eod)) {
- // Add a fixit in GNU/C99/C++ mode. Don't offer a fixit for strict-C89,
- // or if this is a macro-style preprocessing directive, because it is more
- // trouble than it is worth to insert /**/ and check that there is no /**/
- // in the range also.
- FixItHint Hint;
- if ((LangOpts.GNUMode || LangOpts.C99 || LangOpts.CPlusPlus) &&
- !CurTokenLexer)
- Hint = FixItHint::CreateInsertion(Tmp.getLocation(),"//");
- Diag(Tmp, diag::ext_pp_extra_tokens_at_eol) << DirType << Hint;
- DiscardUntilEndOfDirective();
- }
+ if (Tmp.is(tok::eod))
+ return Tmp.getLocation();
+
+ // Add a fixit in GNU/C99/C++ mode. Don't offer a fixit for strict-C89,
+ // or if this is a macro-style preprocessing directive, because it is more
+ // trouble than it is worth to insert /**/ and check that there is no /**/
+ // in the range also.
+ FixItHint Hint;
+ if ((LangOpts.GNUMode || LangOpts.C99 || LangOpts.CPlusPlus) &&
+ !CurTokenLexer)
+ Hint = FixItHint::CreateInsertion(Tmp.getLocation(),"//");
+ Diag(Tmp, diag::ext_pp_extra_tokens_at_eol) << DirType << Hint;
+ return DiscardUntilEndOfDirective().getEnd();
}
/// SkipExcludedConditionalBlock - We just read a \#if or related directive and
@@ -538,19 +547,19 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
if (CondInfo.WasSkipping || CondInfo.FoundNonSkip) {
DiscardUntilEndOfDirective();
} else {
- const SourceLocation CondBegin = CurPPLexer->getSourceLocation();
// Restore the value of LexingRawMode so that identifiers are
// looked up, etc, inside the #elif expression.
assert(CurPPLexer->LexingRawMode && "We have to be skipping here!");
CurPPLexer->LexingRawMode = false;
IdentifierInfo *IfNDefMacro = nullptr;
- const bool CondValue = EvaluateDirectiveExpression(IfNDefMacro).Conditional;
+ DirectiveEvalResult DER = EvaluateDirectiveExpression(IfNDefMacro);
+ const bool CondValue = DER.Conditional;
CurPPLexer->LexingRawMode = true;
if (Callbacks) {
- const SourceLocation CondEnd = CurPPLexer->getSourceLocation();
- Callbacks->Elif(Tok.getLocation(),
- SourceRange(CondBegin, CondEnd),
- (CondValue ? PPCallbacks::CVK_True : PPCallbacks::CVK_False), CondInfo.IfLoc);
+ Callbacks->Elif(
+ Tok.getLocation(), DER.ExprRange,
+ (CondValue ? PPCallbacks::CVK_True : PPCallbacks::CVK_False),
+ CondInfo.IfLoc);
}
// If this condition is true, enter it!
if (CondValue) {
@@ -605,9 +614,16 @@ Preprocessor::getModuleHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
SourceLocation Loc) {
assert(M && "no module to include");
+ // If the context is the global module fragment of some module, we never
+ // want to return that file; instead, we want the innermost include-guarded
+ // header that it included.
+ bool InGlobalModuleFragment = M->Kind == Module::GlobalModuleFragment;
+
// If we have a module import syntax, we shouldn't include a header to
// make a particular module visible.
- if (getLangOpts().ObjC)
+ if ((getLangOpts().ObjC || getLangOpts().CPlusPlusModules ||
+ getLangOpts().ModulesTS) &&
+ !InGlobalModuleFragment)
return nullptr;
Module *TopM = M->getTopLevelModule();
@@ -624,6 +640,13 @@ Preprocessor::getModuleHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
if (!FE)
break;
+ if (InGlobalModuleFragment) {
+ if (getHeaderSearchInfo().isFileMultipleIncludeGuarded(FE))
+ return FE;
+ Loc = SM.getIncludeLoc(ID);
+ continue;
+ }
+
bool InTextualHeader = false;
for (auto Header : HeaderInfo.getModuleMap().findAllModulesForHeader(FE)) {
if (!Header.getModule()->isSubModuleOf(TopM))
@@ -660,7 +683,8 @@ const FileEntry *Preprocessor::LookupFile(
const DirectoryLookup *FromDir, const FileEntry *FromFile,
const DirectoryLookup *&CurDir, SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath,
- ModuleMap::KnownHeader *SuggestedModule, bool *IsMapped, bool SkipCache) {
+ ModuleMap::KnownHeader *SuggestedModule, bool *IsMapped,
+ bool *IsFrameworkFound, bool SkipCache) {
Module *RequestingModule = getModuleForLocation(FilenameLoc);
bool RequestingModuleIsModuleInterface = !SourceMgr.isInMainFile(FilenameLoc);
@@ -718,7 +742,8 @@ const FileEntry *Preprocessor::LookupFile(
while (const FileEntry *FE = HeaderInfo.LookupFile(
Filename, FilenameLoc, isAngled, TmpFromDir, TmpCurDir,
Includers, SearchPath, RelativePath, RequestingModule,
- SuggestedModule, /*IsMapped=*/nullptr, SkipCache)) {
+ SuggestedModule, /*IsMapped=*/nullptr,
+ /*IsFrameworkFound=*/nullptr, SkipCache)) {
// Keep looking as if this file did a #include_next.
TmpFromDir = TmpCurDir;
++TmpFromDir;
@@ -734,8 +759,8 @@ const FileEntry *Preprocessor::LookupFile(
// Do a standard file entry lookup.
const FileEntry *FE = HeaderInfo.LookupFile(
Filename, FilenameLoc, isAngled, FromDir, CurDir, Includers, SearchPath,
- RelativePath, RequestingModule, SuggestedModule, IsMapped, SkipCache,
- BuildSystemModule);
+ RelativePath, RequestingModule, SuggestedModule, IsMapped,
+ IsFrameworkFound, SkipCache, BuildSystemModule);
if (FE) {
if (SuggestedModule && !LangOpts.AsmPreprocessor)
HeaderInfo.getModuleMap().diagnoseHeaderInclusion(
@@ -822,10 +847,10 @@ void Preprocessor::HandleSkippedDirectiveWhileUsingPCH(Token &Result,
return HandleIncludeDirective(HashLoc, Result);
}
if (SkippingUntilPragmaHdrStop && II->getPPKeywordID() == tok::pp_pragma) {
- Token P = LookAhead(0);
- auto *II = P.getIdentifierInfo();
+ Lex(Result);
+ auto *II = Result.getIdentifierInfo();
if (II && II->getName() == "hdrstop")
- return HandlePragmaDirective(HashLoc, PIK_HashPragma);
+ return HandlePragmaHdrstop(Result);
}
}
DiscardUntilEndOfDirective();
@@ -879,6 +904,8 @@ void Preprocessor::HandleDirective(Token &Result) {
case tok::pp___include_macros:
case tok::pp_pragma:
Diag(Result, diag::err_embedded_directive) << II->getName();
+ Diag(*ArgMacro, diag::note_macro_expansion_here)
+ << ArgMacro->getIdentifierInfo();
DiscardUntilEndOfDirective();
return;
default:
@@ -955,7 +982,7 @@ void Preprocessor::HandleDirective(Token &Result) {
// C99 6.10.6 - Pragma Directive.
case tok::pp_pragma:
- return HandlePragmaDirective(SavedHash.getLocation(), PIK_HashPragma);
+ return HandlePragmaDirective({PIK_HashPragma, SavedHash.getLocation()});
// GNU Extensions.
case tok::pp_import:
@@ -1008,7 +1035,7 @@ void Preprocessor::HandleDirective(Token &Result) {
// Enter this token stream so that we re-lex the tokens. Make sure to
// enable macro expansion, in case the token after the # is an identifier
// that is expanded.
- EnterTokenStream(std::move(Toks), 2, false);
+ EnterTokenStream(std::move(Toks), 2, false, /*IsReinject*/false);
return;
}
@@ -1116,19 +1143,24 @@ void Preprocessor::HandleLineDirective() {
; // ok
else if (StrTok.isNot(tok::string_literal)) {
Diag(StrTok, diag::err_pp_line_invalid_filename);
- return DiscardUntilEndOfDirective();
+ DiscardUntilEndOfDirective();
+ return;
} else if (StrTok.hasUDSuffix()) {
Diag(StrTok, diag::err_invalid_string_udl);
- return DiscardUntilEndOfDirective();
+ DiscardUntilEndOfDirective();
+ return;
} else {
// Parse and validate the string, converting it into a unique ID.
StringLiteralParser Literal(StrTok, *this);
assert(Literal.isAscii() && "Didn't allow wide strings in");
- if (Literal.hadError)
- return DiscardUntilEndOfDirective();
+ if (Literal.hadError) {
+ DiscardUntilEndOfDirective();
+ return;
+ }
if (Literal.Pascal) {
Diag(StrTok, diag::err_pp_linemarker_invalid_filename);
- return DiscardUntilEndOfDirective();
+ DiscardUntilEndOfDirective();
+ return;
}
FilenameID = SourceMgr.getLineTableFilenameID(Literal.GetString());
@@ -1261,19 +1293,24 @@ void Preprocessor::HandleDigitDirective(Token &DigitTok) {
FileKind = SourceMgr.getFileCharacteristic(DigitTok.getLocation());
} else if (StrTok.isNot(tok::string_literal)) {
Diag(StrTok, diag::err_pp_linemarker_invalid_filename);
- return DiscardUntilEndOfDirective();
+ DiscardUntilEndOfDirective();
+ return;
} else if (StrTok.hasUDSuffix()) {
Diag(StrTok, diag::err_invalid_string_udl);
- return DiscardUntilEndOfDirective();
+ DiscardUntilEndOfDirective();
+ return;
} else {
// Parse and validate the string, converting it into a unique ID.
StringLiteralParser Literal(StrTok, *this);
assert(Literal.isAscii() && "Didn't allow wide strings in");
- if (Literal.hadError)
- return DiscardUntilEndOfDirective();
+ if (Literal.hadError) {
+ DiscardUntilEndOfDirective();
+ return;
+ }
if (Literal.Pascal) {
Diag(StrTok, diag::err_pp_linemarker_invalid_filename);
- return DiscardUntilEndOfDirective();
+ DiscardUntilEndOfDirective();
+ return;
}
FilenameID = SourceMgr.getLineTableFilenameID(Literal.GetString());
@@ -1343,7 +1380,8 @@ void Preprocessor::HandleIdentSCCSDirective(Token &Tok) {
if (StrTok.hasUDSuffix()) {
Diag(StrTok, diag::err_invalid_string_udl);
- return DiscardUntilEndOfDirective();
+ DiscardUntilEndOfDirective();
+ return;
}
// Verify that there is nothing after the string, other than EOD.
@@ -1381,7 +1419,7 @@ void Preprocessor::HandleMacroPublicDirective(Token &Tok) {
// Note that this macro has now been exported.
appendMacroDirective(II, AllocateVisibilityMacroDirective(
- MacroNameTok.getLocation(), /*IsPublic=*/true));
+ MacroNameTok.getLocation(), /*isPublic=*/true));
}
/// Handle a #private directive.
@@ -1408,7 +1446,7 @@ void Preprocessor::HandleMacroPrivateDirective() {
// Note that this macro has now been marked private.
appendMacroDirective(II, AllocateVisibilityMacroDirective(
- MacroNameTok.getLocation(), /*IsPublic=*/false));
+ MacroNameTok.getLocation(), /*isPublic=*/false));
}
//===----------------------------------------------------------------------===//
@@ -1426,6 +1464,14 @@ bool Preprocessor::GetIncludeFilenameSpelling(SourceLocation Loc,
// Get the text form of the filename.
assert(!Buffer.empty() && "Can't have tokens with empty spellings!");
+ // FIXME: Consider warning on some of the cases described in C11 6.4.7/3 and
+ // C++20 [lex.header]/2:
+ //
+ // If `"`, `'`, `\`, `/*`, or `//` appears in a header-name, then
+ // in C: behavior is undefined
+ // in C++: program is conditionally-supported with implementation-defined
+ // semantics
+
// Make sure the filename is <x> or "x".
bool isAngled;
if (Buffer[0] == '<') {
@@ -1460,67 +1506,6 @@ bool Preprocessor::GetIncludeFilenameSpelling(SourceLocation Loc,
return isAngled;
}
-// Handle cases where the \#include name is expanded from a macro
-// as multiple tokens, which need to be glued together.
-//
-// This occurs for code like:
-// \code
-// \#define FOO <a/b.h>
-// \#include FOO
-// \endcode
-// because in this case, "<a/b.h>" is returned as 7 tokens, not one.
-//
-// This code concatenates and consumes tokens up to the '>' token. It returns
-// false if the > was found, otherwise it returns true if it finds and consumes
-// the EOD marker.
-bool Preprocessor::ConcatenateIncludeName(SmallString<128> &FilenameBuffer,
- SourceLocation &End) {
- Token CurTok;
-
- Lex(CurTok);
- while (CurTok.isNot(tok::eod)) {
- End = CurTok.getLocation();
-
- // FIXME: Provide code completion for #includes.
- if (CurTok.is(tok::code_completion)) {
- setCodeCompletionReached();
- Lex(CurTok);
- continue;
- }
-
- // Append the spelling of this token to the buffer. If there was a space
- // before it, add it now.
- if (CurTok.hasLeadingSpace())
- FilenameBuffer.push_back(' ');
-
- // Get the spelling of the token, directly into FilenameBuffer if possible.
- size_t PreAppendSize = FilenameBuffer.size();
- FilenameBuffer.resize(PreAppendSize+CurTok.getLength());
-
- const char *BufPtr = &FilenameBuffer[PreAppendSize];
- unsigned ActualLen = getSpelling(CurTok, BufPtr);
-
- // If the token was spelled somewhere else, copy it into FilenameBuffer.
- if (BufPtr != &FilenameBuffer[PreAppendSize])
- memcpy(&FilenameBuffer[PreAppendSize], BufPtr, ActualLen);
-
- // Resize FilenameBuffer to the correct size.
- if (CurTok.getLength() != ActualLen)
- FilenameBuffer.resize(PreAppendSize+ActualLen);
-
- // If we found the '>' marker, return success.
- if (CurTok.is(tok::greater))
- return false;
-
- Lex(CurTok);
- }
-
- // If we hit the eod marker, emit an error and return true so that the caller
- // knows the EOD has been read.
- Diag(CurTok.getLocation(), diag::err_pp_expects_filename);
- return true;
-}
-
/// Push a token onto the token stream containing an annotation.
void Preprocessor::EnterAnnotationToken(SourceRange Range,
tok::TokenKind Kind,
@@ -1533,7 +1518,7 @@ void Preprocessor::EnterAnnotationToken(SourceRange Range,
Tok[0].setLocation(Range.getBegin());
Tok[0].setAnnotationEndLoc(Range.getEnd());
Tok[0].setAnnotationValue(AnnotationVal);
- EnterTokenStream(std::move(Tok), 1, true);
+ EnterTokenStream(std::move(Tok), 1, true, /*IsReinject*/ false);
}
/// Produce a diagnostic informing the user that a #include or similar
@@ -1542,7 +1527,13 @@ static void diagnoseAutoModuleImport(
Preprocessor &PP, SourceLocation HashLoc, Token &IncludeTok,
ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> Path,
SourceLocation PathEnd) {
- assert(PP.getLangOpts().ObjC && "no import syntax available");
+ StringRef ImportKeyword;
+ if (PP.getLangOpts().ObjC)
+ ImportKeyword = "@import";
+ else if (PP.getLangOpts().ModulesTS || PP.getLangOpts().CPlusPlusModules)
+ ImportKeyword = "import";
+ else
+ return; // no import syntax available
SmallString<128> PathString;
for (size_t I = 0, N = Path.size(); I != N; ++I) {
@@ -1577,8 +1568,8 @@ static void diagnoseAutoModuleImport(
/*IsTokenRange=*/false);
PP.Diag(HashLoc, diag::warn_auto_module_import)
<< IncludeKind << PathString
- << FixItHint::CreateReplacement(ReplaceRange,
- ("@import " + PathString + ";").str());
+ << FixItHint::CreateReplacement(
+ ReplaceRange, (ImportKeyword + " " + PathString + ";").str());
}
// Given a vector of path components and a string containing the real
@@ -1648,72 +1639,79 @@ bool Preprocessor::checkModuleIsAvailable(const LangOptions &LangOpts,
void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
Token &IncludeTok,
const DirectoryLookup *LookupFrom,
- const FileEntry *LookupFromFile,
- bool isImport) {
+ const FileEntry *LookupFromFile) {
Token FilenameTok;
- CurPPLexer->LexIncludeFilename(FilenameTok);
-
- // Reserve a buffer to get the spelling.
- SmallString<128> FilenameBuffer;
- StringRef Filename;
- SourceLocation End;
- SourceLocation CharEnd; // the end of this directive, in characters
+ if (LexHeaderName(FilenameTok))
+ return;
- switch (FilenameTok.getKind()) {
- case tok::eod:
- // If the token kind is EOD, the error has already been diagnosed.
+ if (FilenameTok.isNot(tok::header_name)) {
+ Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
+ if (FilenameTok.isNot(tok::eod))
+ DiscardUntilEndOfDirective();
return;
+ }
- case tok::angle_string_literal:
- case tok::string_literal:
- Filename = getSpelling(FilenameTok, FilenameBuffer);
- End = FilenameTok.getLocation();
- CharEnd = End.getLocWithOffset(FilenameTok.getLength());
- break;
+ // Verify that there is nothing after the filename, other than EOD. Note
+ // that we allow macros that expand to nothing after the filename, because
+ // this falls into the category of "#include pp-tokens new-line" specified
+ // in C99 6.10.2p4.
+ SourceLocation EndLoc =
+ CheckEndOfDirective(IncludeTok.getIdentifierInfo()->getNameStart(), true);
- case tok::less:
- // This could be a <foo/bar.h> file coming from a macro expansion. In this
- // case, glue the tokens together into FilenameBuffer and interpret those.
- FilenameBuffer.push_back('<');
- if (ConcatenateIncludeName(FilenameBuffer, End))
- return; // Found <eod> but no ">"? Diagnostic already emitted.
- Filename = FilenameBuffer;
- CharEnd = End.getLocWithOffset(1);
+ auto Action = HandleHeaderIncludeOrImport(HashLoc, IncludeTok, FilenameTok,
+ EndLoc, LookupFrom, LookupFromFile);
+ switch (Action.Kind) {
+ case ImportAction::None:
+ case ImportAction::SkippedModuleImport:
+ break;
+ case ImportAction::ModuleBegin:
+ EnterAnnotationToken(SourceRange(HashLoc, EndLoc),
+ tok::annot_module_begin, Action.ModuleForHeader);
+ break;
+ case ImportAction::ModuleImport:
+ EnterAnnotationToken(SourceRange(HashLoc, EndLoc),
+ tok::annot_module_include, Action.ModuleForHeader);
break;
- default:
- Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
- DiscardUntilEndOfDirective();
- return;
}
+}
+
+/// Handle either a #include-like directive or an import declaration that names
+/// a header file.
+///
+/// \param HashLoc The location of the '#' token for an include, or
+/// SourceLocation() for an import declaration.
+/// \param IncludeTok The include / include_next / import token.
+/// \param FilenameTok The header-name token.
+/// \param EndLoc The location at which any imported macros become visible.
+/// \param LookupFrom For #include_next, the starting directory for the
+/// directory lookup.
+/// \param LookupFromFile For #include_next, the starting file for the directory
+/// lookup.
+Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
+ SourceLocation HashLoc, Token &IncludeTok, Token &FilenameTok,
+ SourceLocation EndLoc, const DirectoryLookup *LookupFrom,
+ const FileEntry *LookupFromFile) {
+ SmallString<128> FilenameBuffer;
+ StringRef Filename = getSpelling(FilenameTok, FilenameBuffer);
+ SourceLocation CharEnd = FilenameTok.getEndLoc();
CharSourceRange FilenameRange
= CharSourceRange::getCharRange(FilenameTok.getLocation(), CharEnd);
StringRef OriginalFilename = Filename;
bool isAngled =
GetIncludeFilenameSpelling(FilenameTok.getLocation(), Filename);
+
// If GetIncludeFilenameSpelling set the start ptr to null, there was an
// error.
- if (Filename.empty()) {
- DiscardUntilEndOfDirective();
- return;
- }
+ if (Filename.empty())
+ return {ImportAction::None};
- // Verify that there is nothing after the filename, other than EOD. Note that
- // we allow macros that expand to nothing after the filename, because this
- // falls into the category of "#include pp-tokens new-line" specified in
- // C99 6.10.2p4.
- CheckEndOfDirective(IncludeTok.getIdentifierInfo()->getNameStart(), true);
-
- // Check that we don't have infinite #include recursion.
- if (IncludeMacroStack.size() == MaxAllowedIncludeStackDepth-1) {
- Diag(FilenameTok, diag::err_pp_include_too_deep);
- HasReachedMaxIncludeDepth = true;
- return;
- }
+ bool IsImportDecl = HashLoc.isInvalid();
+ SourceLocation StartLoc = IsImportDecl ? IncludeTok.getLocation() : HashLoc;
// Complain about attempts to #include files in an audit pragma.
if (PragmaARCCFCodeAuditedLoc.isValid()) {
- Diag(HashLoc, diag::err_pp_include_in_arc_cf_code_audited);
+ Diag(StartLoc, diag::err_pp_include_in_arc_cf_code_audited) << IsImportDecl;
Diag(PragmaARCCFCodeAuditedLoc, diag::note_pragma_entered_here);
// Immediately leave the pragma.
@@ -1722,7 +1720,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
// Complain about attempts to #include files in an assume-nonnull pragma.
if (PragmaAssumeNonNullLoc.isValid()) {
- Diag(HashLoc, diag::err_pp_include_in_assume_nonnull);
+ Diag(StartLoc, diag::err_pp_include_in_assume_nonnull) << IsImportDecl;
Diag(PragmaAssumeNonNullLoc, diag::note_pragma_entered_here);
// Immediately leave the pragma.
@@ -1740,6 +1738,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
// Search include directories.
bool IsMapped = false;
+ bool IsFrameworkFound = false;
const DirectoryLookup *CurDir;
SmallString<1024> SearchPath;
SmallString<1024> RelativePath;
@@ -1758,7 +1757,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
FilenameLoc, LangOpts.MSVCCompat ? NormalizedPath.c_str() : Filename,
isAngled, LookupFrom, LookupFromFile, CurDir,
Callbacks ? &SearchPath : nullptr, Callbacks ? &RelativePath : nullptr,
- &SuggestedModule, &IsMapped);
+ &SuggestedModule, &IsMapped, &IsFrameworkFound);
if (!File) {
if (Callbacks) {
@@ -1775,7 +1774,8 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
FilenameLoc,
LangOpts.MSVCCompat ? NormalizedPath.c_str() : Filename, isAngled,
LookupFrom, LookupFromFile, CurDir, nullptr, nullptr,
- &SuggestedModule, &IsMapped, /*SkipCache*/ true);
+ &SuggestedModule, &IsMapped, /*IsFrameworkFound=*/nullptr,
+ /*SkipCache*/ true);
}
}
}
@@ -1790,12 +1790,14 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
LangOpts.MSVCCompat ? NormalizedPath.c_str() : Filename, false,
LookupFrom, LookupFromFile, CurDir,
Callbacks ? &SearchPath : nullptr,
- Callbacks ? &RelativePath : nullptr, &SuggestedModule, &IsMapped);
+ Callbacks ? &RelativePath : nullptr, &SuggestedModule, &IsMapped,
+ /*IsFrameworkFound=*/nullptr);
if (File) {
- SourceRange Range(FilenameTok.getLocation(), CharEnd);
- Diag(FilenameTok, diag::err_pp_file_not_found_angled_include_not_fatal) <<
- Filename <<
- FixItHint::CreateReplacement(Range, "\"" + Filename.str() + "\"");
+ Diag(FilenameTok,
+ diag::err_pp_file_not_found_angled_include_not_fatal)
+ << Filename << IsImportDecl
+ << FixItHint::CreateReplacement(FilenameRange,
+ "\"" + Filename.str() + "\"");
}
}
@@ -1826,14 +1828,15 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
: TypoCorrectionName,
isAngled, LookupFrom, LookupFromFile, CurDir,
Callbacks ? &SearchPath : nullptr,
- Callbacks ? &RelativePath : nullptr, &SuggestedModule, &IsMapped);
+ Callbacks ? &RelativePath : nullptr, &SuggestedModule, &IsMapped,
+ /*IsFrameworkFound=*/nullptr);
if (File) {
- SourceRange Range(FilenameTok.getLocation(), CharEnd);
- auto Hint = isAngled
- ? FixItHint::CreateReplacement(
- Range, "<" + TypoCorrectionName.str() + ">")
- : FixItHint::CreateReplacement(
- Range, "\"" + TypoCorrectionName.str() + "\"");
+ auto Hint =
+ isAngled
+ ? FixItHint::CreateReplacement(
+ FilenameRange, "<" + TypoCorrectionName.str() + ">")
+ : FixItHint::CreateReplacement(
+ FilenameRange, "\"" + TypoCorrectionName.str() + "\"");
Diag(FilenameTok, diag::err_pp_file_not_found_typo_not_fatal)
<< OriginalFilename << TypoCorrectionName << Hint;
// We found the file, so set the Filename to the name after typo
@@ -1843,38 +1846,63 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
}
// If the file is still not found, just go with the vanilla diagnostic
- if (!File)
+ if (!File) {
Diag(FilenameTok, diag::err_pp_file_not_found) << OriginalFilename
<< FilenameRange;
+ if (IsFrameworkFound) {
+ size_t SlashPos = OriginalFilename.find('/');
+ assert(SlashPos != StringRef::npos &&
+ "Include with framework name should have '/' in the filename");
+ StringRef FrameworkName = OriginalFilename.substr(0, SlashPos);
+ FrameworkCacheEntry &CacheEntry =
+ HeaderInfo.LookupFrameworkCache(FrameworkName);
+ assert(CacheEntry.Directory && "Found framework should be in cache");
+ Diag(FilenameTok, diag::note_pp_framework_without_header)
+ << OriginalFilename.substr(SlashPos + 1) << FrameworkName
+ << CacheEntry.Directory->getName();
+ }
+ }
}
}
if (usingPCHWithThroughHeader() && SkippingUntilPCHThroughHeader) {
if (isPCHThroughHeader(File))
SkippingUntilPCHThroughHeader = false;
- return;
+ return {ImportAction::None};
+ }
+
+ // Check for circular inclusion of the main file.
+ // We can't generate a consistent preamble with regard to the conditional
+ // stack if the main file is included again as due to the preamble bounds
+ // some directives (e.g. #endif of a header guard) will never be seen.
+ // Since this will lead to confusing errors, avoid the inclusion.
+ if (File && PreambleConditionalStack.isRecording() &&
+ SourceMgr.translateFile(File) == SourceMgr.getMainFileID()) {
+ Diag(FilenameTok.getLocation(),
+ diag::err_pp_including_mainfile_in_preamble);
+ return {ImportAction::None};
}
- // Should we enter the source file? Set to false if either the source file is
+ // Should we enter the source file? Set to Skip if either the source file is
// known to have no effect beyond its effect on module visibility -- that is,
- // if it's got an include guard that is already defined or is a modular header
- // we've imported or already built.
- bool ShouldEnter = true;
+ // if it's got an include guard that is already defined, set to Import if it
+ // is a modular header we've already built and should import.
+ enum { Enter, Import, Skip, IncludeLimitReached } Action = Enter;
if (PPOpts->SingleFileParseMode)
- ShouldEnter = false;
+ Action = IncludeLimitReached;
// If we've reached the max allowed include depth, it is usually due to an
// include cycle. Don't enter already processed files again as it can lead to
// reaching the max allowed include depth again.
- if (ShouldEnter && HasReachedMaxIncludeDepth && File &&
+ if (Action == Enter && HasReachedMaxIncludeDepth && File &&
HeaderInfo.getFileInfo(File).NumIncludes)
- ShouldEnter = false;
+ Action = IncludeLimitReached;
// Determine whether we should try to import the module for this #include, if
// there is one. Don't do so if precompiled module support is disabled or we
// are processing this module textually (because we're building the module).
- if (ShouldEnter && File && SuggestedModule && getLangOpts().Modules &&
+ if (Action == Enter && File && SuggestedModule && getLangOpts().Modules &&
!isForModuleBuilding(SuggestedModule.getModule(),
getLangOpts().CurrentModule,
getLangOpts().ModuleName)) {
@@ -1887,7 +1915,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
Diag(FilenameTok.getLocation(),
diag::note_implicit_top_level_module_import_here)
<< SuggestedModule.getModule()->getTopLevelModuleName();
- return;
+ return {ImportAction::None};
}
// Compute the module access path corresponding to this module.
@@ -1900,9 +1928,8 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
std::reverse(Path.begin(), Path.end());
// Warn that we're replacing the include/import with a module import.
- // We only do this in Objective-C, where we have a module-import syntax.
- if (getLangOpts().ObjC)
- diagnoseAutoModuleImport(*this, HashLoc, IncludeTok, Path, CharEnd);
+ if (!IsImportDecl)
+ diagnoseAutoModuleImport(*this, StartLoc, IncludeTok, Path, CharEnd);
// Load the module to import its macros. We'll make the declarations
// visible when the parser gets here.
@@ -1910,13 +1937,13 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
// and making the module loader convert it back again.
ModuleLoadResult Imported = TheModuleLoader.loadModule(
IncludeTok.getLocation(), Path, Module::Hidden,
- /*IsIncludeDirective=*/true);
+ /*IsInclusionDirective=*/true);
assert((Imported == nullptr || Imported == SuggestedModule.getModule()) &&
"the imported module is different than the suggested one");
- if (Imported)
- ShouldEnter = false;
- else if (Imported.isMissingExpected()) {
+ if (Imported) {
+ Action = Import;
+ } else if (Imported.isMissingExpected()) {
// We failed to find a submodule that we assumed would exist (because it
// was in the directory of an umbrella header, for instance), but no
// actual module containing it exists (because the umbrella header is
@@ -1935,7 +1962,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
CurLexer->FormTokenWithChars(Result, CurLexer->BufferEnd, tok::eof);
CurLexer->cutOffLexing();
}
- return;
+ return {ImportAction::None};
}
}
@@ -1947,33 +1974,54 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
if (File)
FileCharacter = std::max(HeaderInfo.getFileDirFlavor(File), FileCharacter);
+ // If this is a '#import' or an import-declaration, don't re-enter the file.
+ //
+ // FIXME: If we have a suggested module for a '#include', and we've already
+ // visited this file, don't bother entering it again. We know it has no
+ // further effect.
+ bool EnterOnce =
+ IsImportDecl ||
+ IncludeTok.getIdentifierInfo()->getPPKeywordID() == tok::pp_import;
+
// Ask HeaderInfo if we should enter this #include file. If not, #including
// this file will have no effect.
- bool SkipHeader = false;
- if (ShouldEnter && File &&
- !HeaderInfo.ShouldEnterIncludeFile(*this, File, isImport,
+ if (Action == Enter && File &&
+ !HeaderInfo.ShouldEnterIncludeFile(*this, File, EnterOnce,
getLangOpts().Modules,
SuggestedModule.getModule())) {
- ShouldEnter = false;
- SkipHeader = true;
+ // Even if we've already preprocessed this header once and know that we
+ // don't need to see its contents again, we still need to import it if it's
+ // modular because we might not have imported it from this submodule before.
+ //
+ // FIXME: We don't do this when compiling a PCH because the AST
+ // serialization layer can't cope with it. This means we get local
+ // submodule visibility semantics wrong in that case.
+ Action = (SuggestedModule && !getLangOpts().CompilingPCH) ? Import : Skip;
}
- if (Callbacks) {
+ if (Callbacks && !IsImportDecl) {
// Notify the callback object that we've seen an inclusion directive.
+ // FIXME: Use a different callback for a pp-import?
Callbacks->InclusionDirective(
HashLoc, IncludeTok,
LangOpts.MSVCCompat ? NormalizedPath.c_str() : Filename, isAngled,
FilenameRange, File, SearchPath, RelativePath,
- ShouldEnter ? nullptr : SuggestedModule.getModule(), FileCharacter);
- if (SkipHeader && !SuggestedModule.getModule())
+ Action == Import ? SuggestedModule.getModule() : nullptr,
+ FileCharacter);
+ if (Action == Skip)
Callbacks->FileSkipped(*File, FilenameTok, FileCharacter);
}
if (!File)
- return;
+ return {ImportAction::None};
- // FIXME: If we have a suggested module, and we've already visited this file,
- // don't bother entering it again. We know it has no further effect.
+ // If this is a C++20 pp-import declaration, diagnose if we didn't find any
+ // module corresponding to the named header.
+ if (IsImportDecl && !SuggestedModule) {
+ Diag(FilenameTok, diag::err_header_import_not_header_unit)
+ << OriginalFilename << File->getName();
+ return {ImportAction::None};
+ }
// Issue a diagnostic if the name of the file on disk has a different case
// than the one we're about to open.
@@ -2005,37 +2053,50 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
// For other system headers, we don't. They can be controlled separately.
auto DiagId = (FileCharacter == SrcMgr::C_User || warnByDefaultOnWrongCase(Name)) ?
diag::pp_nonportable_path : diag::pp_nonportable_system_path;
- SourceRange Range(FilenameTok.getLocation(), CharEnd);
Diag(FilenameTok, DiagId) << Path <<
- FixItHint::CreateReplacement(Range, Path);
+ FixItHint::CreateReplacement(FilenameRange, Path);
}
}
- // If we don't need to enter the file, stop now.
- if (!ShouldEnter) {
+ switch (Action) {
+ case Skip:
+ // If we don't need to enter the file, stop now.
+ if (Module *M = SuggestedModule.getModule())
+ return {ImportAction::SkippedModuleImport, M};
+ return {ImportAction::None};
+
+ case IncludeLimitReached:
+ // If we reached our include limit and don't want to enter any more files,
+ // don't go any further.
+ return {ImportAction::None};
+
+ case Import: {
// If this is a module import, make it visible if needed.
- if (auto *M = SuggestedModule.getModule()) {
- // When building a pch, -fmodule-name tells the compiler to textually
- // include headers in the specified module. But it is possible that
- // ShouldEnter is false because we are skipping the header. In that
- // case, We are not importing the specified module.
- if (SkipHeader && getLangOpts().CompilingPCH &&
- isForModuleBuilding(M, getLangOpts().CurrentModule,
- getLangOpts().ModuleName))
- return;
+ Module *M = SuggestedModule.getModule();
+ assert(M && "no module to import");
- makeModuleVisible(M, HashLoc);
+ makeModuleVisible(M, EndLoc);
- if (IncludeTok.getIdentifierInfo()->getPPKeywordID() !=
- tok::pp___include_macros)
- EnterAnnotationToken(SourceRange(HashLoc, End),
- tok::annot_module_include, M);
- }
- return;
+ if (IncludeTok.getIdentifierInfo()->getPPKeywordID() ==
+ tok::pp___include_macros)
+ return {ImportAction::None};
+
+ return {ImportAction::ModuleImport, M};
+ }
+
+ case Enter:
+ break;
+ }
+
+ // Check that we don't have infinite #include recursion.
+ if (IncludeMacroStack.size() == MaxAllowedIncludeStackDepth-1) {
+ Diag(FilenameTok, diag::err_pp_include_too_deep);
+ HasReachedMaxIncludeDepth = true;
+ return {ImportAction::None};
}
// Look up the file, create a File ID for it.
- SourceLocation IncludePos = End;
+ SourceLocation IncludePos = FilenameTok.getLocation();
// If the filename string was the result of macro expansions, set the include
// position on the file where it will be included and after the expansions.
if (IncludePos.isMacroID())
@@ -2045,7 +2106,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
// If all is good, enter the new file!
if (EnterSourceFile(FID, CurDir, FilenameTok.getLocation()))
- return;
+ return {ImportAction::None};
// Determine if we're switching to building a new submodule, and which one.
if (auto *M = SuggestedModule.getModule()) {
@@ -2056,29 +2117,37 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
<< M->getFullModuleName();
Diag(M->getTopLevelModule()->ShadowingModule->DefinitionLoc,
diag::note_previous_definition);
- return;
+ return {ImportAction::None};
}
// When building a pch, -fmodule-name tells the compiler to textually
// include headers in the specified module. We are not building the
// specified module.
+ //
+ // FIXME: This is the wrong way to handle this. We should produce a PCH
+ // that behaves the same as the header would behave in a compilation using
+ // that PCH, which means we should enter the submodule. We need to teach
+ // the AST serialization layer to deal with the resulting AST.
if (getLangOpts().CompilingPCH &&
isForModuleBuilding(M, getLangOpts().CurrentModule,
getLangOpts().ModuleName))
- return;
+ return {ImportAction::None};
assert(!CurLexerSubmodule && "should not have marked this as a module yet");
CurLexerSubmodule = M;
// Let the macro handling code know that any future macros are within
// the new submodule.
- EnterSubmodule(M, HashLoc, /*ForPragma*/false);
+ EnterSubmodule(M, EndLoc, /*ForPragma*/false);
// Let the parser know that any future declarations are within the new
// submodule.
// FIXME: There's no point doing this if we're handling a #__include_macros
// directive.
- EnterAnnotationToken(SourceRange(HashLoc, End), tok::annot_module_begin, M);
+ return {ImportAction::ModuleBegin, M};
}
+
+ assert(!IsImportDecl && "failed to diagnose missing module for import decl");
+ return {ImportAction::None};
}
/// HandleIncludeNextDirective - Implements \#include_next.
@@ -2106,6 +2175,10 @@ void Preprocessor::HandleIncludeNextDirective(SourceLocation HashLoc,
LookupFromFile = CurPPLexer->getFileEntry();
Lookup = nullptr;
} else if (!Lookup) {
+ // The current file was not found by walking the include path. Either it
+ // is the primary file (handled above), or it was found by absolute path,
+ // or it was found relative to such a file.
+ // FIXME: Track enough information so we know which case we're in.
Diag(IncludeNextTok, diag::pp_include_next_absolute_path);
} else {
// Start looking up in the next directory.
@@ -2139,7 +2212,7 @@ void Preprocessor::HandleImportDirective(SourceLocation HashLoc,
return HandleMicrosoftImportDirective(ImportTok);
Diag(ImportTok, diag::ext_pp_import_directive);
}
- return HandleIncludeDirective(HashLoc, ImportTok, nullptr, nullptr, true);
+ return HandleIncludeDirective(HashLoc, ImportTok);
}
/// HandleIncludeMacrosDirective - The -imacros command line option turns into a
@@ -2198,8 +2271,7 @@ bool Preprocessor::ReadMacroParameterList(MacroInfo *MI, Token &Tok) {
// OpenCL v1.2 s6.9.e: variadic macros are not supported.
if (LangOpts.OpenCL) {
- Diag(Tok, diag::err_pp_opencl_variadic_macros);
- return true;
+ Diag(Tok, diag::ext_pp_opencl_variadic_macros);
}
// Lex the token after the identifier.
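The hunk above downgrades variadic macros in OpenCL from a hard error to an extension diagnostic (err_pp_opencl_variadic_macros becomes ext_pp_opencl_variadic_macros), so a hypothetical OpenCL input like the following is now accepted rather than rejected:

  // Previously a hard error when preprocessing OpenCL sources; after this
  // change only an extension diagnostic is emitted for the variadic list.
  #define DBG(fmt, ...) printf(fmt, __VA_ARGS__)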
@@ -2228,8 +2300,7 @@ bool Preprocessor::ReadMacroParameterList(MacroInfo *MI, Token &Tok) {
// If this is already used as a parameter, it is used multiple times (e.g.
// #define X(A,A.
- if (std::find(Parameters.begin(), Parameters.end(), II) !=
- Parameters.end()) { // C99 6.10.3p6
+ if (llvm::find(Parameters, II) != Parameters.end()) { // C99 6.10.3p6
Diag(Tok, diag::err_pp_duplicate_name_in_arg_list) << II;
return true;
}
@@ -2791,10 +2862,8 @@ void Preprocessor::HandleIfDirective(Token &IfToken,
// Parse and evaluate the conditional expression.
IdentifierInfo *IfNDefMacro = nullptr;
- const SourceLocation ConditionalBegin = CurPPLexer->getSourceLocation();
const DirectiveEvalResult DER = EvaluateDirectiveExpression(IfNDefMacro);
const bool ConditionalTrue = DER.Conditional;
- const SourceLocation ConditionalEnd = CurPPLexer->getSourceLocation();
// If this condition is equivalent to #ifndef X, and if this is the first
// directive seen, handle it for the multiple-include optimization.
@@ -2807,9 +2876,9 @@ void Preprocessor::HandleIfDirective(Token &IfToken,
}
if (Callbacks)
- Callbacks->If(IfToken.getLocation(),
- SourceRange(ConditionalBegin, ConditionalEnd),
- (ConditionalTrue ? PPCallbacks::CVK_True : PPCallbacks::CVK_False));
+ Callbacks->If(
+ IfToken.getLocation(), DER.ExprRange,
+ (ConditionalTrue ? PPCallbacks::CVK_True : PPCallbacks::CVK_False));
// Should we include the stuff contained by this directive?
if (PPOpts->SingleFileParseMode && DER.IncludedUndefinedIds) {
@@ -2902,9 +2971,7 @@ void Preprocessor::HandleElifDirective(Token &ElifToken,
// #elif directive in a non-skipping conditional... start skipping.
// We don't care what the condition is, because we will always skip it (since
// the block immediately before it was included).
- const SourceLocation ConditionalBegin = CurPPLexer->getSourceLocation();
- DiscardUntilEndOfDirective();
- const SourceLocation ConditionalEnd = CurPPLexer->getSourceLocation();
+ SourceRange ConditionRange = DiscardUntilEndOfDirective();
PPConditionalInfo CI;
if (CurPPLexer->popConditionalLevel(CI)) {
@@ -2920,8 +2987,7 @@ void Preprocessor::HandleElifDirective(Token &ElifToken,
if (CI.FoundElse) Diag(ElifToken, diag::pp_err_elif_after_else);
if (Callbacks)
- Callbacks->Elif(ElifToken.getLocation(),
- SourceRange(ConditionalBegin, ConditionalEnd),
+ Callbacks->Elif(ElifToken.getLocation(), ConditionRange,
PPCallbacks::CVK_NotEvaluated, CI.IfLoc);
if (PPOpts->SingleFileParseMode && !CI.FoundNonSkip) {
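Summarizing the PPDirectives.cpp changes for PPCallbacks clients: If and Elif keep their existing signatures, but the condition SourceRange they receive now comes from DirectiveEvalResult::ExprRange (or, for skipped #elif blocks, from the range returned by DiscardUntilEndOfDirective) instead of being recomputed from raw lexer positions. A minimal client sketch under that assumption; the logger class and its registration are illustrative, not part of the patch:

  #include "clang/Basic/SourceManager.h"
  #include "clang/Lex/PPCallbacks.h"
  #include "clang/Lex/Preprocessor.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace clang;

  // Illustrative only: log the range of every #if / #elif condition.
  class ConditionRangeLogger : public PPCallbacks {
    const SourceManager &SM;
  public:
    explicit ConditionRangeLogger(const SourceManager &SM) : SM(SM) {}
    void If(SourceLocation Loc, SourceRange ConditionRange,
            ConditionValueKind ConditionValue) override {
      llvm::errs() << "#if condition at "
                   << ConditionRange.getBegin().printToString(SM) << "\n";
    }
    void Elif(SourceLocation Loc, SourceRange ConditionRange,
              ConditionValueKind ConditionValue, SourceLocation IfLoc) override {
      llvm::errs() << "#elif condition at "
                   << ConditionRange.getBegin().printToString(SM) << "\n";
    }
  };

  // Usage sketch:
  //   PP.addPPCallbacks(
  //       llvm::make_unique<ConditionRangeLogger>(PP.getSourceManager()));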
diff --git a/lib/Lex/PPExpressions.cpp b/lib/Lex/PPExpressions.cpp
index ac01efad9bf6..e5ec2b99f507 100644
--- a/lib/Lex/PPExpressions.cpp
+++ b/lib/Lex/PPExpressions.cpp
@@ -1,9 +1,8 @@
//===--- PPExpressions.cpp - Preprocessor Expression Evaluation -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -152,8 +151,8 @@ static bool EvaluateDefined(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
return true;
}
// Consume the ).
- Result.setEnd(PeekTok.getLocation());
PP.LexNonComment(PeekTok);
+ Result.setEnd(PeekTok.getLocation());
} else {
// Consume identifier.
Result.setEnd(PeekTok.getLocation());
@@ -842,14 +841,22 @@ Preprocessor::EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro) {
PPValue ResVal(BitWidth);
DefinedTracker DT;
+ SourceLocation ExprStartLoc = SourceMgr.getExpansionLoc(Tok.getLocation());
if (EvaluateValue(ResVal, Tok, DT, true, *this)) {
// Parse error, skip the rest of the macro line.
+ SourceRange ConditionRange = ExprStartLoc;
if (Tok.isNot(tok::eod))
- DiscardUntilEndOfDirective();
+ ConditionRange = DiscardUntilEndOfDirective();
// Restore 'DisableMacroExpansion'.
DisableMacroExpansion = DisableMacroExpansionAtStartOfDirective;
- return {false, DT.IncludedUndefinedIds};
+
+ // We cannot trust the source range from the value because there was a
+ // parse error. Track the range manually -- the end of the directive is the
+ // end of the condition range.
+ return {false,
+ DT.IncludedUndefinedIds,
+ {ExprStartLoc, ConditionRange.getEnd()}};
}
// If we are at the end of the expression after just parsing a value, there
@@ -863,7 +870,7 @@ Preprocessor::EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro) {
// Restore 'DisableMacroExpansion'.
DisableMacroExpansion = DisableMacroExpansionAtStartOfDirective;
- return {ResVal.Val != 0, DT.IncludedUndefinedIds};
+ return {ResVal.Val != 0, DT.IncludedUndefinedIds, ResVal.getRange()};
}
// Otherwise, we must have a binary operator (e.g. "#if 1 < 2"), so parse the
@@ -876,7 +883,7 @@ Preprocessor::EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro) {
// Restore 'DisableMacroExpansion'.
DisableMacroExpansion = DisableMacroExpansionAtStartOfDirective;
- return {false, DT.IncludedUndefinedIds};
+ return {false, DT.IncludedUndefinedIds, ResVal.getRange()};
}
// If we aren't at the tok::eod token, something bad happened, like an extra
@@ -888,5 +895,5 @@ Preprocessor::EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro) {
// Restore 'DisableMacroExpansion'.
DisableMacroExpansion = DisableMacroExpansionAtStartOfDirective;
- return {ResVal.Val != 0, DT.IncludedUndefinedIds};
+ return {ResVal.Val != 0, DT.IncludedUndefinedIds, ResVal.getRange()};
}

diff --git a/lib/Lex/PPLexerChange.cpp b/lib/Lex/PPLexerChange.cpp
index e321dd38fed6..7cce5f9c9fe4 100644
--- a/lib/Lex/PPLexerChange.cpp
+++ b/lib/Lex/PPLexerChange.cpp
@@ -1,9 +1,8 @@
//===--- PPLexerChange.cpp - Handle changing lexers in the preprocessor ---===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -155,10 +154,11 @@ void Preprocessor::EnterMacro(Token &Tok, SourceLocation ILEnd,
/// must be freed.
///
void Preprocessor::EnterTokenStream(const Token *Toks, unsigned NumToks,
- bool DisableMacroExpansion,
- bool OwnsTokens) {
+ bool DisableMacroExpansion, bool OwnsTokens,
+ bool IsReinject) {
if (CurLexerKind == CLK_CachingLexer) {
if (CachedLexPos < CachedTokens.size()) {
+ assert(IsReinject && "new tokens in the middle of cached stream");
// We're entering tokens into the middle of our cached token stream. We
// can't represent that, so just insert the tokens into the buffer.
CachedTokens.insert(CachedTokens.begin() + CachedLexPos,
@@ -171,7 +171,8 @@ void Preprocessor::EnterTokenStream(const Token *Toks, unsigned NumToks,
// New tokens are at the end of the cached token sequence; insert the
// token stream underneath the caching lexer.
ExitCachingLexMode();
- EnterTokenStream(Toks, NumToks, DisableMacroExpansion, OwnsTokens);
+ EnterTokenStream(Toks, NumToks, DisableMacroExpansion, OwnsTokens,
+ IsReinject);
EnterCachingLexMode();
return;
}
@@ -180,10 +181,11 @@ void Preprocessor::EnterTokenStream(const Token *Toks, unsigned NumToks,
std::unique_ptr<TokenLexer> TokLexer;
if (NumCachedTokenLexers == 0) {
TokLexer = llvm::make_unique<TokenLexer>(
- Toks, NumToks, DisableMacroExpansion, OwnsTokens, *this);
+ Toks, NumToks, DisableMacroExpansion, OwnsTokens, IsReinject, *this);
} else {
TokLexer = std::move(TokenLexerCache[--NumCachedTokenLexers]);
- TokLexer->Init(Toks, NumToks, DisableMacroExpansion, OwnsTokens);
+ TokLexer->Init(Toks, NumToks, DisableMacroExpansion, OwnsTokens,
+ IsReinject);
}
// Save our current state.
@@ -271,7 +273,7 @@ void Preprocessor::diagnoseMissingHeaderInUmbrellaDir(const Module &Mod) {
ModuleMap &ModMap = getHeaderSearchInfo().getModuleMap();
const DirectoryEntry *Dir = Mod.getUmbrellaDir().Entry;
- llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
std::error_code EC;
for (llvm::vfs::recursive_directory_iterator Entry(FS, Dir->getName(), EC),
End;
@@ -645,6 +647,8 @@ void Preprocessor::EnterSubmodule(Module *M, SourceLocation ImportLoc,
BuildingSubmoduleStack.push_back(
BuildingSubmoduleInfo(M, ImportLoc, ForPragma, CurSubmoduleState,
PendingModuleMacroNames.size()));
+ if (Callbacks)
+ Callbacks->EnteredSubmodule(M, ImportLoc, ForPragma);
return;
}
@@ -689,6 +693,9 @@ void Preprocessor::EnterSubmodule(Module *M, SourceLocation ImportLoc,
BuildingSubmoduleInfo(M, ImportLoc, ForPragma, CurSubmoduleState,
PendingModuleMacroNames.size()));
+ if (Callbacks)
+ Callbacks->EnteredSubmodule(M, ImportLoc, ForPragma);
+
// Switch to this submodule as the current submodule.
CurSubmoduleState = &State;
@@ -729,6 +736,10 @@ Module *Preprocessor::LeaveSubmodule(bool ForPragma) {
// are tracking macro visibility, don't build any, and preserve the list
// of pending names for the surrounding submodule.
BuildingSubmoduleStack.pop_back();
+
+ if (Callbacks)
+ Callbacks->LeftSubmodule(LeavingMod, ImportLoc, ForPragma);
+
makeModuleVisible(LeavingMod, ImportLoc);
return LeavingMod;
}
@@ -813,6 +824,9 @@ Module *Preprocessor::LeaveSubmodule(bool ForPragma) {
BuildingSubmoduleStack.pop_back();
+ if (Callbacks)
+ Callbacks->LeftSubmodule(LeavingMod, ImportLoc, ForPragma);
+
// A nested #include makes the included submodule visible.
makeModuleVisible(LeavingMod, ImportLoc);
return LeavingMod;
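Two PPLexerChange.cpp changes are visible to API clients: EnterTokenStream grows an IsReinject flag (asserted when tokens are spliced into the middle of a cached stream), and the preprocessor now notifies PPCallbacks when it enters or leaves a submodule while building modules. A sketch of a callback client observing the new hooks (illustrative; assumes the EnteredSubmodule/LeftSubmodule virtuals added to PPCallbacks elsewhere in this import):

  #include "clang/Basic/Module.h"
  #include "clang/Lex/PPCallbacks.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace clang;

  // Illustrative only: trace submodule entry/exit during a module build.
  class SubmoduleTracer : public PPCallbacks {
  public:
    void EnteredSubmodule(Module *M, SourceLocation ImportLoc,
                          bool ForPragma) override {
      llvm::errs() << "entered submodule " << M->getFullModuleName()
                   << (ForPragma ? " (pragma)" : "") << "\n";
    }
    void LeftSubmodule(Module *M, SourceLocation ImportLoc,
                       bool ForPragma) override {
      llvm::errs() << "left submodule " << M->getFullModuleName() << "\n";
    }
  };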
diff --git a/lib/Lex/PPMacroExpansion.cpp b/lib/Lex/PPMacroExpansion.cpp
index c70ff46ec904..687b9a9d3b7b 100644
--- a/lib/Lex/PPMacroExpansion.cpp
+++ b/lib/Lex/PPMacroExpansion.cpp
@@ -1,9 +1,8 @@
-//===--- MacroExpansion.cpp - Top level Macro Expansion -------------------===//
+//===--- PPMacroExpansion.cpp - Top level Macro Expansion -----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -44,6 +43,7 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@@ -364,6 +364,7 @@ void Preprocessor::RegisterBuiltinMacros() {
}
// Clang Extensions.
+ Ident__FILE_NAME__ = RegisterBuiltinMacro(*this, "__FILE_NAME__");
Ident__has_feature = RegisterBuiltinMacro(*this, "__has_feature");
Ident__has_extension = RegisterBuiltinMacro(*this, "__has_extension");
Ident__has_builtin = RegisterBuiltinMacro(*this, "__has_builtin");
@@ -493,10 +494,13 @@ bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
// Preprocessor directives used inside macro arguments are not portable, and
// this enables the warning.
InMacroArgs = true;
+ ArgMacro = &Identifier;
+
Args = ReadMacroCallArgumentList(Identifier, MI, ExpansionEnd);
// Finished parsing args.
InMacroArgs = false;
+ ArgMacro = nullptr;
// If there was an error parsing the arguments, bail out.
if (!Args) return true;
@@ -802,7 +806,7 @@ MacroArgs *Preprocessor::ReadMacroCallArgumentList(Token &MacroName,
// Do not lose the EOF/EOD.
auto Toks = llvm::make_unique<Token[]>(1);
Toks[0] = Tok;
- EnterTokenStream(std::move(Toks), 1, true);
+ EnterTokenStream(std::move(Toks), 1, true, /*IsReinject*/ false);
break;
} else if (Tok.is(tok::r_paren)) {
// If we found the ) token, the macro arg list is done.
@@ -1151,8 +1155,11 @@ static bool EvaluateHasIncludeCommon(Token &Tok,
return false;
}
- // Get '('.
- PP.LexNonComment(Tok);
+ // Get '('. If we don't have a '(', try to form a header-name token.
+ do {
+ if (PP.LexHeaderName(Tok))
+ return false;
+ } while (Tok.getKind() == tok::comment);
// Ensure we have a '('.
if (Tok.isNot(tok::l_paren)) {
@@ -1161,58 +1168,27 @@ static bool EvaluateHasIncludeCommon(Token &Tok,
PP.Diag(LParenLoc, diag::err_pp_expected_after) << II << tok::l_paren;
// If the next token looks like a filename or the start of one,
// assume it is and process it as such.
- if (!Tok.is(tok::angle_string_literal) && !Tok.is(tok::string_literal) &&
- !Tok.is(tok::less))
+ if (Tok.isNot(tok::header_name))
return false;
} else {
// Save '(' location for possible missing ')' message.
LParenLoc = Tok.getLocation();
-
- if (PP.getCurrentLexer()) {
- // Get the file name.
- PP.getCurrentLexer()->LexIncludeFilename(Tok);
- } else {
- // We're in a macro, so we can't use LexIncludeFilename; just
- // grab the next token.
- PP.Lex(Tok);
- }
- }
-
- // Reserve a buffer to get the spelling.
- SmallString<128> FilenameBuffer;
- StringRef Filename;
- SourceLocation EndLoc;
-
- switch (Tok.getKind()) {
- case tok::eod:
- // If the token kind is EOD, the error has already been diagnosed.
- return false;
-
- case tok::angle_string_literal:
- case tok::string_literal: {
- bool Invalid = false;
- Filename = PP.getSpelling(Tok, FilenameBuffer, &Invalid);
- if (Invalid)
+ if (PP.LexHeaderName(Tok))
return false;
- break;
}
- case tok::less:
- // This could be a <foo/bar.h> file coming from a macro expansion. In this
- // case, glue the tokens together into FilenameBuffer and interpret those.
- FilenameBuffer.push_back('<');
- if (PP.ConcatenateIncludeName(FilenameBuffer, EndLoc)) {
- // Let the caller know a <eod> was found by changing the Token kind.
- Tok.setKind(tok::eod);
- return false; // Found <eod> but no ">"? Diagnostic already emitted.
- }
- Filename = FilenameBuffer;
- break;
- default:
+ if (Tok.isNot(tok::header_name)) {
PP.Diag(Tok.getLocation(), diag::err_pp_expects_filename);
return false;
}
+ // Reserve a buffer to get the spelling.
+ SmallString<128> FilenameBuffer;
+ bool Invalid = false;
+ StringRef Filename = PP.getSpelling(Tok, FilenameBuffer, &Invalid);
+ if (Invalid)
+ return false;
+
SourceLocation FilenameLoc = Tok.getLocation();
// Get ')'.
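The rewritten EvaluateHasIncludeCommon obtains the file name as a single tok::header_name via LexHeaderName instead of re-assembling angle-bracket includes token by token; the observable behavior of __has_include is unchanged. For reference, the construct being evaluated:

  // Both spellings below are lexed as one header-name token by the
  // preprocessor when evaluating __has_include.
  #if __has_include(<optional>)
  #  include <optional>
  #endif
  #if __has_include("project_config.h")   // hypothetical local header
  #  include "project_config.h"
  #endif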
@@ -1236,7 +1212,7 @@ static bool EvaluateHasIncludeCommon(Token &Tok,
const DirectoryLookup *CurDir;
const FileEntry *File =
PP.LookupFile(FilenameLoc, Filename, isAngled, LookupFrom, LookupFromFile,
- CurDir, nullptr, nullptr, nullptr, nullptr);
+ CurDir, nullptr, nullptr, nullptr, nullptr, nullptr);
if (PPCallbacks *Callbacks = PP.getPPCallbacks()) {
SrcMgr::CharacteristicKind FileType = SrcMgr::C_User;
@@ -1354,9 +1330,13 @@ already_lexed:
// The last ')' has been reached; return the value if one found or
// a diagnostic and a dummy value.
- if (Result.hasValue())
+ if (Result.hasValue()) {
OS << Result.getValue();
- else {
+ // For strict conformance to __has_cpp_attribute rules, use 'L'
+ // suffix for dated literals.
+ if (Result.getValue() > 1)
+ OS << 'L';
+ } else {
OS << 0;
if (!SuppressDiagnostic)
PP.Diag(Tok.getLocation(), diag::err_too_few_args_in_macro_invoc);
@@ -1478,6 +1458,8 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
// Set up the return result.
Tok.setIdentifierInfo(nullptr);
Tok.clearFlag(Token::NeedsCleaning);
+ bool IsAtStartOfLine = Tok.isAtStartOfLine();
+ bool HasLeadingSpace = Tok.hasLeadingSpace();
if (II == Ident__LINE__) {
// C99 6.10.8: "__LINE__: The presumed line number (within the current
@@ -1500,7 +1482,8 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
// __LINE__ expands to a simple numeric value.
OS << (PLoc.isValid()? PLoc.getLine() : 1);
Tok.setKind(tok::numeric_constant);
- } else if (II == Ident__FILE__ || II == Ident__BASE_FILE__) {
+ } else if (II == Ident__FILE__ || II == Ident__BASE_FILE__ ||
+ II == Ident__FILE_NAME__) {
// C99 6.10.8: "__FILE__: The presumed name of the current source file (a
// character string literal)". This can be affected by #line.
PresumedLoc PLoc = SourceMgr.getPresumedLoc(Tok.getLocation());
@@ -1521,7 +1504,19 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
// Escape this filename. Turn '\' -> '\\' '"' -> '\"'
SmallString<128> FN;
if (PLoc.isValid()) {
- FN += PLoc.getFilename();
+ // __FILE_NAME__ is a Clang-specific extension that expands to the
+      // last part of __FILE__.
+ if (II == Ident__FILE_NAME__) {
+        // Try to get the last path component; failing that, return the original
+ // presumed location.
+ StringRef PLFileName = llvm::sys::path::filename(PLoc.getFilename());
+ if (PLFileName != "")
+ FN += PLFileName;
+ else
+ FN += PLoc.getFilename();
+ } else {
+ FN += PLoc.getFilename();
+ }
Lexer::Stringify(FN);
OS << '"' << FN << '"';
}
@@ -1631,6 +1626,11 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
.Case("__is_target_vendor", true)
.Case("__is_target_os", true)
.Case("__is_target_environment", true)
+ .Case("__builtin_LINE", true)
+ .Case("__builtin_FILE", true)
+ .Case("__builtin_FUNCTION", true)
+ .Case("__builtin_COLUMN", true)
+ .Case("__builtin_bit_cast", true)
.Default(false);
}
});
@@ -1707,7 +1707,7 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
HasLexedNextToken = Tok.is(tok::string_literal);
if (!FinishLexStringLiteral(Tok, WarningName, "'__has_warning'",
- /*MacroExpansion=*/false))
+ /*AllowMacroExpansion=*/false))
return false;
// FIXME: Should we accept "-R..." flags here, or should that be
@@ -1814,6 +1814,8 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
llvm_unreachable("Unknown identifier!");
}
CreateString(OS.str(), Tok, Tok.getLocation(), Tok.getLocation());
+ Tok.setFlagValue(Token::StartOfLine, IsAtStartOfLine);
+ Tok.setFlagValue(Token::LeadingSpace, HasLeadingSpace);
}
void Preprocessor::markMacroAsUsed(MacroInfo *MI) {
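As the comment added above says, __FILE_NAME__ is a Clang extension that expands to just the last path component of the presumed file name, while __FILE__ keeps the full spelling (both subject to #line). A small illustration, assuming the file is compiled as src/widget.cpp:

  #include <cstdio>

  int main() {
    // With a hypothetical invocation "clang++ src/widget.cpp":
    std::printf("%s\n", __FILE__);       // "src/widget.cpp"
    std::printf("%s\n", __FILE_NAME__);  // "widget.cpp"
  }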
diff --git a/lib/Lex/Pragma.cpp b/lib/Lex/Pragma.cpp
index 575935119f6f..4e4db668551f 100644
--- a/lib/Lex/Pragma.cpp
+++ b/lib/Lex/Pragma.cpp
@@ -1,9 +1,8 @@
//===- Pragma.cpp - Pragma registration and handling ----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -64,7 +63,7 @@ PragmaHandler::~PragmaHandler() = default;
EmptyPragmaHandler::EmptyPragmaHandler(StringRef Name) : PragmaHandler(Name) {}
void EmptyPragmaHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
+ PragmaIntroducer Introducer,
Token &FirstToken) {}
//===----------------------------------------------------------------------===//
@@ -99,8 +98,7 @@ void PragmaNamespace::RemovePragmaHandler(PragmaHandler *Handler) {
}
void PragmaNamespace::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
- Token &Tok) {
+ PragmaIntroducer Introducer, Token &Tok) {
// Read the 'namespace' that the directive is in, e.g. STDC. Do not macro
// expand it, the user can have a STDC #define, that should not affect this.
PP.LexUnexpandedToken(Tok);
@@ -125,10 +123,9 @@ void PragmaNamespace::HandlePragma(Preprocessor &PP,
/// HandlePragmaDirective - The "\#pragma" directive has been parsed. Lex the
/// rest of the pragma, passing it to the registered pragma handlers.
-void Preprocessor::HandlePragmaDirective(SourceLocation IntroducerLoc,
- PragmaIntroducerKind Introducer) {
+void Preprocessor::HandlePragmaDirective(PragmaIntroducer Introducer) {
if (Callbacks)
- Callbacks->PragmaDirective(IntroducerLoc, Introducer);
+ Callbacks->PragmaDirective(Introducer.Loc, Introducer.Kind);
if (!PragmasEnabled)
return;
@@ -145,84 +142,73 @@ void Preprocessor::HandlePragmaDirective(SourceLocation IntroducerLoc,
DiscardUntilEndOfDirective();
}
-namespace {
-
-/// Helper class for \see Preprocessor::Handle_Pragma.
-class LexingFor_PragmaRAII {
- Preprocessor &PP;
- bool InMacroArgPreExpansion;
- bool Failed = false;
- Token &OutTok;
- Token PragmaTok;
-
-public:
- LexingFor_PragmaRAII(Preprocessor &PP, bool InMacroArgPreExpansion,
- Token &Tok)
- : PP(PP), InMacroArgPreExpansion(InMacroArgPreExpansion), OutTok(Tok) {
- if (InMacroArgPreExpansion) {
- PragmaTok = OutTok;
- PP.EnableBacktrackAtThisPos();
- }
- }
-
- ~LexingFor_PragmaRAII() {
- if (InMacroArgPreExpansion) {
- // When committing/backtracking the cached pragma tokens in a macro
- // argument pre-expansion we want to ensure that either the tokens which
- // have been committed will be removed from the cache or that the tokens
- // over which we just backtracked won't remain in the cache after they're
- // consumed and that the caching will stop after consuming them.
- // Otherwise the caching will interfere with the way macro expansion
- // works, because we will continue to cache tokens after consuming the
- // backtracked tokens, which shouldn't happen when we're dealing with
- // macro argument pre-expansion.
- auto CachedTokenRange = PP.LastCachedTokenRange();
- if (Failed) {
- PP.CommitBacktrackedTokens();
- } else {
- PP.Backtrack();
- OutTok = PragmaTok;
- }
- PP.EraseCachedTokens(CachedTokenRange);
- }
- }
-
- void failed() {
- Failed = true;
- }
-};
-
-} // namespace
-
/// Handle_Pragma - Read a _Pragma directive, slice it up, process it, then
/// return the first token after the directive. The _Pragma token has just
/// been read into 'Tok'.
void Preprocessor::Handle_Pragma(Token &Tok) {
- // This works differently if we are pre-expanding a macro argument.
- // In that case we don't actually "activate" the pragma now, we only lex it
- // until we are sure it is lexically correct and then we backtrack so that
- // we activate the pragma whenever we encounter the tokens again in the token
- // stream. This ensures that we will activate it in the correct location
- // or that we will ignore it if it never enters the token stream, e.g:
+ // C11 6.10.3.4/3:
+ // all pragma unary operator expressions within [a completely
+ // macro-replaced preprocessing token sequence] are [...] processed [after
+ // rescanning is complete]
+ //
+ // This means that we execute _Pragma operators in two cases:
+ //
+ // 1) on token sequences that would otherwise be produced as the output of
+ // phase 4 of preprocessing, and
+ // 2) on token sequences formed as the macro-replaced token sequence of a
+ // macro argument
//
- // #define EMPTY(x)
- // #define INACTIVE(x) EMPTY(x)
- // INACTIVE(_Pragma("clang diagnostic ignored \"-Wconversion\""))
+ // Case #2 appears to be a wording bug: only _Pragmas that would survive to
+ // the end of phase 4 should actually be executed. Discussion on the WG14
+ // mailing list suggests that a _Pragma operator is notionally checked early,
+ // but only pragmas that survive to the end of phase 4 should be executed.
+ //
+ // In Case #2, we check the syntax now, but then put the tokens back into the
+ // token stream for later consumption.
+
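// Case #2 in practice, using the same sketch the removed RAII comment relied
// on:
//   #define EMPTY(x)
//   #define INACTIVE(x) EMPTY(x)
//   INACTIVE(_Pragma("clang diagnostic ignored \"-Wconversion\""))
// The _Pragma is syntax-checked while the argument is pre-expanded, but its
// tokens never survive to the end of phase 4, so it is never executed.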
+ struct TokenCollector {
+ Preprocessor &Self;
+ bool Collect;
+ SmallVector<Token, 3> Tokens;
+ Token &Tok;
+
+ void lex() {
+ if (Collect)
+ Tokens.push_back(Tok);
+ Self.Lex(Tok);
+ }
- LexingFor_PragmaRAII _PragmaLexing(*this, InMacroArgPreExpansion, Tok);
+ void revert() {
+ assert(Collect && "did not collect tokens");
+ assert(!Tokens.empty() && "collected unexpected number of tokens");
+
+ // Push the ( "string" ) tokens into the token stream.
+ auto Toks = llvm::make_unique<Token[]>(Tokens.size());
+ std::copy(Tokens.begin() + 1, Tokens.end(), Toks.get());
+ Toks[Tokens.size() - 1] = Tok;
+ Self.EnterTokenStream(std::move(Toks), Tokens.size(),
+ /*DisableMacroExpansion*/ true,
+ /*IsReinject*/ true);
+
+ // ... and return the _Pragma token unchanged.
+ Tok = *Tokens.begin();
+ }
+ };
+
+ TokenCollector Toks = {*this, InMacroArgPreExpansion, {}, Tok};
// Remember the pragma token location.
SourceLocation PragmaLoc = Tok.getLocation();
// Read the '('.
- Lex(Tok);
+ Toks.lex();
if (Tok.isNot(tok::l_paren)) {
Diag(PragmaLoc, diag::err__Pragma_malformed);
- return _PragmaLexing.failed();
+ return;
}
// Read the '"..."'.
- Lex(Tok);
+ Toks.lex();
if (!tok::isStringLiteral(Tok.getKind())) {
Diag(PragmaLoc, diag::err__Pragma_malformed);
// Skip bad tokens, and the ')', if present.
@@ -234,7 +220,7 @@ void Preprocessor::Handle_Pragma(Token &Tok) {
Lex(Tok);
if (Tok.is(tok::r_paren))
Lex(Tok);
- return _PragmaLexing.failed();
+ return;
}
if (Tok.hasUDSuffix()) {
@@ -243,21 +229,24 @@ void Preprocessor::Handle_Pragma(Token &Tok) {
Lex(Tok);
if (Tok.is(tok::r_paren))
Lex(Tok);
- return _PragmaLexing.failed();
+ return;
}
// Remember the string.
Token StrTok = Tok;
// Read the ')'.
- Lex(Tok);
+ Toks.lex();
if (Tok.isNot(tok::r_paren)) {
Diag(PragmaLoc, diag::err__Pragma_malformed);
- return _PragmaLexing.failed();
+ return;
}
- if (InMacroArgPreExpansion)
+ // If we're expanding a macro argument, put the tokens back.
+ if (InMacroArgPreExpansion) {
+ Toks.revert();
return;
+ }
SourceLocation RParenLoc = Tok.getLocation();
std::string StrVal = getSpelling(StrTok);
@@ -330,7 +319,7 @@ void Preprocessor::Handle_Pragma(Token &Tok) {
EnterSourceFileWithLexer(TL, nullptr);
// With everything set up, lex this as a #pragma directive.
- HandlePragmaDirective(PragmaLoc, PIK__Pragma);
+ HandlePragmaDirective({PIK__Pragma, PragmaLoc});
// Finally, return whatever came after the pragma directive.
return Lex(Tok);
@@ -376,10 +365,11 @@ void Preprocessor::HandleMicrosoft__pragma(Token &Tok) {
std::copy(PragmaToks.begin(), PragmaToks.end(), TokArray);
// Push the tokens onto the stack.
- EnterTokenStream(TokArray, PragmaToks.size(), true, true);
+ EnterTokenStream(TokArray, PragmaToks.size(), true, true,
+ /*IsReinject*/ false);
// With everything set up, lex this as a #pragma directive.
- HandlePragmaDirective(PragmaLoc, PIK___pragma);
+ HandlePragmaDirective({PIK___pragma, PragmaLoc});
// Finally, return whatever came after the pragma directive.
return Lex(Tok);
@@ -483,11 +473,14 @@ void Preprocessor::HandlePragmaSystemHeader(Token &SysHeaderTok) {
/// HandlePragmaDependency - Handle \#pragma GCC dependency "foo" blah.
void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
Token FilenameTok;
- CurPPLexer->LexIncludeFilename(FilenameTok);
+ if (LexHeaderName(FilenameTok, /*AllowMacroExpansion=*/false))
+ return;
- // If the token kind is EOD, the error has already been diagnosed.
- if (FilenameTok.is(tok::eod))
+ // If the next token wasn't a header-name, diagnose the error.
+ if (FilenameTok.isNot(tok::header_name)) {
+ Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
return;
+ }
// Reserve a buffer to get the spelling.
SmallString<128> FilenameBuffer;
@@ -507,7 +500,7 @@ void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
const DirectoryLookup *CurDir;
const FileEntry *File =
LookupFile(FilenameTok.getLocation(), Filename, isAngled, nullptr,
- nullptr, CurDir, nullptr, nullptr, nullptr, nullptr);
+ nullptr, CurDir, nullptr, nullptr, nullptr, nullptr, nullptr);
if (!File) {
if (!SuppressIncludeNotFoundError)
Diag(FilenameTok, diag::err_pp_file_not_found) << Filename;
@@ -663,24 +656,13 @@ void Preprocessor::HandlePragmaIncludeAlias(Token &Tok) {
// We expect either a quoted string literal, or a bracketed name
Token SourceFilenameTok;
- CurPPLexer->LexIncludeFilename(SourceFilenameTok);
- if (SourceFilenameTok.is(tok::eod)) {
- // The diagnostic has already been handled
+ if (LexHeaderName(SourceFilenameTok))
return;
- }
StringRef SourceFileName;
SmallString<128> FileNameBuffer;
- if (SourceFilenameTok.is(tok::string_literal) ||
- SourceFilenameTok.is(tok::angle_string_literal)) {
+ if (SourceFilenameTok.is(tok::header_name)) {
SourceFileName = getSpelling(SourceFilenameTok, FileNameBuffer);
- } else if (SourceFilenameTok.is(tok::less)) {
- // This could be a path instead of just a name
- FileNameBuffer.push_back('<');
- SourceLocation End;
- if (ConcatenateIncludeName(FileNameBuffer, End))
- return; // Diagnostic already emitted
- SourceFileName = FileNameBuffer;
} else {
Diag(Tok, diag::warn_pragma_include_alias_expected_filename);
return;
@@ -695,23 +677,12 @@ void Preprocessor::HandlePragmaIncludeAlias(Token &Tok) {
}
Token ReplaceFilenameTok;
- CurPPLexer->LexIncludeFilename(ReplaceFilenameTok);
- if (ReplaceFilenameTok.is(tok::eod)) {
- // The diagnostic has already been handled
+ if (LexHeaderName(ReplaceFilenameTok))
return;
- }
StringRef ReplaceFileName;
- if (ReplaceFilenameTok.is(tok::string_literal) ||
- ReplaceFilenameTok.is(tok::angle_string_literal)) {
+ if (ReplaceFilenameTok.is(tok::header_name)) {
ReplaceFileName = getSpelling(ReplaceFilenameTok, FileNameBuffer);
- } else if (ReplaceFilenameTok.is(tok::less)) {
- // This could be a path instead of just a name
- FileNameBuffer.push_back('<');
- SourceLocation End;
- if (ConcatenateIncludeName(FileNameBuffer, End))
- return; // Diagnostic already emitted
- ReplaceFileName = FileNameBuffer;
} else {
Diag(Tok, diag::warn_pragma_include_alias_expected_filename);
return;
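// Both spellings the reworked lexing still accepts, sketched with placeholder
// header names:
#pragma include_alias(<foo_impl.h>, <foo.h>)
#pragma include_alias("foo_impl.h", "foo.h")
// Each operand must now lex as a single header_name token; anything else hits
// the warn_pragma_include_alias_expected_filename path above.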
@@ -986,7 +957,7 @@ namespace {
struct PragmaOnceHandler : public PragmaHandler {
PragmaOnceHandler() : PragmaHandler("once") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &OnceTok) override {
PP.CheckEndOfDirective("pragma once");
PP.HandlePragmaOnce(OnceTok);
@@ -998,7 +969,7 @@ struct PragmaOnceHandler : public PragmaHandler {
struct PragmaMarkHandler : public PragmaHandler {
PragmaMarkHandler() : PragmaHandler("mark") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &MarkTok) override {
PP.HandlePragmaMark();
}
@@ -1008,7 +979,7 @@ struct PragmaMarkHandler : public PragmaHandler {
struct PragmaPoisonHandler : public PragmaHandler {
PragmaPoisonHandler() : PragmaHandler("poison") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &PoisonTok) override {
PP.HandlePragmaPoison();
}
@@ -1019,7 +990,7 @@ struct PragmaPoisonHandler : public PragmaHandler {
struct PragmaSystemHeaderHandler : public PragmaHandler {
PragmaSystemHeaderHandler() : PragmaHandler("system_header") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &SHToken) override {
PP.HandlePragmaSystemHeader(SHToken);
PP.CheckEndOfDirective("pragma");
@@ -1029,7 +1000,7 @@ struct PragmaSystemHeaderHandler : public PragmaHandler {
struct PragmaDependencyHandler : public PragmaHandler {
PragmaDependencyHandler() : PragmaHandler("dependency") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &DepToken) override {
PP.HandlePragmaDependency(DepToken);
}
@@ -1038,8 +1009,8 @@ struct PragmaDependencyHandler : public PragmaHandler {
struct PragmaDebugHandler : public PragmaHandler {
PragmaDebugHandler() : PragmaHandler("__debug") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
- Token &DepToken) override {
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
+ Token &DebugToken) override {
Token Tok;
PP.LexUnexpandedToken(Tok);
if (Tok.isNot(tok::identifier)) {
@@ -1057,7 +1028,7 @@ struct PragmaDebugHandler : public PragmaHandler {
Crasher.startToken();
Crasher.setKind(tok::annot_pragma_parser_crash);
Crasher.setAnnotationRange(SourceRange(Tok.getLocation()));
- PP.EnterToken(Crasher);
+ PP.EnterToken(Crasher, /*IsReinject*/false);
} else if (II->isStr("dump")) {
Token Identifier;
PP.LexUnexpandedToken(Identifier);
@@ -1069,7 +1040,7 @@ struct PragmaDebugHandler : public PragmaHandler {
SourceRange(Tok.getLocation(), Identifier.getLocation()));
DumpAnnot.setAnnotationValue(DumpII);
PP.DiscardUntilEndOfDirective();
- PP.EnterToken(DumpAnnot);
+ PP.EnterToken(DumpAnnot, /*IsReinject*/false);
} else {
PP.Diag(Identifier, diag::warn_pragma_debug_missing_argument)
<< II->getName();
@@ -1101,6 +1072,22 @@ struct PragmaDebugHandler : public PragmaHandler {
else
PP.Diag(MacroName, diag::warn_pragma_debug_missing_argument)
<< II->getName();
+ } else if (II->isStr("module_map")) {
+ llvm::SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 8>
+ ModuleName;
+ if (LexModuleName(PP, Tok, ModuleName))
+ return;
+ ModuleMap &MM = PP.getHeaderSearchInfo().getModuleMap();
+ Module *M = nullptr;
+ for (auto IIAndLoc : ModuleName) {
+ M = MM.lookupModuleQualified(IIAndLoc.first->getName(), M);
+ if (!M) {
+ PP.Diag(IIAndLoc.second, diag::warn_pragma_debug_unknown_module)
+ << IIAndLoc.first;
+ return;
+ }
+ }
+ M->dump();
} else if (II->isStr("overflow_stack")) {
DebugOverflowStack();
} else if (II->isStr("handle_crash")) {
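// Intended use of the new module_map debug pragma, sketched with a
// hypothetical module Foo that has a submodule Bar:
#pragma clang __debug module_map Foo.Bar
// Each dotted component is resolved with lookupModuleQualified; an unknown
// component emits warn_pragma_debug_unknown_module and nothing is dumped.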
@@ -1136,7 +1123,8 @@ struct PragmaDebugHandler : public PragmaHandler {
Toks[0].setKind(tok::annot_pragma_captured);
Toks[0].setLocation(NameLoc);
- PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true);
+ PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true,
+ /*IsReinject=*/false);
}
// Disable MSVC warning about runtime stack overflow.
@@ -1161,7 +1149,7 @@ public:
explicit PragmaDiagnosticHandler(const char *NS)
: PragmaHandler("diagnostic"), Namespace(NS) {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &DiagToken) override {
SourceLocation DiagLoc = DiagToken.getLocation();
Token Tok;
@@ -1203,7 +1191,7 @@ public:
std::string WarningName;
if (!PP.FinishLexStringLiteral(Tok, WarningName, "pragma diagnostic",
- /*MacroExpansion=*/false))
+ /*AllowMacroExpansion=*/false))
return;
if (Tok.isNot(tok::eod)) {
@@ -1240,7 +1228,7 @@ public:
/// "\#pragma hdrstop [<header-name-string>]"
struct PragmaHdrstopHandler : public PragmaHandler {
PragmaHdrstopHandler() : PragmaHandler("hdrstop") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &DepToken) override {
PP.HandlePragmaHdrstop(DepToken);
}
@@ -1252,7 +1240,7 @@ struct PragmaHdrstopHandler : public PragmaHandler {
struct PragmaWarningHandler : public PragmaHandler {
PragmaWarningHandler() : PragmaHandler("warning") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &Tok) override {
// Parse things like:
// warning(push, 1)
@@ -1369,11 +1357,75 @@ struct PragmaWarningHandler : public PragmaHandler {
}
};
+/// "\#pragma execution_character_set(...)". MSVC supports this pragma only
+/// for "UTF-8". We parse it and ignore it if UTF-8 is provided and warn
+/// otherwise to avoid -Wunknown-pragma warnings.
+struct PragmaExecCharsetHandler : public PragmaHandler {
+ PragmaExecCharsetHandler() : PragmaHandler("execution_character_set") {}
+
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
+ Token &Tok) override {
+ // Parse things like:
+ // execution_character_set(push, "UTF-8")
+ // execution_character_set(pop)
+ SourceLocation DiagLoc = Tok.getLocation();
+ PPCallbacks *Callbacks = PP.getPPCallbacks();
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok, diag::warn_pragma_exec_charset_expected) << "(";
+ return;
+ }
+
+ PP.Lex(Tok);
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+
+ if (II && II->isStr("push")) {
+ // #pragma execution_character_set( push[ , string ] )
+ PP.Lex(Tok);
+ if (Tok.is(tok::comma)) {
+ PP.Lex(Tok);
+
+ std::string ExecCharset;
+ if (!PP.FinishLexStringLiteral(Tok, ExecCharset,
+ "pragma execution_character_set",
+ /*AllowMacroExpansion=*/false))
+ return;
+
+ // MSVC supports either of these, but nothing else.
+ if (ExecCharset != "UTF-8" && ExecCharset != "utf-8") {
+ PP.Diag(Tok, diag::warn_pragma_exec_charset_push_invalid) << ExecCharset;
+ return;
+ }
+ }
+ if (Callbacks)
+ Callbacks->PragmaExecCharsetPush(DiagLoc, "UTF-8");
+ } else if (II && II->isStr("pop")) {
+ // #pragma execution_character_set( pop )
+ PP.Lex(Tok);
+ if (Callbacks)
+ Callbacks->PragmaExecCharsetPop(DiagLoc);
+ } else {
+ PP.Diag(Tok, diag::warn_pragma_exec_charset_spec_invalid);
+ return;
+ }
+
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok, diag::warn_pragma_exec_charset_expected) << ")";
+ return;
+ }
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::eod))
+ PP.Diag(Tok, diag::ext_pp_extra_tokens_at_eol) << "pragma execution_character_set";
+ }
+};
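// The two forms the handler accepts, matching its comment (any charset other
// than UTF-8 draws a warning and the pragma is otherwise ignored):
#pragma execution_character_set(push, "UTF-8")
#pragma execution_character_set(pop)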
+
/// PragmaIncludeAliasHandler - "\#pragma include_alias("...")".
struct PragmaIncludeAliasHandler : public PragmaHandler {
PragmaIncludeAliasHandler() : PragmaHandler("include_alias") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &IncludeAliasTok) override {
PP.HandlePragmaIncludeAlias(IncludeAliasTok);
}
@@ -1416,7 +1468,7 @@ public:
: PragmaHandler(PragmaKind(Kind, true)), Kind(Kind),
Namespace(Namespace) {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &Tok) override {
SourceLocation MessageLoc = Tok.getLocation();
PP.Lex(Tok);
@@ -1438,7 +1490,7 @@ public:
std::string MessageString;
if (!PP.FinishLexStringLiteral(Tok, MessageString, PragmaKind(Kind),
- /*MacroExpansion=*/true))
+ /*AllowMacroExpansion=*/true))
return;
if (ExpectClosingParen) {
@@ -1472,7 +1524,7 @@ public:
struct PragmaModuleImportHandler : public PragmaHandler {
PragmaModuleImportHandler() : PragmaHandler("import") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &Tok) override {
SourceLocation ImportLoc = Tok.getLocation();
@@ -1488,7 +1540,7 @@ struct PragmaModuleImportHandler : public PragmaHandler {
// If we have a non-empty module path, load the named module.
Module *Imported =
PP.getModuleLoader().loadModule(ImportLoc, ModuleName, Module::Hidden,
- /*IsIncludeDirective=*/false);
+ /*IsInclusionDirective=*/false);
if (!Imported)
return;
@@ -1509,7 +1561,7 @@ struct PragmaModuleImportHandler : public PragmaHandler {
struct PragmaModuleBeginHandler : public PragmaHandler {
PragmaModuleBeginHandler() : PragmaHandler("begin") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &Tok) override {
SourceLocation BeginLoc = Tok.getLocation();
@@ -1533,16 +1585,15 @@ struct PragmaModuleBeginHandler : public PragmaHandler {
// Find the module we're entering. We require that a module map for it
// be loaded or implicitly loadable.
- // FIXME: We could create the submodule here. We'd need to know whether
- // it's supposed to be explicit, but not much else.
- Module *M = PP.getHeaderSearchInfo().lookupModule(Current);
+ auto &HSI = PP.getHeaderSearchInfo();
+ Module *M = HSI.lookupModule(Current);
if (!M) {
PP.Diag(ModuleName.front().second,
diag::err_pp_module_begin_no_module_map) << Current;
return;
}
for (unsigned I = 1; I != ModuleName.size(); ++I) {
- auto *NewM = M->findSubmodule(ModuleName[I].first->getName());
+ auto *NewM = M->findOrInferSubmodule(ModuleName[I].first->getName());
if (!NewM) {
PP.Diag(ModuleName[I].second, diag::err_pp_module_begin_no_submodule)
<< M->getFullModuleName() << ModuleName[I].first;
@@ -1570,7 +1621,7 @@ struct PragmaModuleBeginHandler : public PragmaHandler {
struct PragmaModuleEndHandler : public PragmaHandler {
PragmaModuleEndHandler() : PragmaHandler("end") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &Tok) override {
SourceLocation Loc = Tok.getLocation();
@@ -1590,7 +1641,7 @@ struct PragmaModuleEndHandler : public PragmaHandler {
struct PragmaModuleBuildHandler : public PragmaHandler {
PragmaModuleBuildHandler() : PragmaHandler("build") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &Tok) override {
PP.HandlePragmaModuleBuild(Tok);
}
@@ -1600,7 +1651,7 @@ struct PragmaModuleBuildHandler : public PragmaHandler {
struct PragmaModuleLoadHandler : public PragmaHandler {
PragmaModuleLoadHandler() : PragmaHandler("load") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &Tok) override {
SourceLocation Loc = Tok.getLocation();
@@ -1615,7 +1666,7 @@ struct PragmaModuleLoadHandler : public PragmaHandler {
// Load the module, don't make it visible.
PP.getModuleLoader().loadModule(Loc, ModuleName, Module::Hidden,
- /*IsIncludeDirective=*/false);
+ /*IsInclusionDirective=*/false);
}
};
@@ -1624,7 +1675,7 @@ struct PragmaModuleLoadHandler : public PragmaHandler {
struct PragmaPushMacroHandler : public PragmaHandler {
PragmaPushMacroHandler() : PragmaHandler("push_macro") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &PushMacroTok) override {
PP.HandlePragmaPushMacro(PushMacroTok);
}
@@ -1635,7 +1686,7 @@ struct PragmaPushMacroHandler : public PragmaHandler {
struct PragmaPopMacroHandler : public PragmaHandler {
PragmaPopMacroHandler() : PragmaHandler("pop_macro") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &PopMacroTok) override {
PP.HandlePragmaPopMacro(PopMacroTok);
}
@@ -1646,7 +1697,7 @@ struct PragmaPopMacroHandler : public PragmaHandler {
struct PragmaARCCFCodeAuditedHandler : public PragmaHandler {
PragmaARCCFCodeAuditedHandler() : PragmaHandler("arc_cf_code_audited") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &NameTok) override {
SourceLocation Loc = NameTok.getLocation();
bool IsBegin;
@@ -1701,7 +1752,7 @@ struct PragmaARCCFCodeAuditedHandler : public PragmaHandler {
struct PragmaAssumeNonNullHandler : public PragmaHandler {
PragmaAssumeNonNullHandler() : PragmaHandler("assume_nonnull") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &NameTok) override {
SourceLocation Loc = NameTok.getLocation();
bool IsBegin;
@@ -1770,7 +1821,7 @@ struct PragmaAssumeNonNullHandler : public PragmaHandler {
struct PragmaRegionHandler : public PragmaHandler {
PragmaRegionHandler(const char *pragma) : PragmaHandler(pragma) {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &NameTok) override {
// #pragma region: endregion matches can be verified
// __pragma(region): no sense, but ignored by msvc
@@ -1824,6 +1875,7 @@ void Preprocessor::RegisterBuiltinPragmas() {
// MS extensions.
if (LangOpts.MicrosoftExt) {
AddPragmaHandler(new PragmaWarningHandler());
+ AddPragmaHandler(new PragmaExecCharsetHandler());
AddPragmaHandler(new PragmaIncludeAliasHandler());
AddPragmaHandler(new PragmaHdrstopHandler());
}
diff --git a/lib/Lex/PreprocessingRecord.cpp b/lib/Lex/PreprocessingRecord.cpp
index b37a8cf1ced4..115256db4809 100644
--- a/lib/Lex/PreprocessingRecord.cpp
+++ b/lib/Lex/PreprocessingRecord.cpp
@@ -1,9 +1,8 @@
//===- PreprocessingRecord.cpp - Record of Preprocessing ------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -239,16 +238,13 @@ unsigned PreprocessingRecord::findBeginLocalPreprocessedEntity(
return First - PreprocessedEntities.begin();
}
-unsigned PreprocessingRecord::findEndLocalPreprocessedEntity(
- SourceLocation Loc) const {
+unsigned
+PreprocessingRecord::findEndLocalPreprocessedEntity(SourceLocation Loc) const {
if (SourceMgr.isLoadedSourceLocation(Loc))
return 0;
- std::vector<PreprocessedEntity *>::const_iterator
- I = std::upper_bound(PreprocessedEntities.begin(),
- PreprocessedEntities.end(),
- Loc,
- PPEntityComp<&SourceRange::getBegin>(SourceMgr));
+ auto I = llvm::upper_bound(PreprocessedEntities, Loc,
+ PPEntityComp<&SourceRange::getBegin>(SourceMgr));
return I - PreprocessedEntities.begin();
}
@@ -306,10 +302,9 @@ PreprocessingRecord::addPreprocessedEntity(PreprocessedEntity *Entity) {
}
// Linear search unsuccessful. Do a binary search.
- pp_iter I = std::upper_bound(PreprocessedEntities.begin(),
- PreprocessedEntities.end(),
- BeginLoc,
- PPEntityComp<&SourceRange::getBegin>(SourceMgr));
+ pp_iter I =
+ llvm::upper_bound(PreprocessedEntities, BeginLoc,
+ PPEntityComp<&SourceRange::getBegin>(SourceMgr));
pp_iter insertI = PreprocessedEntities.insert(I, Entity);
return getPPEntityID(insertI - PreprocessedEntities.begin(),
/*isLoaded=*/false);
diff --git a/lib/Lex/Preprocessor.cpp b/lib/Lex/Preprocessor.cpp
index 047a4caaca73..bdc5fbcd2bea 100644
--- a/lib/Lex/Preprocessor.cpp
+++ b/lib/Lex/Preprocessor.cpp
@@ -1,9 +1,8 @@
-//===- Preprocess.cpp - C Language Family Preprocessor Implementation -----===//
+//===- Preprocessor.cpp - C Language Family Preprocessor Implementation ---===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -78,12 +77,12 @@ ExternalPreprocessorSource::~ExternalPreprocessorSource() = default;
Preprocessor::Preprocessor(std::shared_ptr<PreprocessorOptions> PPOpts,
DiagnosticsEngine &diags, LangOptions &opts,
- SourceManager &SM, MemoryBufferCache &PCMCache,
- HeaderSearch &Headers, ModuleLoader &TheModuleLoader,
+ SourceManager &SM, HeaderSearch &Headers,
+ ModuleLoader &TheModuleLoader,
IdentifierInfoLookup *IILookup, bool OwnsHeaders,
TranslationUnitKind TUKind)
: PPOpts(std::move(PPOpts)), Diags(&diags), LangOpts(opts),
- FileMgr(Headers.getFileMgr()), SourceMgr(SM), PCMCache(PCMCache),
+ FileMgr(Headers.getFileMgr()), SourceMgr(SM),
ScratchBuf(new ScratchBuffer(SourceMgr)), HeaderInfo(Headers),
TheModuleLoader(TheModuleLoader), ExternalSource(nullptr),
// As the language options may have not been loaded yet (when
@@ -103,6 +102,7 @@ Preprocessor::Preprocessor(std::shared_ptr<PreprocessorOptions> PPOpts,
DisableMacroExpansion = false;
MacroExpansionInDirectivesOverride = false;
InMacroArgs = false;
+ ArgMacro = nullptr;
InMacroArgPreExpansion = false;
NumCachedTokenLexers = 0;
PragmasEnabled = true;
@@ -567,7 +567,8 @@ void Preprocessor::EnterMainSourceFile() {
SourceLocation(), PPOpts->PCHThroughHeader,
/*isAngled=*/false, /*FromDir=*/nullptr, /*FromFile=*/nullptr, CurDir,
/*SearchPath=*/nullptr, /*RelativePath=*/nullptr,
- /*SuggestedModule=*/nullptr, /*IsMapped=*/nullptr);
+ /*SuggestedModule=*/nullptr, /*IsMapped=*/nullptr,
+ /*IsFrameworkFound=*/nullptr);
if (!File) {
Diag(SourceLocation(), diag::err_pp_through_header_not_found)
<< PPOpts->PCHThroughHeader;
@@ -624,8 +625,22 @@ void Preprocessor::SkipTokensWhileUsingPCH() {
bool UsingPragmaHdrStop = SkippingUntilPragmaHdrStop;
Token Tok;
while (true) {
- bool InPredefines = (CurLexer->getFileID() == getPredefinesFileID());
- CurLexer->Lex(Tok);
+ bool InPredefines =
+ (CurLexer && CurLexer->getFileID() == getPredefinesFileID());
+ switch (CurLexerKind) {
+ case CLK_Lexer:
+ CurLexer->Lex(Tok);
+ break;
+ case CLK_TokenLexer:
+ CurTokenLexer->Lex(Tok);
+ break;
+ case CLK_CachingLexer:
+ CachingLex(Tok);
+ break;
+ case CLK_LexAfterModuleImport:
+ LexAfterModuleImport(Tok);
+ break;
+ }
if (Tok.is(tok::eof) && !InPredefines) {
ReachedMainFileEOF = true;
break;
@@ -861,6 +876,8 @@ bool Preprocessor::HandleIdentifier(Token &Identifier) {
}
void Preprocessor::Lex(Token &Result) {
+ ++LexLevel;
+
// We loop here until a lex function returns a token; this avoids recursion.
bool ReturnedToken;
do {
@@ -876,8 +893,7 @@ void Preprocessor::Lex(Token &Result) {
ReturnedToken = true;
break;
case CLK_LexAfterModuleImport:
- LexAfterModuleImport(Result);
- ReturnedToken = true;
+ ReturnedToken = LexAfterModuleImport(Result);
break;
}
} while (!ReturnedToken);
@@ -891,17 +907,296 @@ void Preprocessor::Lex(Token &Result) {
Result.setIdentifierInfo(nullptr);
}
+ // Update ImportSeqState to track our position within a C++20 import-seq
+ // if this token is being produced as a result of phase 4 of translation.
+ if (getLangOpts().CPlusPlusModules && LexLevel == 1 &&
+ !Result.getFlag(Token::IsReinjected)) {
+ switch (Result.getKind()) {
+ case tok::l_paren: case tok::l_square: case tok::l_brace:
+ ImportSeqState.handleOpenBracket();
+ break;
+ case tok::r_paren: case tok::r_square:
+ ImportSeqState.handleCloseBracket();
+ break;
+ case tok::r_brace:
+ ImportSeqState.handleCloseBrace();
+ break;
+ case tok::semi:
+ ImportSeqState.handleSemi();
+ break;
+ case tok::header_name:
+ case tok::annot_header_unit:
+ ImportSeqState.handleHeaderName();
+ break;
+ case tok::kw_export:
+ ImportSeqState.handleExport();
+ break;
+ case tok::identifier:
+ if (Result.getIdentifierInfo()->isModulesImport()) {
+ ImportSeqState.handleImport();
+ if (ImportSeqState.afterImportSeq()) {
+ ModuleImportLoc = Result.getLocation();
+ ModuleImportPath.clear();
+ ModuleImportExpectsIdentifier = true;
+ CurLexerKind = CLK_LexAfterModuleImport;
+ }
+ break;
+ }
+ LLVM_FALLTHROUGH;
+ default:
+ ImportSeqState.handleMisc();
+ break;
+ }
+ }
+
LastTokenWasAt = Result.is(tok::at);
+ --LexLevel;
+ if (OnToken && LexLevel == 0 && !Result.getFlag(Token::IsReinjected))
+ OnToken(Result);
+}
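// Rough effect of the ImportSeqState tracking above, sketched:
//   import <map>;     // 'import' where an import-seq may start switches the
//                     // lexer to CLK_LexAfterModuleImport (a pp-import)
//   obj.import(42);   // 'import' after other tokens stays a plain identifier;
//                     // handleMisc() keeps the state inert
// Only top-level (LexLevel == 1), non-reinjected tokens in C++20 module mode
// update the state.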
+
+/// Lex a header-name token (including one formed from header-name-tokens if
+/// \p AllowMacroExpansion is \c true).
+///
+/// \param FilenameTok Filled in with the next token. On success, this will
+/// be a header_name token. On failure, it will be whatever other
+/// token was found instead.
+/// \param AllowMacroExpansion If \c true, allow the header name to be formed
+/// by macro expansion (concatenating tokens as necessary if the first
+/// token is a '<').
+/// \return \c true if we reached EOD or EOF while looking for a > token in
+/// a concatenated header name and diagnosed it. \c false otherwise.
+bool Preprocessor::LexHeaderName(Token &FilenameTok, bool AllowMacroExpansion) {
+ // Lex using header-name tokenization rules if tokens are being lexed from
+ // a file. Just grab a token normally if we're in a macro expansion.
+ if (CurPPLexer)
+ CurPPLexer->LexIncludeFilename(FilenameTok);
+ else
+ Lex(FilenameTok);
+
+ // This could be a <foo/bar.h> file coming from a macro expansion. In this
+ // case, glue the tokens together into an angle_string_literal token.
+ SmallString<128> FilenameBuffer;
+ if (FilenameTok.is(tok::less) && AllowMacroExpansion) {
+ bool StartOfLine = FilenameTok.isAtStartOfLine();
+ bool LeadingSpace = FilenameTok.hasLeadingSpace();
+ bool LeadingEmptyMacro = FilenameTok.hasLeadingEmptyMacro();
+
+ SourceLocation Start = FilenameTok.getLocation();
+ SourceLocation End;
+ FilenameBuffer.push_back('<');
+
+ // Consume tokens until we find a '>'.
+ // FIXME: A header-name could be formed starting or ending with an
+ // alternative token. It's not clear whether that's ill-formed in all
+ // cases.
+ while (FilenameTok.isNot(tok::greater)) {
+ Lex(FilenameTok);
+ if (FilenameTok.isOneOf(tok::eod, tok::eof)) {
+ Diag(FilenameTok.getLocation(), diag::err_expected) << tok::greater;
+ Diag(Start, diag::note_matching) << tok::less;
+ return true;
+ }
+
+ End = FilenameTok.getLocation();
+
+ // FIXME: Provide code completion for #includes.
+ if (FilenameTok.is(tok::code_completion)) {
+ setCodeCompletionReached();
+ Lex(FilenameTok);
+ continue;
+ }
+
+ // Append the spelling of this token to the buffer. If there was a space
+ // before it, add it now.
+ if (FilenameTok.hasLeadingSpace())
+ FilenameBuffer.push_back(' ');
+
+ // Get the spelling of the token, directly into FilenameBuffer if
+ // possible.
+ size_t PreAppendSize = FilenameBuffer.size();
+ FilenameBuffer.resize(PreAppendSize + FilenameTok.getLength());
+
+ const char *BufPtr = &FilenameBuffer[PreAppendSize];
+ unsigned ActualLen = getSpelling(FilenameTok, BufPtr);
+
+ // If the token was spelled somewhere else, copy it into FilenameBuffer.
+ if (BufPtr != &FilenameBuffer[PreAppendSize])
+ memcpy(&FilenameBuffer[PreAppendSize], BufPtr, ActualLen);
+
+ // Resize FilenameBuffer to the correct size.
+ if (FilenameTok.getLength() != ActualLen)
+ FilenameBuffer.resize(PreAppendSize + ActualLen);
+ }
+
+ FilenameTok.startToken();
+ FilenameTok.setKind(tok::header_name);
+ FilenameTok.setFlagValue(Token::StartOfLine, StartOfLine);
+ FilenameTok.setFlagValue(Token::LeadingSpace, LeadingSpace);
+ FilenameTok.setFlagValue(Token::LeadingEmptyMacro, LeadingEmptyMacro);
+ CreateString(FilenameBuffer, FilenameTok, Start, End);
+ } else if (FilenameTok.is(tok::string_literal) && AllowMacroExpansion) {
+ // Convert a string-literal token of the form " h-char-sequence "
+ // (produced by macro expansion) into a header-name token.
+ //
+ // The rules for header-names don't quite match the rules for
+ // string-literals, but all the places where they differ result in
+ // undefined behavior, so we can and do treat them the same.
+ //
+ // A string-literal with a prefix or suffix is not translated into a
+ // header-name. This could theoretically be observable via the C++20
+ // context-sensitive header-name formation rules.
+ StringRef Str = getSpelling(FilenameTok, FilenameBuffer);
+ if (Str.size() >= 2 && Str.front() == '"' && Str.back() == '"')
+ FilenameTok.setKind(tok::header_name);
+ }
+
+ return false;
+}
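// Inputs the two macro-expansion branches above are meant to handle, sketched
// (VEC_HDR and CFG_HDR are hypothetical macros):
#define VEC_HDR <vector>
import VEC_HDR;   // the '<' ... '>' tokens are glued into one header_name
#define CFG_HDR "config.h"
import CFG_HDR;   // a plain "..." string-literal is re-tagged as header_name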
+
+/// Collect the tokens of a C++20 pp-import-suffix.
+void Preprocessor::CollectPpImportSuffix(SmallVectorImpl<Token> &Toks) {
+ // FIXME: For error recovery, consider recognizing attribute syntax here
+ // and terminating / diagnosing a missing semicolon if we find anything
+ // else? (Can we leave that to the parser?)
+ unsigned BracketDepth = 0;
+ while (true) {
+ Toks.emplace_back();
+ Lex(Toks.back());
+
+ switch (Toks.back().getKind()) {
+ case tok::l_paren: case tok::l_square: case tok::l_brace:
+ ++BracketDepth;
+ break;
+
+ case tok::r_paren: case tok::r_square: case tok::r_brace:
+ if (BracketDepth == 0)
+ return;
+ --BracketDepth;
+ break;
+
+ case tok::semi:
+ if (BracketDepth == 0)
+ return;
+ break;
+
+ case tok::eof:
+ return;
+
+ default:
+ break;
+ }
+ }
}
+
/// Lex a token following the 'import' contextual keyword.
///
-void Preprocessor::LexAfterModuleImport(Token &Result) {
+/// pp-import: [C++20]
+/// import header-name pp-import-suffix[opt] ;
+/// import header-name-tokens pp-import-suffix[opt] ;
+/// [ObjC] @ import module-name ;
+/// [Clang] import module-name ;
+///
+/// header-name-tokens:
+/// string-literal
+/// < [any sequence of preprocessing-tokens other than >] >
+///
+/// module-name:
+/// module-name-qualifier[opt] identifier
+///
+/// module-name-qualifier
+/// module-name-qualifier:
+///
+/// We respond to a pp-import by importing macros from the named module.
+bool Preprocessor::LexAfterModuleImport(Token &Result) {
// Figure out what kind of lexer we actually have.
recomputeCurLexerKind();
- // Lex the next token.
- Lex(Result);
+ // Lex the next token. The header-name lexing rules are used at the start of
+ // a pp-import.
+ //
+ // For now, we only support header-name imports in C++20 mode.
+ // FIXME: Should we allow this in all language modes that support an import
+ // declaration as an extension?
+ if (ModuleImportPath.empty() && getLangOpts().CPlusPlusModules) {
+ if (LexHeaderName(Result))
+ return true;
+ } else {
+ Lex(Result);
+ }
+
+ // Allocate a holding buffer for a sequence of tokens and introduce it into
+ // the token stream.
+ auto EnterTokens = [this](ArrayRef<Token> Toks) {
+ auto ToksCopy = llvm::make_unique<Token[]>(Toks.size());
+ std::copy(Toks.begin(), Toks.end(), ToksCopy.get());
+ EnterTokenStream(std::move(ToksCopy), Toks.size(),
+ /*DisableMacroExpansion*/ true, /*IsReinject*/ false);
+ };
+
+ // Check for a header-name.
+ SmallVector<Token, 32> Suffix;
+ if (Result.is(tok::header_name)) {
+ // Enter the header-name token into the token stream; a Lex action cannot
+ // both return a token and cache tokens (doing so would corrupt the token
+ // cache if the call to Lex comes from CachingLex / PeekAhead).
+ Suffix.push_back(Result);
+
+ // Consume the pp-import-suffix and expand any macros in it now. We'll add
+ // it back into the token stream later.
+ CollectPpImportSuffix(Suffix);
+ if (Suffix.back().isNot(tok::semi)) {
+ // This is not a pp-import after all.
+ EnterTokens(Suffix);
+ return false;
+ }
+
+ // C++2a [cpp.module]p1:
+ // The ';' preprocessing-token terminating a pp-import shall not have
+ // been produced by macro replacement.
+ SourceLocation SemiLoc = Suffix.back().getLocation();
+ if (SemiLoc.isMacroID())
+ Diag(SemiLoc, diag::err_header_import_semi_in_macro);
+
+ // Reconstitute the import token.
+ Token ImportTok;
+ ImportTok.startToken();
+ ImportTok.setKind(tok::kw_import);
+ ImportTok.setLocation(ModuleImportLoc);
+ ImportTok.setIdentifierInfo(getIdentifierInfo("import"));
+ ImportTok.setLength(6);
+
+ auto Action = HandleHeaderIncludeOrImport(
+ /*HashLoc*/ SourceLocation(), ImportTok, Suffix.front(), SemiLoc);
+ switch (Action.Kind) {
+ case ImportAction::None:
+ break;
+
+ case ImportAction::ModuleBegin:
+ // Let the parser know we're textually entering the module.
+ Suffix.emplace_back();
+ Suffix.back().startToken();
+ Suffix.back().setKind(tok::annot_module_begin);
+ Suffix.back().setLocation(SemiLoc);
+ Suffix.back().setAnnotationEndLoc(SemiLoc);
+ Suffix.back().setAnnotationValue(Action.ModuleForHeader);
+ LLVM_FALLTHROUGH;
+
+ case ImportAction::ModuleImport:
+ case ImportAction::SkippedModuleImport:
+ // We chose to import (or textually enter) the file. Convert the
+ // header-name token into a header unit annotation token.
+ Suffix[0].setKind(tok::annot_header_unit);
+ Suffix[0].setAnnotationEndLoc(Suffix[0].getLocation());
+ Suffix[0].setAnnotationValue(Action.ModuleForHeader);
+ // FIXME: Call the moduleImport callback?
+ break;
+ }
+
+ EnterTokens(Suffix);
+ return false;
+ }
// The token sequence
//
@@ -916,7 +1211,7 @@ void Preprocessor::LexAfterModuleImport(Token &Result) {
Result.getLocation()));
ModuleImportExpectsIdentifier = false;
CurLexerKind = CLK_LexAfterModuleImport;
- return;
+ return true;
}
// If we're expecting a '.' or a ';', and we got a '.', then wait until we
@@ -925,40 +1220,61 @@ void Preprocessor::LexAfterModuleImport(Token &Result) {
if (!ModuleImportExpectsIdentifier && Result.getKind() == tok::period) {
ModuleImportExpectsIdentifier = true;
CurLexerKind = CLK_LexAfterModuleImport;
- return;
+ return true;
}
- // If we have a non-empty module path, load the named module.
- if (!ModuleImportPath.empty()) {
- // Under the Modules TS, the dot is just part of the module name, and not
- // a real hierarchy separator. Flatten such module names now.
- //
- // FIXME: Is this the right level to be performing this transformation?
- std::string FlatModuleName;
- if (getLangOpts().ModulesTS) {
- for (auto &Piece : ModuleImportPath) {
- if (!FlatModuleName.empty())
- FlatModuleName += ".";
- FlatModuleName += Piece.first->getName();
- }
- SourceLocation FirstPathLoc = ModuleImportPath[0].second;
- ModuleImportPath.clear();
- ModuleImportPath.push_back(
- std::make_pair(getIdentifierInfo(FlatModuleName), FirstPathLoc));
+ // If we didn't recognize a module name at all, this is not a (valid) import.
+ if (ModuleImportPath.empty() || Result.is(tok::eof))
+ return true;
+
+ // Consume the pp-import-suffix and expand any macros in it now, if we're not
+ // at the semicolon already.
+ SourceLocation SemiLoc = Result.getLocation();
+ if (Result.isNot(tok::semi)) {
+ Suffix.push_back(Result);
+ CollectPpImportSuffix(Suffix);
+ if (Suffix.back().isNot(tok::semi)) {
+ // This is not an import after all.
+ EnterTokens(Suffix);
+ return false;
}
+ SemiLoc = Suffix.back().getLocation();
+ }
- Module *Imported = nullptr;
- if (getLangOpts().Modules) {
- Imported = TheModuleLoader.loadModule(ModuleImportLoc,
- ModuleImportPath,
- Module::Hidden,
- /*IsIncludeDirective=*/false);
- if (Imported)
- makeModuleVisible(Imported, ModuleImportLoc);
+ // Under the Modules TS, the dot is just part of the module name, and not
+ // a real hierarchy separator. Flatten such module names now.
+ //
+ // FIXME: Is this the right level to be performing this transformation?
+ std::string FlatModuleName;
+ if (getLangOpts().ModulesTS || getLangOpts().CPlusPlusModules) {
+ for (auto &Piece : ModuleImportPath) {
+ if (!FlatModuleName.empty())
+ FlatModuleName += ".";
+ FlatModuleName += Piece.first->getName();
}
- if (Callbacks && (getLangOpts().Modules || getLangOpts().DebuggerSupport))
- Callbacks->moduleImport(ModuleImportLoc, ModuleImportPath, Imported);
+ SourceLocation FirstPathLoc = ModuleImportPath[0].second;
+ ModuleImportPath.clear();
+ ModuleImportPath.push_back(
+ std::make_pair(getIdentifierInfo(FlatModuleName), FirstPathLoc));
+ }
+
+ Module *Imported = nullptr;
+ if (getLangOpts().Modules) {
+ Imported = TheModuleLoader.loadModule(ModuleImportLoc,
+ ModuleImportPath,
+ Module::Hidden,
+ /*IsInclusionDirective=*/false);
+ if (Imported)
+ makeModuleVisible(Imported, SemiLoc);
}
+ if (Callbacks)
+ Callbacks->moduleImport(ModuleImportLoc, ModuleImportPath, Imported);
+
+ if (!Suffix.empty()) {
+ EnterTokens(Suffix);
+ return false;
+ }
+ return true;
}
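// The pp-import forms the function above distinguishes, sketched with
// placeholder header and module names:
import <map>;        // header-name: the token becomes an annot_header_unit
import "util.h";     // header-name formed from a string-literal, same path
import std.core;     // dotted module-name ([Clang] / Modules TS spelling)
// The terminating ';' must not come from macro replacement, otherwise
// err_header_import_semi_in_macro is emitted.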
void Preprocessor::makeModuleVisible(Module *M, SourceLocation Loc) {
@@ -1039,14 +1355,14 @@ bool Preprocessor::parseSimpleIntegerLiteral(Token &Tok, uint64_t &Value) {
void Preprocessor::addCommentHandler(CommentHandler *Handler) {
assert(Handler && "NULL comment handler");
- assert(std::find(CommentHandlers.begin(), CommentHandlers.end(), Handler) ==
- CommentHandlers.end() && "Comment handler already registered");
+ assert(llvm::find(CommentHandlers, Handler) == CommentHandlers.end() &&
+ "Comment handler already registered");
CommentHandlers.push_back(Handler);
}
void Preprocessor::removeCommentHandler(CommentHandler *Handler) {
std::vector<CommentHandler *>::iterator Pos =
- std::find(CommentHandlers.begin(), CommentHandlers.end(), Handler);
+ llvm::find(CommentHandlers, Handler);
assert(Pos != CommentHandlers.end() && "Comment handler not registered");
CommentHandlers.erase(Pos);
}
diff --git a/lib/Lex/PreprocessorLexer.cpp b/lib/Lex/PreprocessorLexer.cpp
index 9f930c3a3c6a..5f6f4a13419b 100644
--- a/lib/Lex/PreprocessorLexer.cpp
+++ b/lib/Lex/PreprocessorLexer.cpp
@@ -1,9 +1,8 @@
//===- PreprocessorLexer.cpp - C Language Family Lexer --------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -31,9 +30,7 @@ PreprocessorLexer::PreprocessorLexer(Preprocessor *pp, FileID fid)
/// After the preprocessor has parsed a \#include, lex and
/// (potentially) macro expand the filename.
void PreprocessorLexer::LexIncludeFilename(Token &FilenameTok) {
- assert(ParsingPreprocessorDirective &&
- ParsingFilename == false &&
- "Must be in a preprocessing directive!");
+ assert(ParsingFilename == false && "reentered LexIncludeFilename");
// We are now parsing a filename!
ParsingFilename = true;
@@ -46,10 +43,6 @@ void PreprocessorLexer::LexIncludeFilename(Token &FilenameTok) {
// We should have obtained the filename now.
ParsingFilename = false;
-
- // No filename?
- if (FilenameTok.is(tok::eod))
- PP->Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
}
/// getFileEntry - Return the FileEntry corresponding to this FileID. Like
diff --git a/lib/Lex/ScratchBuffer.cpp b/lib/Lex/ScratchBuffer.cpp
index dc03e16daa8b..19ab93ec54b4 100644
--- a/lib/Lex/ScratchBuffer.cpp
+++ b/lib/Lex/ScratchBuffer.cpp
@@ -1,9 +1,8 @@
//===--- ScratchBuffer.cpp - Scratch space for forming tokens -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Lex/TokenConcatenation.cpp b/lib/Lex/TokenConcatenation.cpp
index f810c28ccdf1..e626cfcc927f 100644
--- a/lib/Lex/TokenConcatenation.cpp
+++ b/lib/Lex/TokenConcatenation.cpp
@@ -1,9 +1,8 @@
//===--- TokenConcatenation.cpp - Token Concatenation Avoidance -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -161,6 +160,11 @@ static char GetFirstChar(const Preprocessor &PP, const Token &Tok) {
bool TokenConcatenation::AvoidConcat(const Token &PrevPrevTok,
const Token &PrevTok,
const Token &Tok) const {
+ // Conservatively assume that every annotation token that has a printable
+ // form requires whitespace.
+ if (PrevTok.isAnnotation())
+ return true;
+
// First, check to see if the tokens were directly adjacent in the original
// source. If they were, it must be okay to stick them together: if there
// were an issue, the tokens would have been lexed differently.
diff --git a/lib/Lex/TokenLexer.cpp b/lib/Lex/TokenLexer.cpp
index 608e0dedebb7..a7957e82e495 100644
--- a/lib/Lex/TokenLexer.cpp
+++ b/lib/Lex/TokenLexer.cpp
@@ -1,9 +1,8 @@
//===- TokenLexer.cpp - Lex from a token stream ---------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -54,6 +53,7 @@ void TokenLexer::Init(Token &Tok, SourceLocation ELEnd, MacroInfo *MI,
Tokens = &*Macro->tokens_begin();
OwnsTokens = false;
DisableMacroExpansion = false;
+ IsReinject = false;
NumTokens = Macro->tokens_end()-Macro->tokens_begin();
MacroExpansionStart = SourceLocation();
@@ -92,7 +92,9 @@ void TokenLexer::Init(Token &Tok, SourceLocation ELEnd, MacroInfo *MI,
/// Create a TokenLexer for the specified token stream. This does not
/// take ownership of the specified token vector.
void TokenLexer::Init(const Token *TokArray, unsigned NumToks,
- bool disableMacroExpansion, bool ownsTokens) {
+ bool disableMacroExpansion, bool ownsTokens,
+ bool isReinject) {
+ assert(!isReinject || disableMacroExpansion);
// If the client is reusing a TokenLexer, make sure to free any memory
// associated with it.
destroy();
@@ -102,6 +104,7 @@ void TokenLexer::Init(const Token *TokArray, unsigned NumToks,
Tokens = TokArray;
OwnsTokens = ownsTokens;
DisableMacroExpansion = disableMacroExpansion;
+ IsReinject = isReinject;
NumTokens = NumToks;
CurTokenIdx = 0;
ExpandLocStart = ExpandLocEnd = SourceLocation();
@@ -244,8 +247,7 @@ void TokenLexer::ExpandFunctionArguments() {
// we install the newly expanded sequence as the new 'Tokens' list.
bool MadeChange = false;
- const bool CalledWithVariadicArguments =
- ActualArgs->invokedWithVariadicArgument(Macro);
+ Optional<bool> CalledWithVariadicArguments;
VAOptExpansionContext VCtx(PP);
@@ -292,7 +294,12 @@ void TokenLexer::ExpandFunctionArguments() {
// this token. Note sawClosingParen() returns true only if the r_paren matches
// the closing r_paren of the __VA_OPT__.
if (!Tokens[I].is(tok::r_paren) || !VCtx.sawClosingParen()) {
- if (!CalledWithVariadicArguments) {
+ // Lazily expand __VA_ARGS__ when we see the first __VA_OPT__.
+ if (!CalledWithVariadicArguments.hasValue()) {
+ CalledWithVariadicArguments =
+ ActualArgs->invokedWithVariadicArgument(Macro, PP);
+ }
+ if (!*CalledWithVariadicArguments) {
// Skip this token.
continue;
}
@@ -315,8 +322,8 @@ void TokenLexer::ExpandFunctionArguments() {
stringifyVAOPTContents(ResultToks, VCtx,
/*ClosingParenLoc*/ Tokens[I].getLocation());
- } else if (/*No tokens within VAOPT*/ !(
- ResultToks.size() - VCtx.getNumberOfTokensPriorToVAOpt())) {
+ } else if (/*No tokens within VAOPT*/
+ ResultToks.size() == VCtx.getNumberOfTokensPriorToVAOpt()) {
// Treat VAOPT as a placemarker token. Eat either the '##' before the
// RHS/VAOPT (if one exists, suggesting that the LHS (if any) to that
// hashhash was not a placemarker) or the '##'
@@ -327,6 +334,26 @@ void TokenLexer::ExpandFunctionArguments() {
} else if ((I + 1 != E) && Tokens[I + 1].is(tok::hashhash)) {
++I; // Skip the following hashhash.
}
+ } else {
+ // If there's a ## before the __VA_OPT__, we might have discovered
+ // that the __VA_OPT__ begins with a placeholder. We delay action on
+ // that to now to avoid messing up our stashed count of tokens before
+ // __VA_OPT__.
+ if (VCtx.beginsWithPlaceholder()) {
+ assert(VCtx.getNumberOfTokensPriorToVAOpt() > 0 &&
+ ResultToks.size() >= VCtx.getNumberOfTokensPriorToVAOpt() &&
+ ResultToks[VCtx.getNumberOfTokensPriorToVAOpt() - 1].is(
+ tok::hashhash) &&
+ "no token paste before __VA_OPT__");
+ ResultToks.erase(ResultToks.begin() +
+ VCtx.getNumberOfTokensPriorToVAOpt() - 1);
+ }
+ // If the expansion of __VA_OPT__ ends with a placeholder, eat any
+ // following '##' token.
+ if (VCtx.endsWithPlaceholder() && I + 1 != E &&
+ Tokens[I + 1].is(tok::hashhash)) {
+ ++I;
+ }
}
VCtx.reset();
// We processed __VA_OPT__'s closing paren (and the exit out of
@@ -387,6 +414,7 @@ void TokenLexer::ExpandFunctionArguments() {
!ResultToks.empty() && ResultToks.back().is(tok::hashhash);
bool PasteBefore = I != 0 && Tokens[I-1].is(tok::hashhash);
bool PasteAfter = I+1 != E && Tokens[I+1].is(tok::hashhash);
+ bool RParenAfter = I+1 != E && Tokens[I+1].is(tok::r_paren);
assert((!NonEmptyPasteBefore || PasteBefore || VCtx.isInVAOpt()) &&
"unexpected ## in ResultToks");
@@ -471,6 +499,18 @@ void TokenLexer::ExpandFunctionArguments() {
NextTokGetsSpace);
ResultToks[FirstResult].setFlagValue(Token::StartOfLine, false);
NextTokGetsSpace = false;
+ } else {
+ // We're creating a placeholder token. Usually this doesn't matter,
+ // but it can affect paste behavior when at the start or end of a
+ // __VA_OPT__.
+ if (NonEmptyPasteBefore) {
+ // We're imagining a placeholder token is inserted here. If this is
+ // the first token in a __VA_OPT__ after a ##, delete the ##.
+ assert(VCtx.isInVAOpt() && "should only happen inside a __VA_OPT__");
+ VCtx.hasPlaceholderAfterHashhashAtStart();
+ }
+ if (RParenAfter)
+ VCtx.hasPlaceholderBeforeRParen();
}
continue;
}
@@ -535,6 +575,9 @@ void TokenLexer::ExpandFunctionArguments() {
continue;
}
+ if (RParenAfter)
+ VCtx.hasPlaceholderBeforeRParen();
+
// If this is on the RHS of a paste operator, we've already copied the
// paste operator to the ResultToks list, unless the LHS was empty too.
// Remove it.
@@ -548,6 +591,8 @@ void TokenLexer::ExpandFunctionArguments() {
if (!VCtx.isInVAOpt() ||
ResultToks.size() > VCtx.getNumberOfTokensPriorToVAOpt())
ResultToks.pop_back();
+ else
+ VCtx.hasPlaceholderAfterHashhashAtStart();
}
// If this is the __VA_ARGS__ token, and if the argument wasn't provided,
@@ -606,6 +651,8 @@ bool TokenLexer::Lex(Token &Tok) {
// Get the next token to return.
Tok = Tokens[CurTokenIdx++];
+ if (IsReinject)
+ Tok.setFlag(Token::IsReinjected);
bool TokenIsFromPaste = false;
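// Placemarker behaviour the __VA_OPT__ changes above aim for, sketched:
#define F(a, ...) [a ## __VA_OPT__(x)]
F(1)     // no variadic arguments: __VA_OPT__(x) is a placemarker, the '##'
         // before it is dropped, and this expands to "[1]"
F(1, 2)  // variadic arguments present: expands to "[1x]"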
diff --git a/lib/Lex/UnicodeCharSets.h b/lib/Lex/UnicodeCharSets.h
index 116d553d2040..d56bc8ef6721 100644
--- a/lib/Lex/UnicodeCharSets.h
+++ b/lib/Lex/UnicodeCharSets.h
@@ -1,9 +1,8 @@
//===--- UnicodeCharSets.h - Contains important sets of characters --------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_LEX_UNICODECHARSETS_H
diff --git a/lib/Parse/ParseAST.cpp b/lib/Parse/ParseAST.cpp
index f7703b1bfd8a..3efd893e499c 100644
--- a/lib/Parse/ParseAST.cpp
+++ b/lib/Parse/ParseAST.cpp
@@ -1,9 +1,8 @@
//===--- ParseAST.cpp - Provide the clang::ParseAST method ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -23,6 +22,7 @@
#include "clang/Sema/SemaConsumer.h"
#include "clang/Sema/TemplateInstCallback.h"
#include "llvm/Support/CrashRecoveryContext.h"
+#include "llvm/Support/TimeProfiler.h"
#include <cstdio>
#include <memory>
@@ -151,6 +151,7 @@ void clang::ParseAST(Sema &S, bool PrintStats, bool SkipFunctionBodies) {
bool HaveLexer = S.getPreprocessor().getCurrentLexer();
if (HaveLexer) {
+ llvm::TimeTraceScope TimeScope("Frontend", StringRef(""));
P.Initialize();
Parser::DeclGroupPtrTy ADecl;
for (bool AtEOF = P.ParseFirstTopLevelDecl(ADecl); !AtEOF;
diff --git a/lib/Parse/ParseCXXInlineMethods.cpp b/lib/Parse/ParseCXXInlineMethods.cpp
index fde3ce00f830..a1abf8269c45 100644
--- a/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/lib/Parse/ParseCXXInlineMethods.cpp
@@ -1,9 +1,8 @@
//===--- ParseCXXInlineMethods.cpp - C++ class inline methods parsing------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -110,7 +109,7 @@ NamedDecl *Parser::ParseCXXInlineMethodDef(
// the tokens and store them for parsing at the end of the translation unit.
if (getLangOpts().DelayedTemplateParsing &&
D.getFunctionDefinitionKind() == FDK_Definition &&
- !D.getDeclSpec().isConstexprSpecified() &&
+ !D.getDeclSpec().hasConstexprSpecifier() &&
!(FnD && FnD->getAsFunction() &&
FnD->getAsFunction()->getReturnType()->getContainedAutoType()) &&
((Actions.CurContext->isDependentContext() ||
@@ -324,7 +323,7 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
// Parse the default argument from its saved token stream.
Toks->push_back(Tok); // So that the current token doesn't get lost
- PP.EnterTokenStream(*Toks, true);
+ PP.EnterTokenStream(*Toks, true, /*IsReinject*/ true);
// Consume the previously-pushed token.
ConsumeAnyToken();
@@ -397,7 +396,7 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
// Parse the default argument from its saved token stream.
Toks->push_back(Tok); // So that the current token doesn't get lost
- PP.EnterTokenStream(*Toks, true);
+ PP.EnterTokenStream(*Toks, true, /*IsReinject*/true);
// Consume the previously-pushed token.
ConsumeAnyToken();
@@ -416,7 +415,7 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
Method = cast<CXXMethodDecl>(LM.Method);
Sema::CXXThisScopeRAII ThisScope(Actions, Method->getParent(),
- Method->getTypeQualifiers(),
+ Method->getMethodQualifiers(),
getLangOpts().CPlusPlus11);
// Parse the exception-specification.
@@ -504,7 +503,7 @@ void Parser::ParseLexedMethodDef(LexedMethod &LM) {
// Append the current token at the end of the new token stream so that it
// doesn't get lost.
LM.Toks.push_back(Tok);
- PP.EnterTokenStream(LM.Toks, true);
+ PP.EnterTokenStream(LM.Toks, true, /*IsReinject*/true);
// Consume the previously pushed token.
ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
@@ -618,7 +617,7 @@ void Parser::ParseLexedMemberInitializer(LateParsedMemberInitializer &MI) {
// Append the current token at the end of the new token stream so that it
// doesn't get lost.
MI.Toks.push_back(Tok);
- PP.EnterTokenStream(MI.Toks, true);
+ PP.EnterTokenStream(MI.Toks, true, /*IsReinject*/true);
// Consume the previously pushed token.
ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
@@ -990,7 +989,8 @@ public:
auto Buffer = llvm::make_unique<Token[]>(Toks.size());
std::copy(Toks.begin() + 1, Toks.end(), Buffer.get());
Buffer[Toks.size() - 1] = Self.Tok;
- Self.PP.EnterTokenStream(std::move(Buffer), Toks.size(), true);
+ Self.PP.EnterTokenStream(std::move(Buffer), Toks.size(), true,
+ /*IsReinject*/ true);
Self.Tok = Toks.front();
}
@@ -1058,7 +1058,7 @@ bool Parser::ConsumeAndStoreInitializer(CachedTokens &Toks,
case CIK_DefaultArgument:
bool InvalidAsDeclaration = false;
Result = TryParseParameterDeclarationClause(
- &InvalidAsDeclaration, /*VersusTemplateArgument=*/true);
+ &InvalidAsDeclaration, /*VersusTemplateArg=*/true);
// If this is an expression or a declaration with a missing
// 'typename', assume it's not a declaration.
if (Result == TPResult::Ambiguous && InvalidAsDeclaration)
diff --git a/lib/Parse/ParseDecl.cpp b/lib/Parse/ParseDecl.cpp
index 298a2bad56c6..73b4f50fda46 100644
--- a/lib/Parse/ParseDecl.cpp
+++ b/lib/Parse/ParseDecl.cpp
@@ -1,9 +1,8 @@
//===--- ParseDecl.cpp - Declaration Parsing --------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -86,6 +85,23 @@ static bool isAttributeLateParsed(const IdentifierInfo &II) {
#undef CLANG_ATTR_LATE_PARSED_LIST
}
+/// Check if the start and end source locations expand to the same macro.
+bool FindLocsWithCommonFileID(Preprocessor &PP, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!StartLoc.isMacroID() || !EndLoc.isMacroID())
+ return false;
+
+ SourceManager &SM = PP.getSourceManager();
+ if (SM.getFileID(StartLoc) != SM.getFileID(EndLoc))
+ return false;
+
+ bool AttrStartIsInMacro =
+ Lexer::isAtStartOfMacroExpansion(StartLoc, SM, PP.getLangOpts());
+ bool AttrEndIsInMacro =
+ Lexer::isAtEndOfMacroExpansion(EndLoc, SM, PP.getLangOpts());
+ return AttrStartIsInMacro && AttrEndIsInMacro;
+}
+
/// ParseGNUAttributes - Parse a non-empty attributes list.
///
/// [GNU] attributes:
@@ -134,7 +150,10 @@ void Parser::ParseGNUAttributes(ParsedAttributes &attrs,
assert(Tok.is(tok::kw___attribute) && "Not a GNU attribute list!");
while (Tok.is(tok::kw___attribute)) {
- ConsumeToken();
+ SourceLocation AttrTokLoc = ConsumeToken();
+ unsigned OldNumAttrs = attrs.size();
+ unsigned OldNumLateAttrs = LateAttrs ? LateAttrs->size() : 0;
+
if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen_after,
"attribute")) {
SkipUntil(tok::r_paren, StopAtSemi); // skip until ) or ;
@@ -145,10 +164,10 @@ void Parser::ParseGNUAttributes(ParsedAttributes &attrs,
return;
}
// Parse the attribute-list. e.g. __attribute__(( weak, alias("__f") ))
- while (true) {
- // Allow empty/non-empty attributes. ((__vector_size__(16),,,,))
- if (TryConsumeToken(tok::comma))
- continue;
+ do {
+      // Eat preceding commas to allow __attribute__((,,,foo))
+ while (TryConsumeToken(tok::comma))
+ ;
// Expect an identifier or declaration specifier (const, int, etc.)
if (Tok.isAnnotation())
@@ -193,7 +212,7 @@ void Parser::ParseGNUAttributes(ParsedAttributes &attrs,
Eof.startToken();
Eof.setLocation(Tok.getLocation());
LA->Toks.push_back(Eof);
- }
+ } while (Tok.is(tok::comma));
if (ExpectAndConsume(tok::r_paren))
SkipUntil(tok::r_paren, StopAtSemi);
@@ -202,6 +221,25 @@ void Parser::ParseGNUAttributes(ParsedAttributes &attrs,
SkipUntil(tok::r_paren, StopAtSemi);
if (endLoc)
*endLoc = Loc;
+
+ // If this was declared in a macro, attach the macro IdentifierInfo to the
+ // parsed attribute.
+ auto &SM = PP.getSourceManager();
+ if (!SM.isWrittenInBuiltinFile(SM.getSpellingLoc(AttrTokLoc)) &&
+ FindLocsWithCommonFileID(PP, AttrTokLoc, Loc)) {
+ CharSourceRange ExpansionRange = SM.getExpansionRange(AttrTokLoc);
+ StringRef FoundName =
+ Lexer::getSourceText(ExpansionRange, SM, PP.getLangOpts());
+ IdentifierInfo *MacroII = PP.getIdentifierInfo(FoundName);
+
+ for (unsigned i = OldNumAttrs; i < attrs.size(); ++i)
+ attrs[i].setMacroIdentifier(MacroII, ExpansionRange.getBegin());
+
+ if (LateAttrs) {
+ for (unsigned i = OldNumLateAttrs; i < LateAttrs->size(); ++i)
+ (*LateAttrs)[i]->MacroII = MacroII;
+ }
+ }
}
}
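// Illustrative sketch, not part of the patch: the hunk above records, for GNU
// attributes spelled inside a macro, which macro produced them (identifier and
// expansion location), presumably so later consumers can refer to the macro
// rather than its expansion. The user-level pattern it targets looks like this
// (macro name is a placeholder):
#define NORETURN_ATTR __attribute__((noreturn))

NORETURN_ATTR void fatal_error(const char *msg);
// After this change, the ParsedAttr for 'noreturn' carries the identifier
// 'NORETURN_ATTR' and the location of its expansion.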
@@ -223,6 +261,15 @@ static bool attributeHasVariadicIdentifierArg(const IdentifierInfo &II) {
#undef CLANG_ATTR_VARIADIC_IDENTIFIER_ARG_LIST
}
+/// Determine whether the given attribute treats kw_this as an identifier.
+static bool attributeTreatsKeywordThisAsIdentifier(const IdentifierInfo &II) {
+#define CLANG_ATTR_THIS_ISA_IDENTIFIER_ARG_LIST
+ return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
+#include "clang/Parse/AttrParserStringSwitches.inc"
+ .Default(false);
+#undef CLANG_ATTR_THIS_ISA_IDENTIFIER_ARG_LIST
+}
+
/// Determine whether the given attribute parses a type argument.
static bool attributeIsTypeArgAttr(const IdentifierInfo &II) {
#define CLANG_ATTR_TYPE_ARG_LIST
@@ -287,6 +334,12 @@ unsigned Parser::ParseAttributeArgsCommon(
// Ignore the left paren location for now.
ConsumeParen();
+ bool ChangeKWThisToIdent = attributeTreatsKeywordThisAsIdentifier(*AttrName);
+
+ // Interpret "kw_this" as an identifier if the attributed requests it.
+ if (ChangeKWThisToIdent && Tok.is(tok::kw_this))
+ Tok.setKind(tok::identifier);
+
ArgsVector ArgExprs;
if (Tok.is(tok::identifier)) {
// If this attribute wants an 'identifier' argument, make it so.
@@ -314,6 +367,10 @@ unsigned Parser::ParseAttributeArgsCommon(
// Parse the non-empty comma-separated list of expressions.
do {
+ // Interpret "kw_this" as an identifier if the attributed requests it.
+ if (ChangeKWThisToIdent && Tok.is(tok::kw_this))
+ Tok.setKind(tok::identifier);
+
ExprResult ArgExpr;
if (Tok.is(tok::identifier) &&
attributeHasVariadicIdentifierArg(*AttrName)) {
@@ -1413,7 +1470,7 @@ void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
// Append the current token at the end of the new token stream so that it
// doesn't get lost.
LA.Toks.push_back(Tok);
- PP.EnterTokenStream(LA.Toks, true);
+ PP.EnterTokenStream(LA.Toks, true, /*IsReinject=*/true);
// Consume the previously pushed token.
ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
@@ -1716,7 +1773,8 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(DeclaratorContext Context,
/// [C++11] attribute-specifier-seq decl-specifier-seq[opt]
/// init-declarator-list ';'
///[C90/C++]init-declarator-list ';' [TODO]
-/// [OMP] threadprivate-directive [TODO]
+/// [OMP] threadprivate-directive
+/// [OMP] allocate-directive [TODO]
///
/// for-range-declaration: [C++11 6.5p1: stmt.ranged]
/// attribute-specifier-seq[opt] type-specifier-seq declarator
@@ -2275,7 +2333,8 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
return nullptr;
}
- ExprResult Init(ParseInitializer());
+ PreferredType.enterVariableInit(Tok.getLocation(), ThisDecl);
+ ExprResult Init = ParseInitializer();
// If this is the only decl in (possibly) range based for statement,
// our best guess is that the user meant ':' instead of '='.
@@ -2313,25 +2372,27 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
InitializerScopeRAII InitScope(*this, D, ThisDecl);
- llvm::function_ref<void()> ExprListCompleter;
auto ThisVarDecl = dyn_cast_or_null<VarDecl>(ThisDecl);
- auto ConstructorCompleter = [&, ThisVarDecl] {
+ auto RunSignatureHelp = [&]() {
QualType PreferredType = Actions.ProduceConstructorSignatureHelp(
getCurScope(), ThisVarDecl->getType()->getCanonicalTypeInternal(),
ThisDecl->getLocation(), Exprs, T.getOpenLocation());
CalledSignatureHelp = true;
- Actions.CodeCompleteExpression(getCurScope(), PreferredType);
+ return PreferredType;
+ };
+ auto SetPreferredType = [&] {
+ PreferredType.enterFunctionArgument(Tok.getLocation(), RunSignatureHelp);
};
+
+ llvm::function_ref<void()> ExpressionStarts;
if (ThisVarDecl) {
// ParseExpressionList can sometimes succeed even when ThisDecl is not
// VarDecl. This is an error and it is reported in a call to
// Actions.ActOnInitializerError(). However, we call
- // ProduceConstructorSignatureHelp only on VarDecls, falling back to
- // default completer in other cases.
- ExprListCompleter = ConstructorCompleter;
+ // ProduceConstructorSignatureHelp only on VarDecls.
+ ExpressionStarts = SetPreferredType;
}
-
- if (ParseExpressionList(Exprs, CommaLocs, ExprListCompleter)) {
+ if (ParseExpressionList(Exprs, CommaLocs, ExpressionStarts)) {
if (ThisVarDecl && PP.isCodeCompletionReached() && !CalledSignatureHelp) {
Actions.ProduceConstructorSignatureHelp(
getCurScope(), ThisVarDecl->getType()->getCanonicalTypeInternal(),
@@ -2420,14 +2481,15 @@ void Parser::ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS,
Diag(DS.getInlineSpecLoc(), diag::err_typename_invalid_functionspec);
if (DS.isVirtualSpecified())
Diag(DS.getVirtualSpecLoc(), diag::err_typename_invalid_functionspec);
- if (DS.isExplicitSpecified())
+ if (DS.hasExplicitSpecifier())
Diag(DS.getExplicitSpecLoc(), diag::err_typename_invalid_functionspec);
DS.ClearFunctionSpecs();
}
- // Issue diagnostic and remove constexpr specfier if present.
- if (DS.isConstexprSpecified() && DSC != DeclSpecContext::DSC_condition) {
- Diag(DS.getConstexprSpecLoc(), diag::err_typename_invalid_constexpr);
+ // Issue diagnostic and remove constexpr specifier if present.
+ if (DS.hasConstexprSpecifier() && DSC != DeclSpecContext::DSC_condition) {
+ Diag(DS.getConstexprSpecLoc(), diag::err_typename_invalid_constexpr)
+ << (DS.getConstexprSpecifier() == CSK_consteval);
DS.ClearConstexprSpec();
}
}
@@ -2499,6 +2561,11 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
return false;
}
+ // Early exit as Sema has a dedicated missing_actual_pipe_type diagnostic
+ // for incomplete declarations such as `pipe p`.
+ if (getLangOpts().OpenCLCPlusPlus && DS.isTypeSpecPipe())
+ return false;
+
if (getLangOpts().CPlusPlus &&
DS.getStorageClassSpec() == DeclSpec::SCS_auto) {
// Don't require a type specifier if we have the 'auto' storage class
@@ -2627,6 +2694,9 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
case tok::semi:
// This looks like a variable or function declaration. The type is
// probably missing. We're done parsing decl-specifiers.
+ // But only if we are not in a function prototype scope.
+ if (getCurScope()->isFunctionPrototypeScope())
+ break;
if (SS)
AnnotateScopeToken(*SS, /*IsNewAnnotation*/false);
return false;
@@ -2680,7 +2750,7 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
// TODO: Could inject an invalid typedef decl in an enclosing scope to
// avoid rippling error messages on subsequent uses of the same type,
// could be useful if #include was forgotten.
- return false;
+ return true;
}
/// Determine the declaration specifier context from the declarator
@@ -2832,7 +2902,7 @@ Parser::DiagnoseMissingSemiAfterTagDefinition(DeclSpec &DS, AccessSpecifier AS,
IdentifierInfo *Name = AfterScope.getIdentifierInfo();
Sema::NameClassification Classification = Actions.ClassifyName(
getCurScope(), SS, Name, AfterScope.getLocation(), Next,
- /*IsAddressOfOperand*/false);
+ /*IsAddressOfOperand=*/false, /*CCC=*/nullptr);
switch (Classification.getKind()) {
case Sema::NC_Error:
SkipMalformedDecl();
@@ -2853,6 +2923,7 @@ Parser::DiagnoseMissingSemiAfterTagDefinition(DeclSpec &DS, AccessSpecifier AS,
case Sema::NC_Expression:
case Sema::NC_VarTemplate:
case Sema::NC_FunctionTemplate:
+ case Sema::NC_UndeclaredTemplate:
// Might be a redeclaration of a prior entity.
break;
}
@@ -2943,6 +3014,10 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
const char *PrevSpec = nullptr;
unsigned DiagID = 0;
+ // This value needs to be set to the location of the last token if the last
+ // token of the specifier is already consumed.
+ SourceLocation ConsumedEnd;
+
// HACK: MSVC doesn't consider _Atomic to be a keyword and its STL
// implementation for VS2013 uses _Atomic as an identifier for one of the
// classes in <atomic>.
@@ -3114,7 +3189,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
Actions.getTypeName(*Next.getIdentifierInfo(), Next.getLocation(),
getCurScope(), &SS, false, false, nullptr,
/*IsCtorOrDtorName=*/false,
- /*WantNonTrivialSourceInfo=*/true,
+ /*WantNontrivialTypeSourceInfo=*/true,
isClassTemplateDeductionContext(DSContext));
// If the referenced identifier is not a type, then this declspec is
@@ -3323,7 +3398,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// type-name
case tok::annot_template_id: {
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
- if (TemplateId->Kind != TNK_Type_template) {
+ if (TemplateId->Kind != TNK_Type_template &&
+ TemplateId->Kind != TNK_Undeclared_template) {
// This template-id does not refer to a type name, so we're
// done with the type-specifiers.
goto DoneWithDeclSpec;
@@ -3483,7 +3559,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
isInvalid = DS.setFunctionSpecInline(Loc, PrevSpec, DiagID);
break;
case tok::kw_virtual:
- // OpenCL C++ v1.0 s2.9: the virtual function qualifier is not supported.
+    // C++ for OpenCL does not allow the virtual function qualifier, in order
+    // to avoid function pointers, which are restricted in OpenCL v2.0 s6.9.a.
if (getLangOpts().OpenCLCPlusPlus) {
DiagID = diag::err_openclcxx_virtual_function;
PrevSpec = Tok.getIdentifierInfo()->getNameStart();
@@ -3493,9 +3570,33 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
isInvalid = DS.setFunctionSpecVirtual(Loc, PrevSpec, DiagID);
}
break;
- case tok::kw_explicit:
- isInvalid = DS.setFunctionSpecExplicit(Loc, PrevSpec, DiagID);
+ case tok::kw_explicit: {
+ SourceLocation ExplicitLoc = Loc;
+ SourceLocation CloseParenLoc;
+ ExplicitSpecifier ExplicitSpec(nullptr, ExplicitSpecKind::ResolvedTrue);
+ ConsumedEnd = ExplicitLoc;
+ ConsumeToken(); // kw_explicit
+ if (Tok.is(tok::l_paren)) {
+ if (getLangOpts().CPlusPlus2a) {
+ ExprResult ExplicitExpr(static_cast<Expr *>(nullptr));
+ BalancedDelimiterTracker Tracker(*this, tok::l_paren);
+ Tracker.consumeOpen();
+ ExplicitExpr = ParseConstantExpression();
+ ConsumedEnd = Tok.getLocation();
+ if (ExplicitExpr.isUsable()) {
+ CloseParenLoc = Tok.getLocation();
+ Tracker.consumeClose();
+ ExplicitSpec =
+ Actions.ActOnExplicitBoolSpecifier(ExplicitExpr.get());
+ } else
+ Tracker.skipToEnd();
+ } else
+ Diag(Tok.getLocation(), diag::warn_cxx2a_compat_explicit_bool);
+ }
+ isInvalid = DS.setFunctionSpecExplicit(ExplicitLoc, PrevSpec, DiagID,
+ ExplicitSpec, CloseParenLoc);
break;
+ }
case tok::kw__Noreturn:
if (!getLangOpts().C11)
Diag(Loc, diag::ext_c11_noreturn);
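// Illustrative sketch, not part of the patch: the kw_explicit hunk above adds
// parsing for the C++2a conditional explicit specifier, explicit(constant-expression).
// What it accepts in user code; the wrapper type below is a placeholder and
// assumes <type_traits>:
#include <type_traits>

template <class T>
struct Wrapper {
  // Implicit conversion from U is allowed only when U converts to T;
  // otherwise the constructor behaves as if it were marked 'explicit'.
  template <class U>
  explicit(!std::is_convertible_v<U, T>) Wrapper(U &&u);
};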
@@ -3527,7 +3628,12 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// constexpr
case tok::kw_constexpr:
- isInvalid = DS.SetConstexprSpec(Loc, PrevSpec, DiagID);
+ isInvalid = DS.SetConstexprSpec(CSK_constexpr, Loc, PrevSpec, DiagID);
+ break;
+
+ // consteval
+ case tok::kw_consteval:
+ isInvalid = DS.SetConstexprSpec(CSK_consteval, Loc, PrevSpec, DiagID);
break;
// type-specifier
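// Illustrative sketch, not part of the patch: the hunk above adds the C++2a
// 'consteval' keyword next to 'constexpr' (both go through DS.SetConstexprSpec
// with a ConstexprSpecKind). For reference, the user-facing difference:
constexpr int square(int n) { return n * n; }   // compile time or run time
consteval int cube(int n) { return n * n * n; } // immediate: compile time only

constexpr int a = cube(3); // OK, constant evaluation
// int b = cube(n);        // error if 'n' is not a constant expression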
@@ -3675,7 +3781,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
isInvalid = DS.SetTypeAltiVecBool(true, Loc, PrevSpec, DiagID, Policy);
break;
case tok::kw_pipe:
- if (!getLangOpts().OpenCL || (getLangOpts().OpenCLVersion < 200)) {
+ if (!getLangOpts().OpenCL || (getLangOpts().OpenCLVersion < 200 &&
+ !getLangOpts().OpenCLCPlusPlus)) {
// OpenCL 2.0 defined this keyword. OpenCL 1.2 and earlier should
// support the "pipe" word as identifier.
Tok.getIdentifierInfo()->revertTokenIDToIdentifier();
@@ -3790,19 +3897,6 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
getLangOpts());
break;
- // OpenCL access qualifiers:
- case tok::kw___read_only:
- case tok::kw___write_only:
- case tok::kw___read_write:
- // OpenCL C++ 1.0 s2.2: access qualifiers are reserved keywords.
- if (Actions.getLangOpts().OpenCLCPlusPlus) {
- DiagID = diag::err_openclcxx_reserved;
- PrevSpec = Tok.getIdentifierInfo()->getNameStart();
- isInvalid = true;
- }
- ParseOpenCLQualifiers(DS.getAttributes());
- break;
-
// OpenCL address space qualifiers:
case tok::kw___generic:
// generic address space is introduced only in OpenCL v2.0
@@ -3815,10 +3909,15 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
break;
};
LLVM_FALLTHROUGH;
+ case tok::kw_private:
case tok::kw___private:
case tok::kw___global:
case tok::kw___local:
case tok::kw___constant:
+ // OpenCL access qualifiers:
+ case tok::kw___read_only:
+ case tok::kw___write_only:
+ case tok::kw___read_write:
ParseOpenCLQualifiers(DS.getAttributes());
break;
@@ -3847,25 +3946,29 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// If a type specifier follows, it will be diagnosed elsewhere.
continue;
}
+
+ DS.SetRangeEnd(ConsumedEnd.isValid() ? ConsumedEnd : Tok.getLocation());
+
// If the specifier wasn't legal, issue a diagnostic.
if (isInvalid) {
assert(PrevSpec && "Method did not return previous specifier!");
assert(DiagID);
if (DiagID == diag::ext_duplicate_declspec ||
- DiagID == diag::ext_warn_duplicate_declspec)
- Diag(Tok, DiagID)
- << PrevSpec << FixItHint::CreateRemoval(Tok.getLocation());
+ DiagID == diag::ext_warn_duplicate_declspec ||
+ DiagID == diag::err_duplicate_declspec)
+ Diag(Loc, DiagID) << PrevSpec
+ << FixItHint::CreateRemoval(
+ SourceRange(Loc, DS.getEndLoc()));
else if (DiagID == diag::err_opencl_unknown_type_specifier) {
- Diag(Tok, DiagID) << getLangOpts().OpenCLCPlusPlus
- << getLangOpts().getOpenCLVersionTuple().getAsString()
- << PrevSpec << isStorageClass;
+ Diag(Loc, DiagID) << getLangOpts().OpenCLCPlusPlus
+ << getLangOpts().getOpenCLVersionTuple().getAsString()
+ << PrevSpec << isStorageClass;
} else
- Diag(Tok, DiagID) << PrevSpec;
+ Diag(Loc, DiagID) << PrevSpec;
}
- DS.SetRangeEnd(Tok.getLocation());
- if (DiagID != diag::err_bool_redeclaration)
+ if (DiagID != diag::err_bool_redeclaration && ConsumedEnd.isInvalid())
// After an error the next token can be an annotation token.
ConsumeAnyToken();
@@ -3876,6 +3979,9 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
/// ParseStructDeclaration - Parse a struct declaration without the terminating
/// semicolon.
///
+/// Note that a struct declaration refers to a declaration in a struct,
+/// not to the declaration of a struct.
+///
/// struct-declaration:
/// [C2x] attributes-specifier-seq[opt]
/// specifier-qualifier-list struct-declarator-list
@@ -4331,7 +4437,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
if (Tok.isNot(tok::semi)) {
// A semicolon was missing after this declaration. Diagnose and recover.
ExpectAndConsume(tok::semi, diag::err_expected_after, "enum");
- PP.EnterToken(Tok);
+ PP.EnterToken(Tok, /*IsReinject=*/true);
Tok.setKind(tok::semi);
}
} else {
@@ -4609,7 +4715,7 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
// Push this token back into the preprocessor and change our current token
// to ';' so that the rest of the code recovers as though there were an
// ';' after the definition.
- PP.EnterToken(Tok);
+ PP.EnterToken(Tok, /*IsReinject=*/true);
Tok.setKind(tok::semi);
}
}
@@ -4782,9 +4888,11 @@ bool Parser::isTypeSpecifierQualifier() {
case tok::kw___read_only:
case tok::kw___read_write:
case tok::kw___write_only:
-
return true;
+ case tok::kw_private:
+ return getLangOpts().OpenCL;
+
// C11 _Atomic
case tok::kw__Atomic:
return true;
@@ -4801,7 +4909,8 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
default: return false;
case tok::kw_pipe:
- return getLangOpts().OpenCL && (getLangOpts().OpenCLVersion >= 200);
+ return (getLangOpts().OpenCL && getLangOpts().OpenCLVersion >= 200) ||
+ getLangOpts().OpenCLCPlusPlus;
case tok::identifier: // foo::bar
// Unfortunate hack to support "Class.factoryMethod" notation.
@@ -4929,6 +5038,9 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::annot_decltype:
case tok::kw_constexpr:
+ // C++20 consteval.
+ case tok::kw_consteval:
+
// C11 _Atomic
case tok::kw__Atomic:
return true;
@@ -4976,6 +5088,9 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
#include "clang/Basic/OpenCLImageTypes.def"
return true;
+
+ case tok::kw_private:
+ return getLangOpts().OpenCL;
}
}
@@ -5176,6 +5291,10 @@ void Parser::ParseTypeQualifierListOpt(
break;
// OpenCL qualifiers:
+ case tok::kw_private:
+ if (!getLangOpts().OpenCL)
+ goto DoneWithTypeQuals;
+ LLVM_FALLTHROUGH;
case tok::kw___private:
case tok::kw___global:
case tok::kw___local:
@@ -5282,7 +5401,8 @@ static bool isPtrOperatorToken(tok::TokenKind Kind, const LangOptions &Lang,
if (Kind == tok::star || Kind == tok::caret)
return true;
- if ((Kind == tok::kw_pipe) && Lang.OpenCL && (Lang.OpenCLVersion >= 200))
+ if (Kind == tok::kw_pipe &&
+ ((Lang.OpenCL && Lang.OpenCLVersion >= 200) || Lang.OpenCLCPlusPlus))
return true;
if (!Lang.CPlusPlus)
@@ -6157,8 +6277,22 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
Actions.CurContext->isRecord());
Qualifiers Q = Qualifiers::fromCVRUMask(DS.getTypeQualifiers());
- if (D.getDeclSpec().isConstexprSpecified() && !getLangOpts().CPlusPlus14)
+ if (D.getDeclSpec().hasConstexprSpecifier() && !getLangOpts().CPlusPlus14)
Q.addConst();
+ // FIXME: Collect C++ address spaces.
+ // If there are multiple different address spaces, the source is invalid.
+ // Carry on using the first addr space for the qualifiers of 'this'.
+ // The diagnostic will be given later while creating the function
+ // prototype for the method.
+ if (getLangOpts().OpenCLCPlusPlus) {
+ for (ParsedAttr &attr : DS.getAttributes()) {
+ LangAS ASIdx = attr.asOpenCLLangAS();
+ if (ASIdx != LangAS::Default) {
+ Q.addAddressSpace(ASIdx);
+ break;
+ }
+ }
+ }
Sema::CXXThisScopeRAII ThisScope(
Actions, dyn_cast<CXXRecordDecl>(Actions.CurContext), Q,
diff --git a/lib/Parse/ParseDeclCXX.cpp b/lib/Parse/ParseDeclCXX.cpp
index f8359f1e87d8..9c61c4da447a 100644
--- a/lib/Parse/ParseDeclCXX.cpp
+++ b/lib/Parse/ParseDeclCXX.cpp
@@ -1,9 +1,8 @@
//===--- ParseDeclCXX.cpp - C++ Declaration Parsing -------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -25,6 +24,7 @@
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/TimeProfiler.h"
using namespace clang;
@@ -437,9 +437,10 @@ Decl *Parser::ParseExportDeclaration() {
// The Modules TS draft says "An export-declaration shall declare at least one
// entity", but the intent is that it shall contain at least one declaration.
- if (Tok.is(tok::r_brace))
+ if (Tok.is(tok::r_brace) && getLangOpts().ModulesTS) {
Diag(ExportLoc, diag::err_export_empty)
<< SourceRange(ExportLoc, Tok.getLocation());
+ }
while (!tryParseMisplacedModuleImport() && Tok.isNot(tok::r_brace) &&
Tok.isNot(tok::eof)) {
@@ -473,6 +474,13 @@ Parser::ParseUsingDirectiveOrDeclaration(DeclaratorContext Context,
return nullptr;
}
+ // Consume unexpected 'template' keywords.
+ while (Tok.is(tok::kw_template)) {
+ SourceLocation TemplateLoc = ConsumeToken();
+ Diag(TemplateLoc, diag::err_unexpected_template_after_using)
+ << FixItHint::CreateRemoval(TemplateLoc);
+ }
+
// 'using namespace' means this is a using-directive.
if (Tok.is(tok::kw_namespace)) {
// Template parameters are always an error here.
@@ -589,10 +597,11 @@ bool Parser::ParseUsingDeclarator(DeclaratorContext Context,
// Parse nested-name-specifier.
IdentifierInfo *LastII = nullptr;
- ParseOptionalCXXScopeSpecifier(D.SS, nullptr, /*EnteringContext=*/false,
- /*MayBePseudoDtor=*/nullptr,
- /*IsTypename=*/false,
- /*LastII=*/&LastII);
+ if (ParseOptionalCXXScopeSpecifier(D.SS, nullptr, /*EnteringContext=*/false,
+ /*MayBePseudoDtor=*/nullptr,
+ /*IsTypename=*/false,
+ /*LastII=*/&LastII))
+ return true;
if (D.SS.isInvalid())
return true;
@@ -1031,7 +1040,7 @@ void Parser::AnnotateExistingDecltypeSpecifier(const DeclSpec& DS,
if (PP.isBacktrackEnabled())
PP.RevertCachedTokens(1);
else
- PP.EnterToken(Tok);
+ PP.EnterToken(Tok, /*IsReinject*/true);
Tok.setKind(tok::annot_decltype);
setExprAnnotation(Tok,
@@ -1103,7 +1112,8 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
// Parse optional nested-name-specifier
CXXScopeSpec SS;
- ParseOptionalCXXScopeSpecifier(SS, nullptr, /*EnteringContext=*/false);
+ if (ParseOptionalCXXScopeSpecifier(SS, nullptr, /*EnteringContext=*/false))
+ return true;
BaseLoc = Tok.getLocation();
@@ -1127,7 +1137,8 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
if (Tok.is(tok::annot_template_id)) {
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
if (TemplateId->Kind == TNK_Type_template ||
- TemplateId->Kind == TNK_Dependent_template_name) {
+ TemplateId->Kind == TNK_Dependent_template_name ||
+ TemplateId->Kind == TNK_Undeclared_template) {
AnnotateTemplateIdTokenAsType(/*IsClassName*/true);
assert(Tok.is(tok::annot_typename) && "template-id -> type failed");
@@ -1197,9 +1208,9 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
// We have an identifier; check whether it is actually a type.
IdentifierInfo *CorrectedII = nullptr;
ParsedType Type = Actions.getTypeName(
- *Id, IdLoc, getCurScope(), &SS, /*IsClassName=*/true, false, nullptr,
+ *Id, IdLoc, getCurScope(), &SS, /*isClassName=*/true, false, nullptr,
/*IsCtorOrDtorName=*/false,
- /*NonTrivialTypeSourceInfo=*/true,
+ /*WantNontrivialTypeSourceInfo=*/true,
/*IsClassTemplateDeductionContext*/ false, &CorrectedII);
if (!Type) {
Diag(IdLoc, diag::err_expected_class_name);
@@ -1248,9 +1259,11 @@ bool Parser::isValidAfterTypeSpecifier(bool CouldBeBitfield) {
case tok::ampamp: // struct foo {...} && R = ...
case tok::identifier: // struct foo {...} V ;
case tok::r_paren: //(struct foo {...} ) {4}
+ case tok::coloncolon: // struct foo {...} :: a::b;
case tok::annot_cxxscope: // struct foo {...} a:: b;
case tok::annot_typename: // struct foo {...} a ::b;
case tok::annot_template_id: // struct foo {...} a<int> ::b;
+ case tok::kw_decltype: // struct foo {...} decltype (a)::b;
case tok::l_paren: // struct foo {...} ( x);
case tok::comma: // __builtin_offsetof(struct foo{...} ,
case tok::kw_operator: // struct foo operator ++() {...}
@@ -1544,6 +1557,36 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
TemplateParameterLists *TemplateParams = TemplateInfo.TemplateParams;
+ auto RecoverFromUndeclaredTemplateName = [&](IdentifierInfo *Name,
+ SourceLocation NameLoc,
+ SourceRange TemplateArgRange,
+ bool KnownUndeclared) {
+ Diag(NameLoc, diag::err_explicit_spec_non_template)
+ << (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation)
+ << TagTokKind << Name << TemplateArgRange << KnownUndeclared;
+
+ // Strip off the last template parameter list if it was empty, since
+ // we've removed its template argument list.
+ if (TemplateParams && TemplateInfo.LastParameterListWasEmpty) {
+ if (TemplateParams->size() > 1) {
+ TemplateParams->pop_back();
+ } else {
+ TemplateParams = nullptr;
+ const_cast<ParsedTemplateInfo &>(TemplateInfo).Kind =
+ ParsedTemplateInfo::NonTemplate;
+ }
+ } else if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation) {
+ // Pretend this is just a forward declaration.
+ TemplateParams = nullptr;
+ const_cast<ParsedTemplateInfo &>(TemplateInfo).Kind =
+ ParsedTemplateInfo::NonTemplate;
+ const_cast<ParsedTemplateInfo &>(TemplateInfo).TemplateLoc =
+ SourceLocation();
+ const_cast<ParsedTemplateInfo &>(TemplateInfo).ExternLoc =
+ SourceLocation();
+ }
+ };
+
// Parse the (optional) class name or simple-template-id.
IdentifierInfo *Name = nullptr;
SourceLocation NameLoc;
@@ -1564,38 +1607,26 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// try to give any location information for the list.
LAngleLoc = RAngleLoc = SourceLocation();
}
-
- Diag(NameLoc, diag::err_explicit_spec_non_template)
- << (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation)
- << TagTokKind << Name << SourceRange(LAngleLoc, RAngleLoc);
-
- // Strip off the last template parameter list if it was empty, since
- // we've removed its template argument list.
- if (TemplateParams && TemplateInfo.LastParameterListWasEmpty) {
- if (TemplateParams->size() > 1) {
- TemplateParams->pop_back();
- } else {
- TemplateParams = nullptr;
- const_cast<ParsedTemplateInfo&>(TemplateInfo).Kind
- = ParsedTemplateInfo::NonTemplate;
- }
- } else if (TemplateInfo.Kind
- == ParsedTemplateInfo::ExplicitInstantiation) {
- // Pretend this is just a forward declaration.
- TemplateParams = nullptr;
- const_cast<ParsedTemplateInfo&>(TemplateInfo).Kind
- = ParsedTemplateInfo::NonTemplate;
- const_cast<ParsedTemplateInfo&>(TemplateInfo).TemplateLoc
- = SourceLocation();
- const_cast<ParsedTemplateInfo&>(TemplateInfo).ExternLoc
- = SourceLocation();
- }
+ RecoverFromUndeclaredTemplateName(
+ Name, NameLoc, SourceRange(LAngleLoc, RAngleLoc), false);
}
} else if (Tok.is(tok::annot_template_id)) {
TemplateId = takeTemplateIdAnnotation(Tok);
NameLoc = ConsumeAnnotationToken();
- if (TemplateId->Kind != TNK_Type_template &&
+ if (TemplateId->Kind == TNK_Undeclared_template) {
+ // Try to resolve the template name to a type template.
+ Actions.ActOnUndeclaredTypeTemplateName(getCurScope(), TemplateId->Template,
+ TemplateId->Kind, NameLoc, Name);
+ if (TemplateId->Kind == TNK_Undeclared_template) {
+ RecoverFromUndeclaredTemplateName(
+ Name, NameLoc,
+ SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc), true);
+ TemplateId = nullptr;
+ }
+ }
+
+ if (TemplateId && TemplateId->Kind != TNK_Type_template &&
TemplateId->Kind != TNK_Dependent_template_name) {
// The template-name in the simple-template-id refers to
// something other than a class template. Give an appropriate
@@ -1606,7 +1637,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// FIXME: Name may be null here.
Diag(TemplateId->LAngleLoc, diag::err_template_spec_syntax_non_template)
- << TemplateId->Name << static_cast<int>(TemplateId->Kind) << Range;
+ << TemplateId->Name << static_cast<int>(TemplateId->Kind) << Range;
DS.SetTypeSpecError();
SkipUntil(tok::semi, StopBeforeMatch);
@@ -1705,7 +1736,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// A semicolon was missing after this declaration. Diagnose and recover.
ExpectAndConsume(tok::semi, diag::err_expected_after,
DeclSpec::getSpecifierName(TagType, PPol));
- PP.EnterToken(Tok);
+ PP.EnterToken(Tok, /*IsReinject*/true);
Tok.setKind(tok::semi);
}
} else
@@ -1982,7 +2013,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// Push this token back into the preprocessor and change our current token
// to ';' so that the rest of the code recovers as though there were an
// ';' after the definition.
- PP.EnterToken(Tok);
+ PP.EnterToken(Tok, /*IsReinject=*/true);
Tok.setKind(tok::semi);
}
}
@@ -2539,6 +2570,13 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// Eat 'using'.
SourceLocation UsingLoc = ConsumeToken();
+ // Consume unexpected 'template' keywords.
+ while (Tok.is(tok::kw_template)) {
+ SourceLocation TemplateLoc = ConsumeToken();
+ Diag(TemplateLoc, diag::err_unexpected_template_after_using)
+ << FixItHint::CreateRemoval(TemplateLoc);
+ }
+
if (Tok.is(tok::kw_namespace)) {
Diag(UsingLoc, diag::err_using_namespace_in_class);
SkipUntil(tok::semi, StopBeforeMatch);
@@ -3048,9 +3086,14 @@ Parser::DeclGroupPtrTy Parser::ParseCXXClassMemberDeclarationWithPragmas(
DiagnoseUnexpectedNamespace(cast<NamedDecl>(TagDecl));
return nullptr;
+ case tok::kw_private:
+ // FIXME: We don't accept GNU attributes on access specifiers in OpenCL mode
+ // yet.
+ if (getLangOpts().OpenCL && !NextToken().is(tok::colon))
+ return ParseCXXClassMemberDeclaration(AS, AccessAttrs);
+ LLVM_FALLTHROUGH;
case tok::kw_public:
- case tok::kw_protected:
- case tok::kw_private: {
+ case tok::kw_protected: {
AccessSpecifier NewAS = getAccessSpecifierIfPresent();
assert(NewAS != AS_none);
// Current token is a C++ access specifier.
@@ -3110,6 +3153,12 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
TagType == DeclSpec::TST_union ||
TagType == DeclSpec::TST_class) && "Invalid TagType!");
+ llvm::TimeTraceScope TimeScope("ParseClass", [&]() {
+ if (auto *TD = dyn_cast_or_null<NamedDecl>(TagDecl))
+ return TD->getQualifiedNameAsString();
+ return std::string("<anonymous>");
+ });
+
PrettyDeclStackTraceEntry CrashInfo(Actions.Context, TagDecl, RecordLoc,
"parsing struct/union/class body");
@@ -3231,7 +3280,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
if (SuggestFixIt) {
LBraceDiag << FixItHint::CreateInsertion(BraceLoc, " {");
// Try recovering from missing { after base-clause.
- PP.EnterToken(Tok);
+ PP.EnterToken(Tok, /*IsReinject*/true);
Tok.setKind(tok::l_brace);
} else {
if (TagDecl)
@@ -3327,12 +3376,12 @@ void Parser::DiagnoseUnexpectedNamespace(NamedDecl *D) {
diag::note_missing_end_of_definition_before) << D;
// Push '};' onto the token stream to recover.
- PP.EnterToken(Tok);
+ PP.EnterToken(Tok, /*IsReinject*/ true);
Tok.startToken();
Tok.setLocation(PP.getLocForEndOfToken(PrevTokLocation));
Tok.setKind(tok::semi);
- PP.EnterToken(Tok);
+ PP.EnterToken(Tok, /*IsReinject*/ true);
Tok.setKind(tok::r_brace);
}
@@ -3423,7 +3472,8 @@ void Parser::ParseConstructorInitializer(Decl *ConstructorDecl) {
MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
// parse '::'[opt] nested-name-specifier[opt]
CXXScopeSpec SS;
- ParseOptionalCXXScopeSpecifier(SS, nullptr, /*EnteringContext=*/false);
+ if (ParseOptionalCXXScopeSpecifier(SS, nullptr, /*EnteringContext=*/false))
+ return true;
// : identifier
IdentifierInfo *II = nullptr;
@@ -3449,11 +3499,14 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
? takeTemplateIdAnnotation(Tok)
: nullptr;
if (TemplateId && (TemplateId->Kind == TNK_Type_template ||
- TemplateId->Kind == TNK_Dependent_template_name)) {
+ TemplateId->Kind == TNK_Dependent_template_name ||
+ TemplateId->Kind == TNK_Undeclared_template)) {
AnnotateTemplateIdTokenAsType(/*IsClassName*/true);
assert(Tok.is(tok::annot_typename) && "template-id -> type failed");
TemplateTypeTy = getTypeAnnotation(Tok);
ConsumeAnnotationToken();
+ if (!TemplateTypeTy)
+ return true;
} else {
Diag(Tok, diag::err_expected_member_or_base_name);
return true;
@@ -3482,20 +3535,20 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
// Parse the optional expression-list.
ExprVector ArgExprs;
CommaLocsTy CommaLocs;
+ auto RunSignatureHelp = [&] {
+ QualType PreferredType = Actions.ProduceCtorInitMemberSignatureHelp(
+ getCurScope(), ConstructorDecl, SS, TemplateTypeTy, ArgExprs, II,
+ T.getOpenLocation());
+ CalledSignatureHelp = true;
+ return PreferredType;
+ };
if (Tok.isNot(tok::r_paren) &&
ParseExpressionList(ArgExprs, CommaLocs, [&] {
- QualType PreferredType = Actions.ProduceCtorInitMemberSignatureHelp(
- getCurScope(), ConstructorDecl, SS, TemplateTypeTy, ArgExprs, II,
- T.getOpenLocation());
- CalledSignatureHelp = true;
- Actions.CodeCompleteExpression(getCurScope(), PreferredType);
+ PreferredType.enterFunctionArgument(Tok.getLocation(),
+ RunSignatureHelp);
})) {
- if (PP.isCodeCompletionReached() && !CalledSignatureHelp) {
- Actions.ProduceCtorInitMemberSignatureHelp(
- getCurScope(), ConstructorDecl, SS, TemplateTypeTy, ArgExprs, II,
- T.getOpenLocation());
- CalledSignatureHelp = true;
- }
+ if (PP.isCodeCompletionReached() && !CalledSignatureHelp)
+ RunSignatureHelp();
SkipUntil(tok::r_paren, StopAtSemi);
return true;
}
@@ -3859,6 +3912,7 @@ static bool IsBuiltInOrStandardCXX11Attribute(IdentifierInfo *AttrName,
case ParsedAttr::AT_Deprecated:
case ParsedAttr::AT_FallThrough:
case ParsedAttr::AT_CXX11NoReturn:
+ case ParsedAttr::AT_NoUniqueAddress:
return true;
case ParsedAttr::AT_WarnUnusedResult:
return !ScopeName && AttrName->getName().equals("nodiscard");
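// Illustrative sketch, not part of the patch: the one-line hunk above adds
// C++2a [[no_unique_address]] to the attributes this check recognizes as
// built-in/standard. Its user-facing effect, for reference (the overlap is
// permitted, not required, so the exact layout is implementation-defined):
struct Empty {};

struct Packed {
  [[no_unique_address]] Empty tag; // may share storage with 'value'
  int value;
};
// On Itanium-ABI targets this typically gives sizeof(Packed) == sizeof(int).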
diff --git a/lib/Parse/ParseExpr.cpp b/lib/Parse/ParseExpr.cpp
index 4bcbebcbb48e..7a0c07bd3b04 100644
--- a/lib/Parse/ParseExpr.cpp
+++ b/lib/Parse/ParseExpr.cpp
@@ -1,9 +1,8 @@
//===--- ParseExpr.cpp - Expression Parsing -------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -159,7 +158,8 @@ Parser::ParseExpressionWithLeadingExtension(SourceLocation ExtLoc) {
/// Parse an expr that doesn't include (top-level) commas.
ExprResult Parser::ParseAssignmentExpression(TypeCastState isTypeCast) {
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Expression);
+ Actions.CodeCompleteExpression(getCurScope(),
+ PreferredType.get(Tok.getLocation()));
cutOffParsing();
return ExprError();
}
@@ -272,7 +272,10 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
getLangOpts().CPlusPlus11);
SourceLocation ColonLoc;
+ auto SavedType = PreferredType;
while (1) {
+ // Every iteration may rely on a preferred type for the whole expression.
+ PreferredType = SavedType;
// If this token has a lower precedence than we are allowed to parse (e.g.
// because we are called recursively, or because the token is not a binop),
// then we are done!
@@ -300,7 +303,7 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
// We can't do this before consuming the comma, because
// isNotExpressionStart() looks at the token stream.
if (OpToken.is(tok::comma) && isNotExpressionStart()) {
- PP.EnterToken(Tok);
+ PP.EnterToken(Tok, /*IsReinject*/true);
Tok = OpToken;
return LHS;
}
@@ -310,7 +313,7 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
if (isFoldOperator(NextTokPrec) && Tok.is(tok::ellipsis)) {
// FIXME: We can't check this via lookahead before we consume the token
// because that tickles a lexer bug.
- PP.EnterToken(Tok);
+ PP.EnterToken(Tok, /*IsReinject*/true);
Tok = OpToken;
return LHS;
}
@@ -323,7 +326,7 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
if (getLangOpts().ObjC && getLangOpts().CPlusPlus &&
Tok.isOneOf(tok::colon, tok::r_square) &&
OpToken.getIdentifierInfo() != nullptr) {
- PP.EnterToken(Tok);
+ PP.EnterToken(Tok, /*IsReinject*/true);
Tok = OpToken;
return LHS;
}
@@ -393,15 +396,8 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
}
}
- // Code completion for the right-hand side of a binary expression goes
- // through a special hook that takes the left-hand side into account.
- if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteBinaryRHS(getCurScope(), LHS.get(),
- OpToken.getKind());
- cutOffParsing();
- return ExprError();
- }
-
+ PreferredType.enterBinary(Actions, Tok.getLocation(), LHS.get(),
+ OpToken.getKind());
// Parse another leaf here for the RHS of the operator.
// ParseCastExpression works here because all RHS expressions in C have it
// as a prefix, at least. However, in C++, an assignment-expression could
@@ -547,7 +543,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
}
namespace {
-class CastExpressionIdValidator : public CorrectionCandidateCallback {
+class CastExpressionIdValidator final : public CorrectionCandidateCallback {
public:
CastExpressionIdValidator(Token Next, bool AllowTypes, bool AllowNonTypes)
: NextToken(Next), AllowNonTypes(AllowNonTypes) {
@@ -576,6 +572,10 @@ class CastExpressionIdValidator : public CorrectionCandidateCallback {
return false;
}
+ std::unique_ptr<CorrectionCandidateCallback> clone() override {
+ return llvm::make_unique<CastExpressionIdValidator>(*this);
+ }
+
private:
Token NextToken;
bool AllowNonTypes;
@@ -639,6 +639,10 @@ class CastExpressionIdValidator : public CorrectionCandidateCallback {
/// [GNU] '__builtin_offsetof' '(' type-name ',' offsetof-member-designator')'
/// [GNU] '__builtin_choose_expr' '(' assign-expr ',' assign-expr ','
/// assign-expr ')'
+/// [GNU] '__builtin_FILE' '(' ')'
+/// [GNU] '__builtin_FUNCTION' '(' ')'
+/// [GNU] '__builtin_LINE' '(' ')'
+/// [CLANG] '__builtin_COLUMN' '(' ')'
/// [GNU] '__builtin_types_compatible_p' '(' type-name ',' type-name ')'
/// [GNU] '__null'
/// [OBJC] '[' objc-message-expr ']'
@@ -764,6 +768,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
bool isVectorLiteral) {
ExprResult Res;
tok::TokenKind SavedKind = Tok.getKind();
+ auto SavedType = PreferredType;
NotCastExpr = false;
// This handles all of cast-expression, unary-expression, postfix-expression,
@@ -1044,19 +1049,21 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
CXXScopeSpec ScopeSpec;
SourceLocation TemplateKWLoc;
Token Replacement;
- auto Validator = llvm::make_unique<CastExpressionIdValidator>(
- Tok, isTypeCast != NotTypeCast, isTypeCast != IsTypeCast);
- Validator->IsAddressOfOperand = isAddressOfOperand;
+ CastExpressionIdValidator Validator(
+ /*Next=*/Tok,
+ /*AllowTypes=*/isTypeCast != NotTypeCast,
+ /*AllowNonTypes=*/isTypeCast != IsTypeCast);
+ Validator.IsAddressOfOperand = isAddressOfOperand;
if (Tok.isOneOf(tok::periodstar, tok::arrowstar)) {
- Validator->WantExpressionKeywords = false;
- Validator->WantRemainingKeywords = false;
+ Validator.WantExpressionKeywords = false;
+ Validator.WantRemainingKeywords = false;
} else {
- Validator->WantRemainingKeywords = Tok.isNot(tok::r_paren);
+ Validator.WantRemainingKeywords = Tok.isNot(tok::r_paren);
}
Name.setIdentifier(&II, ILoc);
Res = Actions.ActOnIdExpression(
getCurScope(), ScopeSpec, TemplateKWLoc, Name, Tok.is(tok::l_paren),
- isAddressOfOperand, std::move(Validator),
+ isAddressOfOperand, &Validator,
/*IsInlineAsmIdentifier=*/false,
Tok.is(tok::r_paren) ? nullptr : &Replacement);
if (!Res.isInvalid() && Res.isUnset()) {
@@ -1103,6 +1110,10 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
case tok::kw___builtin_choose_expr:
case tok::kw___builtin_astype: // primary-expression: [OCL] as_type()
case tok::kw___builtin_convertvector:
+ case tok::kw___builtin_COLUMN:
+ case tok::kw___builtin_FILE:
+ case tok::kw___builtin_FUNCTION:
+ case tok::kw___builtin_LINE:
return ParseBuiltinPrimaryExpression();
case tok::kw___null:
return Actions.ActOnGNUNullExpr(ConsumeToken());
@@ -1115,6 +1126,9 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
// -- cast-expression
Token SavedTok = Tok;
ConsumeToken();
+
+ PreferredType.enterUnary(Actions, Tok.getLocation(), SavedTok.getKind(),
+ SavedTok.getLocation());
// One special case is implicitly handled here: if the preceding tokens are
// an ambiguous cast expression, such as "(T())++", then we recurse to
// determine whether the '++' is prefix or postfix.
@@ -1136,6 +1150,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
case tok::amp: { // unary-expression: '&' cast-expression
// Special treatment because of member pointers
SourceLocation SavedLoc = ConsumeToken();
+ PreferredType.enterUnary(Actions, Tok.getLocation(), tok::amp, SavedLoc);
Res = ParseCastExpression(false, true);
if (!Res.isInvalid())
Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Res.get());
@@ -1150,6 +1165,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
case tok::kw___real: // unary-expression: '__real' cast-expression [GNU]
case tok::kw___imag: { // unary-expression: '__imag' cast-expression [GNU]
SourceLocation SavedLoc = ConsumeToken();
+ PreferredType.enterUnary(Actions, Tok.getLocation(), SavedKind, SavedLoc);
Res = ParseCastExpression(false);
if (!Res.isInvalid())
Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Res.get());
@@ -1207,6 +1223,9 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
case tok::kw_static_cast:
Res = ParseCXXCasts();
break;
+ case tok::kw___builtin_bit_cast:
+ Res = ParseBuiltinBitCast();
+ break;
case tok::kw_typeid:
Res = ParseCXXTypeid();
break;
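// Illustrative sketch, not part of the patch: the hunk above dispatches the
// __builtin_bit_cast keyword to its own parse routine. The builtin takes a
// destination type and a source expression, reinterprets the object
// representation, and works in constant expressions (it is the primitive that
// constexpr std::bit_cast builds on). Assuming IEEE-754 single-precision float:
#include <cstdint>

constexpr std::uint32_t bits_of_float(float f) {
  return __builtin_bit_cast(std::uint32_t, f);
}
static_assert(bits_of_float(1.0f) == 0x3f800000u);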
@@ -1424,7 +1443,8 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
Res = ParseBlockLiteralExpression();
break;
case tok::code_completion: {
- Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Expression);
+ Actions.CodeCompleteExpression(getCurScope(),
+ PreferredType.get(Tok.getLocation()));
cutOffParsing();
return ExprError();
}
@@ -1459,6 +1479,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
// that the address of the function is being taken, which is illegal in CL.
// These can be followed by postfix-expr pieces.
+ PreferredType = SavedType;
Res = ParsePostfixExpressionSuffix(Res);
if (getLangOpts().OpenCL)
if (Expr *PostfixExpr = Res.get()) {
@@ -1498,13 +1519,17 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
// Now that the primary-expression piece of the postfix-expression has been
// parsed, see if there are any postfix-expression pieces here.
SourceLocation Loc;
+ auto SavedType = PreferredType;
while (1) {
+    // Each iteration relies on the preferred type for the whole expression.
+ PreferredType = SavedType;
switch (Tok.getKind()) {
case tok::code_completion:
if (InMessageExpression)
return LHS;
- Actions.CodeCompletePostfixExpression(getCurScope(), LHS);
+ Actions.CodeCompletePostfixExpression(
+ getCurScope(), LHS, PreferredType.get(Tok.getLocation()));
cutOffParsing();
return ExprError();
@@ -1546,6 +1571,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
Loc = T.getOpenLocation();
ExprResult Idx, Length;
SourceLocation ColonLoc;
+ PreferredType.enterSubscript(Actions, Tok.getLocation(), LHS.get());
if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
Idx = ParseBraceInitializer();
@@ -1567,7 +1593,9 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
SourceLocation RLoc = Tok.getLocation();
- ExprResult OrigLHS = LHS;
+ LHS = Actions.CorrectDelayedTyposInExpr(LHS);
+ Idx = Actions.CorrectDelayedTyposInExpr(Idx);
+ Length = Actions.CorrectDelayedTyposInExpr(Length);
if (!LHS.isInvalid() && !Idx.isInvalid() && !Length.isInvalid() &&
Tok.is(tok::r_square)) {
if (ColonLoc.isValid()) {
@@ -1579,12 +1607,6 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
}
} else {
LHS = ExprError();
- }
- if (LHS.isInvalid()) {
- (void)Actions.CorrectDelayedTyposInExpr(OrigLHS);
- (void)Actions.CorrectDelayedTyposInExpr(Idx);
- (void)Actions.CorrectDelayedTyposInExpr(Length);
- LHS = ExprError();
Idx = ExprError();
}
@@ -1649,34 +1671,25 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
ExprVector ArgExprs;
CommaLocsTy CommaLocs;
-
- if (Tok.is(tok::code_completion)) {
+ auto RunSignatureHelp = [&]() -> QualType {
QualType PreferredType = Actions.ProduceCallSignatureHelp(
- getCurScope(), LHS.get(), None, PT.getOpenLocation());
+ getCurScope(), LHS.get(), ArgExprs, PT.getOpenLocation());
CalledSignatureHelp = true;
- Actions.CodeCompleteExpression(getCurScope(), PreferredType);
- cutOffParsing();
- return ExprError();
- }
-
+ return PreferredType;
+ };
if (OpKind == tok::l_paren || !LHS.isInvalid()) {
if (Tok.isNot(tok::r_paren)) {
if (ParseExpressionList(ArgExprs, CommaLocs, [&] {
- QualType PreferredType = Actions.ProduceCallSignatureHelp(
- getCurScope(), LHS.get(), ArgExprs, PT.getOpenLocation());
- CalledSignatureHelp = true;
- Actions.CodeCompleteExpression(getCurScope(), PreferredType);
+ PreferredType.enterFunctionArgument(Tok.getLocation(),
+ RunSignatureHelp);
})) {
(void)Actions.CorrectDelayedTyposInExpr(LHS);
// If we got an error when parsing expression list, we don't call
// the CodeCompleteCall handler inside the parser. So call it here
// to make sure we get overload suggestions even when we are in the
// middle of a parameter.
- if (PP.isCodeCompletionReached() && !CalledSignatureHelp) {
- Actions.ProduceCallSignatureHelp(getCurScope(), LHS.get(),
- ArgExprs, PT.getOpenLocation());
- CalledSignatureHelp = true;
- }
+ if (PP.isCodeCompletionReached() && !CalledSignatureHelp)
+ RunSignatureHelp();
LHS = ExprError();
} else if (LHS.isInvalid()) {
for (auto &E : ArgExprs)
@@ -1727,6 +1740,8 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
bool MayBePseudoDestructor = false;
Expr* OrigLHS = !LHS.isInvalid() ? LHS.get() : nullptr;
+ PreferredType.enterMemAccess(Actions, Tok.getLocation(), OrigLHS);
+
if (getLangOpts().CPlusPlus && !LHS.isInvalid()) {
Expr *Base = OrigLHS;
const Type* BaseType = Base->getType().getTypePtrOrNull();
@@ -1755,7 +1770,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
if (Tok.is(tok::code_completion)) {
tok::TokenKind CorrectedOpKind =
OpKind == tok::arrow ? tok::period : tok::arrow;
- ExprResult CorrectedLHS(/*IsInvalid=*/true);
+ ExprResult CorrectedLHS(/*Invalid=*/true);
if (getLangOpts().CPlusPlus && OrigLHS) {
const bool DiagsAreSuppressed = Diags.getSuppressAllDiagnostics();
Diags.setSuppressAllDiagnostics(true);
@@ -1773,7 +1788,8 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
// Code completion for a member access expression.
Actions.CodeCompleteMemberReferenceExpr(
getCurScope(), Base, CorrectedBase, OpLoc, OpKind == tok::arrow,
- Base && ExprStatementTokLoc == Base->getBeginLoc());
+ Base && ExprStatementTokLoc == Base->getBeginLoc(),
+ PreferredType.get(Tok.getLocation()));
cutOffParsing();
return ExprError();
@@ -2036,7 +2052,7 @@ ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
if (isCastExpr)
return Actions.ActOnUnaryExprOrTypeTraitExpr(OpTok.getLocation(),
ExprKind,
- /*isType=*/true,
+ /*IsType=*/true,
CastTy.getAsOpaquePtr(),
CastRange);
@@ -2047,7 +2063,7 @@ ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
if (!Operand.isInvalid())
Operand = Actions.ActOnUnaryExprOrTypeTraitExpr(OpTok.getLocation(),
ExprKind,
- /*isType=*/false,
+ /*IsType=*/false,
Operand.get(),
CastRange);
return Operand;
@@ -2062,6 +2078,10 @@ ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
/// [GNU] '__builtin_choose_expr' '(' assign-expr ',' assign-expr ','
/// assign-expr ')'
/// [GNU] '__builtin_types_compatible_p' '(' type-name ',' type-name ')'
+/// [GNU] '__builtin_FILE' '(' ')'
+/// [GNU] '__builtin_FUNCTION' '(' ')'
+/// [GNU] '__builtin_LINE' '(' ')'
+/// [CLANG] '__builtin_COLUMN' '(' ')'
/// [OCL] '__builtin_astype' '(' assignment-expression ',' type-name ')'
///
/// [GNU] offsetof-member-designator:
@@ -2281,6 +2301,33 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
ConsumeParen());
break;
}
+ case tok::kw___builtin_COLUMN:
+ case tok::kw___builtin_FILE:
+ case tok::kw___builtin_FUNCTION:
+ case tok::kw___builtin_LINE: {
+ // Attempt to consume the r-paren.
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_expected) << tok::r_paren;
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+ SourceLocExpr::IdentKind Kind = [&] {
+ switch (T) {
+ case tok::kw___builtin_FILE:
+ return SourceLocExpr::File;
+ case tok::kw___builtin_FUNCTION:
+ return SourceLocExpr::Function;
+ case tok::kw___builtin_LINE:
+ return SourceLocExpr::Line;
+ case tok::kw___builtin_COLUMN:
+ return SourceLocExpr::Column;
+ default:
+ llvm_unreachable("invalid keyword");
+ }
+ }();
+ Res = Actions.ActOnSourceLocExpr(Kind, StartLoc, ConsumeParen());
+ break;
+ }
}
if (Res.isInvalid())
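// Illustrative sketch, not part of the patch: the hunks above parse the new
// __builtin_FILE / __builtin_FUNCTION / __builtin_LINE / __builtin_COLUMN
// expressions. Unlike the __FILE__ / __LINE__ macros, these report the
// location of the call site when used in a default argument, which is what
// std::source_location-style APIs rely on. A hypothetical logging helper:
#include <cstdio>

void log_here(const char *msg,
              const char *file = __builtin_FILE(),
              unsigned line = __builtin_LINE()) {
  std::printf("%s:%u: %s\n", file, line, msg);
}

void caller() {
  log_here("reached"); // prints caller()'s file and line, not log_here()'s
}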
@@ -2327,14 +2374,16 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
return ExprError();
SourceLocation OpenLoc = T.getOpenLocation();
+ PreferredType.enterParenExpr(Tok.getLocation(), OpenLoc);
+
ExprResult Result(true);
bool isAmbiguousTypeId;
CastTy = nullptr;
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteOrdinaryName(getCurScope(),
- ExprType >= CompoundLiteral? Sema::PCC_ParenthesizedExpression
- : Sema::PCC_Expression);
+ Actions.CodeCompleteExpression(
+ getCurScope(), PreferredType.get(Tok.getLocation()),
+ /*IsParenthesized=*/ExprType >= CompoundLiteral);
cutOffParsing();
return ExprError();
}
@@ -2415,6 +2464,8 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
T.consumeClose();
ColonProtection.restore();
RParenLoc = T.getCloseLocation();
+
+ PreferredType.enterTypeCast(Tok.getLocation(), Ty.get().get());
ExprResult SubExpr = ParseCastExpression(/*isUnaryExpression=*/false);
if (Ty.isInvalid() || SubExpr.isInvalid())
@@ -2545,6 +2596,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
return ExprError();
}
+ PreferredType.enterTypeCast(Tok.getLocation(), CastTy.get());
// Parse the cast-expression that follows it next.
// TODO: For cast expression with CastTy.
Result = ParseCastExpression(/*isUnaryExpression=*/false,
@@ -2839,17 +2891,11 @@ ExprResult Parser::ParseFoldExpression(ExprResult LHS,
/// \endverbatim
bool Parser::ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
- llvm::function_ref<void()> Completer) {
+ llvm::function_ref<void()> ExpressionStarts) {
bool SawError = false;
while (1) {
- if (Tok.is(tok::code_completion)) {
- if (Completer)
- Completer();
- else
- Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Expression);
- cutOffParsing();
- return true;
- }
+ if (ExpressionStarts)
+ ExpressionStarts();
ExprResult Expr;
if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
@@ -3009,7 +3055,7 @@ ExprResult Parser::ParseBlockLiteralExpression() {
/*IsAmbiguous=*/false,
/*RParenLoc=*/NoLoc,
/*ArgInfo=*/nullptr,
- /*NumArgs=*/0,
+ /*NumParams=*/0,
/*EllipsisLoc=*/NoLoc,
/*RParenLoc=*/NoLoc,
/*RefQualifierIsLvalueRef=*/true,
diff --git a/lib/Parse/ParseExprCXX.cpp b/lib/Parse/ParseExprCXX.cpp
index 3caec6b4def6..85c7e6c6bcdf 100644
--- a/lib/Parse/ParseExprCXX.cpp
+++ b/lib/Parse/ParseExprCXX.cpp
@@ -1,9 +1,8 @@
//===--- ParseExprCXX.cpp - C++ Expression Parsing ------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -21,7 +20,7 @@
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "llvm/Support/ErrorHandling.h"
-
+#include <numeric>
using namespace clang;
@@ -70,9 +69,9 @@ static void FixDigraph(Parser &P, Preprocessor &PP, Token &DigraphToken,
DigraphToken.setLength(1);
// Push new tokens back to token stream.
- PP.EnterToken(ColonToken);
+ PP.EnterToken(ColonToken, /*IsReinject*/ true);
if (!AtDigraph)
- PP.EnterToken(DigraphToken);
+ PP.EnterToken(DigraphToken, /*IsReinject*/ true);
}
// Check for '<::' which should be '< ::' instead of '[:' when following
@@ -233,13 +232,16 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
HasScopeSpecifier = true;
}
+ // The preferred type might change when parsing qualifiers; we need the original.
+ auto SavedType = PreferredType;
while (true) {
if (HasScopeSpecifier) {
if (Tok.is(tok::code_completion)) {
// Code completion for a nested-name-specifier, where the code
// completion token follows the '::'.
Actions.CodeCompleteQualifiedId(getCurScope(), SS, EnteringContext,
- ObjectType.get());
+ ObjectType.get(),
+ SavedType.get(SS.getBeginLoc()));
// Include code completion token into the range of the scope otherwise
// when we try to annotate the scope tokens the dangling code completion
// token will cause assertion in
@@ -435,7 +437,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
Token ColonColon;
PP.Lex(ColonColon);
ColonColon.setKind(tok::colon);
- PP.EnterToken(ColonColon);
+ PP.EnterToken(ColonColon, /*IsReinject*/ true);
break;
}
}
@@ -461,8 +463,8 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
// mistyped '::' instead of ':'.
if (CorrectionFlagPtr && IsCorrectedToColon) {
ColonColon.setKind(tok::colon);
- PP.EnterToken(Tok);
- PP.EnterToken(ColonColon);
+ PP.EnterToken(Tok, /*IsReinject*/ true);
+ PP.EnterToken(ColonColon, /*IsReinject*/ true);
Tok = Identifier;
break;
}
@@ -488,6 +490,14 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
EnteringContext,
Template,
MemberOfUnknownSpecialization)) {
+ // If lookup didn't find anything, we treat the name as a template-name
+ // anyway. C++20 requires this, and in prior language modes it improves
+ // error recovery. But before we commit to this, check that we actually
+ // have something that looks like a template-argument-list next.
+ if (!IsTypename && TNK == TNK_Undeclared_template &&
+ isTemplateArgumentList(1) == TPResult::False)
+ break;
+
// We have found a template name, so annotate this token
// with a template-id annotation. We do not permit the
// template-id to be translated into a type annotation,
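For reference, the C++20 behaviour this falls back to (P0846) treats an undeclared name followed by a plausible template-argument-list as a template-name; a minimal sketch, not part of the patch, with invented names:

namespace N {
  struct A {};
  template <int I> int f(A) { return I; }   // only reachable via ADL at the call below
}
int call(N::A a) { return f<0>(a); }         // C++20: 'f<' parsed as a template-id, ADL then finds N::f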
@@ -502,7 +512,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
}
if (MemberOfUnknownSpecialization && (ObjectType || SS.isSet()) &&
- (IsTypename || IsTemplateArgumentList(1))) {
+ (IsTypename || isTemplateArgumentList(1) == TPResult::True)) {
// We have something like t::getAs<T>, where getAs is a
// member of an unknown specialization. However, this will only
// parse correctly as a template, so suggest the keyword 'template'
@@ -564,7 +574,7 @@ ExprResult Parser::tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOpe
ExprResult E = Actions.ActOnIdExpression(
getCurScope(), SS, TemplateKWLoc, Name, Tok.is(tok::l_paren),
- isAddressOfOperand, nullptr, /*IsInlineAsmIdentifier=*/false,
+ isAddressOfOperand, /*CCC=*/nullptr, /*IsInlineAsmIdentifier=*/false,
&Replacement);
if (!E.isInvalid() && !E.isUnset() && Tok.is(tok::less))
checkPotentialAngleBracket(E);
@@ -639,6 +649,8 @@ ExprResult Parser::ParseCXXIdExpression(bool isAddressOfOperand) {
///
/// lambda-expression:
/// lambda-introducer lambda-declarator[opt] compound-statement
+/// lambda-introducer '<' template-parameter-list '>'
+/// lambda-declarator[opt] compound-statement
///
/// lambda-introducer:
/// '[' lambda-capture[opt] ']'
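The added production is C++20's explicit template parameter list on a lambda (an extension diagnostic in earlier modes); a small sketch of what it accepts, not part of the patch:

auto twice = []<typename T>(T x) { return x + x; };
int four = twice(2);      // T deduced as int
auto three = twice(1.5);  // T deduced as double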
@@ -677,9 +689,7 @@ ExprResult Parser::ParseCXXIdExpression(bool isAddressOfOperand) {
ExprResult Parser::ParseLambdaExpression() {
// Parse lambda-introducer.
LambdaIntroducer Intro;
- Optional<unsigned> DiagID = ParseLambdaIntroducer(Intro);
- if (DiagID) {
- Diag(Tok, DiagID.getValue());
+ if (ParseLambdaIntroducer(Intro)) {
SkipUntil(tok::r_square, StopAtSemi);
SkipUntil(tok::l_brace, StopAtSemi);
SkipUntil(tok::r_brace, StopAtSemi);
@@ -689,9 +699,8 @@ ExprResult Parser::ParseLambdaExpression() {
return ParseLambdaExpressionAfterIntroducer(Intro);
}
-/// TryParseLambdaExpression - Use lookahead and potentially tentative
-/// parsing to determine if we are looking at a C++0x lambda expression, and parse
-/// it if we are.
+/// Use lookahead and potentially tentative parsing to determine if we are
+/// looking at a C++11 lambda expression, and parse it if we are.
///
/// If we are not looking at a lambda expression, returns ExprError().
ExprResult Parser::TryParseLambdaExpression() {
@@ -708,28 +717,53 @@ ExprResult Parser::TryParseLambdaExpression() {
if (Next.is(tok::r_square) || // []
Next.is(tok::equal) || // [=
(Next.is(tok::amp) && // [&] or [&,
- (After.is(tok::r_square) ||
- After.is(tok::comma))) ||
+ After.isOneOf(tok::r_square, tok::comma)) ||
(Next.is(tok::identifier) && // [identifier]
- After.is(tok::r_square))) {
+ After.is(tok::r_square)) ||
+ Next.is(tok::ellipsis)) { // [...
return ParseLambdaExpression();
}
// If lookahead indicates an ObjC message send...
// [identifier identifier
- if (Next.is(tok::identifier) && After.is(tok::identifier)) {
+ if (Next.is(tok::identifier) && After.is(tok::identifier))
return ExprEmpty();
- }
// Here, we're stuck: lambda introducers and Objective-C message sends are
// unambiguous, but it requires arbitrary lookahead. [a,b,c,d,e,f,g] is a
// lambda, and [a,b,c,d,e,f,g h] is an Objective-C message send. Instead of
// writing two routines to parse a lambda introducer, just try to parse
// a lambda introducer first, and fall back if that fails.
- // (TryParseLambdaIntroducer never produces any diagnostic output.)
LambdaIntroducer Intro;
- if (TryParseLambdaIntroducer(Intro))
- return ExprEmpty();
+ {
+ TentativeParsingAction TPA(*this);
+ LambdaIntroducerTentativeParse Tentative;
+ if (ParseLambdaIntroducer(Intro, &Tentative)) {
+ TPA.Commit();
+ return ExprError();
+ }
+
+ switch (Tentative) {
+ case LambdaIntroducerTentativeParse::Success:
+ TPA.Commit();
+ break;
+
+ case LambdaIntroducerTentativeParse::Incomplete:
+ // Didn't fully parse the lambda-introducer, try again with a
+ // non-tentative parse.
+ TPA.Revert();
+ Intro = LambdaIntroducer();
+ if (ParseLambdaIntroducer(Intro))
+ return ExprError();
+ break;
+
+ case LambdaIntroducerTentativeParse::MessageSend:
+ case LambdaIntroducerTentativeParse::Invalid:
+ // Not a lambda-introducer, might be a message send.
+ TPA.Revert();
+ return ExprEmpty();
+ }
+ }
return ParseLambdaExpressionAfterIntroducer(Intro);
}
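To make the disambiguation concrete, a sketch (not part of the patch) of the cases the tentative parse distinguishes; only the init-capture forces the Incomplete path and a non-tentative re-parse:

void demo() {
  int n = 3;
  auto a = [n] { return n; };            // plain capture: the tentative parse succeeds outright
  auto b = [m = n + 1] { return m; };    // init-capture: the initializer is skipped tentatively
                                         //   (Incomplete) and then re-parsed non-tentatively
  // [obj doSomething]                   // in Objective-C++, this shape is a message send instead
}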
@@ -737,15 +771,16 @@ ExprResult Parser::TryParseLambdaExpression() {
/// Parse a lambda introducer.
/// \param Intro A LambdaIntroducer filled in with information about the
/// contents of the lambda-introducer.
-/// \param SkippedInits If non-null, we are disambiguating between an Obj-C
-/// message send and a lambda expression. In this mode, we will
-/// sometimes skip the initializers for init-captures and not fully
-/// populate \p Intro. This flag will be set to \c true if we do so.
-/// \return A DiagnosticID if it hit something unexpected. The location for
-/// the diagnostic is that of the current token.
-Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
- bool *SkippedInits) {
- typedef Optional<unsigned> DiagResult;
+/// \param Tentative If non-null, we are disambiguating between a
+/// lambda-introducer and some other construct. In this mode, we do not
+/// produce any diagnostics or take any other irreversible action unless
+/// we're sure that this is a lambda-expression.
+/// \return \c true if parsing (or disambiguation) failed with a diagnostic and
+/// the caller should bail out / recover.
+bool Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
+ LambdaIntroducerTentativeParse *Tentative) {
+ if (Tentative)
+ *Tentative = LambdaIntroducerTentativeParse::Success;
assert(Tok.is(tok::l_square) && "Lambda expressions begin with '['.");
BalancedDelimiterTracker T(*this, tok::l_square);
@@ -753,37 +788,64 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
Intro.Range.setBegin(T.getOpenLocation());
- bool first = true;
+ bool First = true;
+
+ // Produce a diagnostic if we're not tentatively parsing; otherwise track
+ // that our parse has failed.
+ auto Invalid = [&](llvm::function_ref<void()> Action) {
+ if (Tentative) {
+ *Tentative = LambdaIntroducerTentativeParse::Invalid;
+ return false;
+ }
+ Action();
+ return true;
+ };
+
+ // Perform some irreversible action if this is a non-tentative parse;
+ // otherwise note that our actions were incomplete.
+ auto NonTentativeAction = [&](llvm::function_ref<void()> Action) {
+ if (Tentative)
+ *Tentative = LambdaIntroducerTentativeParse::Incomplete;
+ else
+ Action();
+ };
// Parse capture-default.
if (Tok.is(tok::amp) &&
(NextToken().is(tok::comma) || NextToken().is(tok::r_square))) {
Intro.Default = LCD_ByRef;
Intro.DefaultLoc = ConsumeToken();
- first = false;
+ First = false;
+ if (!Tok.getIdentifierInfo()) {
+ // This can only be a lambda; no need for tentative parsing any more.
+ // '[[and]]' can still be an attribute, though.
+ Tentative = nullptr;
+ }
} else if (Tok.is(tok::equal)) {
Intro.Default = LCD_ByCopy;
Intro.DefaultLoc = ConsumeToken();
- first = false;
+ First = false;
+ Tentative = nullptr;
}
while (Tok.isNot(tok::r_square)) {
- if (!first) {
+ if (!First) {
if (Tok.isNot(tok::comma)) {
// Provide a completion for a lambda introducer here. Except
// in Objective-C, where this is Almost Surely meant to be a message
// send. In that case, fail here and let the ObjC message
// expression parser perform the completion.
if (Tok.is(tok::code_completion) &&
- !(getLangOpts().ObjC && Intro.Default == LCD_None &&
- !Intro.Captures.empty())) {
+ !(getLangOpts().ObjC && Tentative)) {
Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
/*AfterAmpersand=*/false);
cutOffParsing();
break;
}
- return DiagResult(diag::err_expected_comma_or_rsquare);
+ return Invalid([&] {
+ Diag(Tok.getLocation(), diag::err_expected_comma_or_rsquare);
+ });
}
ConsumeToken();
}
@@ -791,7 +853,7 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
if (Tok.is(tok::code_completion)) {
// If we're in Objective-C++ and we have a bare '[', then this is more
// likely to be a message receiver.
- if (getLangOpts().ObjC && first)
+ if (getLangOpts().ObjC && Tentative && First)
Actions.CodeCompleteObjCMessageReceiver(getCurScope());
else
Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
@@ -800,14 +862,14 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
break;
}
- first = false;
+ First = false;
// Parse capture.
LambdaCaptureKind Kind = LCK_ByCopy;
LambdaCaptureInitKind InitKind = LambdaCaptureInitKind::NoInit;
SourceLocation Loc;
IdentifierInfo *Id = nullptr;
- SourceLocation EllipsisLoc;
+ SourceLocation EllipsisLocs[4];
ExprResult Init;
SourceLocation LocStart = Tok.getLocation();
@@ -817,12 +879,16 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
ConsumeToken();
Kind = LCK_StarThis;
} else {
- return DiagResult(diag::err_expected_star_this_capture);
+ return Invalid([&] {
+ Diag(Tok.getLocation(), diag::err_expected_star_this_capture);
+ });
}
} else if (Tok.is(tok::kw_this)) {
Kind = LCK_This;
Loc = ConsumeToken();
} else {
+ TryConsumeToken(tok::ellipsis, EllipsisLocs[0]);
+
if (Tok.is(tok::amp)) {
Kind = LCK_ByRef;
ConsumeToken();
@@ -835,18 +901,24 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
}
}
+ TryConsumeToken(tok::ellipsis, EllipsisLocs[1]);
+
if (Tok.is(tok::identifier)) {
Id = Tok.getIdentifierInfo();
Loc = ConsumeToken();
} else if (Tok.is(tok::kw_this)) {
- // FIXME: If we want to suggest a fixit here, will need to return more
- // than just DiagnosticID. Perhaps full DiagnosticBuilder that can be
- // Clear()ed to prevent emission in case of tentative parsing?
- return DiagResult(diag::err_this_captured_by_reference);
+ return Invalid([&] {
+ // FIXME: Suggest a fixit here.
+ Diag(Tok.getLocation(), diag::err_this_captured_by_reference);
+ });
} else {
- return DiagResult(diag::err_expected_capture);
+ return Invalid([&] {
+ Diag(Tok.getLocation(), diag::err_expected_capture);
+ });
}
+ TryConsumeToken(tok::ellipsis, EllipsisLocs[2]);
+
if (Tok.is(tok::l_paren)) {
BalancedDelimiterTracker Parens(*this, tok::l_paren);
Parens.consumeOpen();
@@ -855,9 +927,9 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
ExprVector Exprs;
CommaLocsTy Commas;
- if (SkippedInits) {
+ if (Tentative) {
Parens.skipToEnd();
- *SkippedInits = true;
+ *Tentative = LambdaIntroducerTentativeParse::Incomplete;
} else if (ParseExpressionList(Exprs, Commas)) {
Parens.skipToEnd();
Init = ExprError();
@@ -879,13 +951,13 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
else
InitKind = LambdaCaptureInitKind::ListInit;
- if (!SkippedInits) {
+ if (!Tentative) {
Init = ParseInitializer();
} else if (Tok.is(tok::l_brace)) {
BalancedDelimiterTracker Braces(*this, tok::l_brace);
Braces.consumeOpen();
Braces.skipToEnd();
- *SkippedInits = true;
+ *Tentative = LambdaIntroducerTentativeParse::Incomplete;
} else {
// We're disambiguating this:
//
@@ -928,60 +1000,94 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
ConsumeAnnotationToken();
}
}
- } else
- TryConsumeToken(tok::ellipsis, EllipsisLoc);
- }
- // If this is an init capture, process the initialization expression
- // right away. For lambda init-captures such as the following:
- // const int x = 10;
- // auto L = [i = x+1](int a) {
- // return [j = x+2,
- // &k = x](char b) { };
- // };
- // keep in mind that each lambda init-capture has to have:
- // - its initialization expression executed in the context
- // of the enclosing/parent decl-context.
- // - but the variable itself has to be 'injected' into the
- // decl-context of its lambda's call-operator (which has
- // not yet been created).
- // Each init-expression is a full-expression that has to get
- // Sema-analyzed (for capturing etc.) before its lambda's
- // call-operator's decl-context, scope & scopeinfo are pushed on their
- // respective stacks. Thus if any variable is odr-used in the init-capture
- // it will correctly get captured in the enclosing lambda, if one exists.
- // The init-variables above are created later once the lambdascope and
- // call-operators decl-context is pushed onto its respective stack.
-
- // Since the lambda init-capture's initializer expression occurs in the
- // context of the enclosing function or lambda, therefore we can not wait
- // till a lambda scope has been pushed on before deciding whether the
- // variable needs to be captured. We also need to process all
- // lvalue-to-rvalue conversions and discarded-value conversions,
- // so that we can avoid capturing certain constant variables.
- // For e.g.,
- // void test() {
- // const int x = 10;
- // auto L = [&z = x](char a) { <-- don't capture by the current lambda
- // return [y = x](int i) { <-- don't capture by enclosing lambda
- // return y;
- // }
- // };
- // }
- // If x was not const, the second use would require 'L' to capture, and
- // that would be an error.
+ }
+
+ TryConsumeToken(tok::ellipsis, EllipsisLocs[3]);
+ }
+ // Check if this is a message send before we act on a possible init-capture.
+ if (Tentative && Tok.is(tok::identifier) &&
+ NextToken().isOneOf(tok::colon, tok::r_square)) {
+ // This can only be a message send. We're done with disambiguation.
+ *Tentative = LambdaIntroducerTentativeParse::MessageSend;
+ return false;
+ }
+
+ // Ensure that any ellipsis was in the right place.
+ SourceLocation EllipsisLoc;
+ if (std::any_of(std::begin(EllipsisLocs), std::end(EllipsisLocs),
+ [](SourceLocation Loc) { return Loc.isValid(); })) {
+ // The '...' should appear before the identifier in an init-capture, and
+ // after the identifier otherwise.
+ bool InitCapture = InitKind != LambdaCaptureInitKind::NoInit;
+ SourceLocation *ExpectedEllipsisLoc =
+ !InitCapture ? &EllipsisLocs[2] :
+ Kind == LCK_ByRef ? &EllipsisLocs[1] :
+ &EllipsisLocs[0];
+ EllipsisLoc = *ExpectedEllipsisLoc;
+
+ unsigned DiagID = 0;
+ if (EllipsisLoc.isInvalid()) {
+ DiagID = diag::err_lambda_capture_misplaced_ellipsis;
+ for (SourceLocation Loc : EllipsisLocs) {
+ if (Loc.isValid())
+ EllipsisLoc = Loc;
+ }
+ } else {
+ unsigned NumEllipses = std::accumulate(
+ std::begin(EllipsisLocs), std::end(EllipsisLocs), 0,
+ [](int N, SourceLocation Loc) { return N + Loc.isValid(); });
+ if (NumEllipses > 1)
+ DiagID = diag::err_lambda_capture_multiple_ellipses;
+ }
+ if (DiagID) {
+ NonTentativeAction([&] {
+ // Point the diagnostic at the first misplaced ellipsis.
+ SourceLocation DiagLoc;
+ for (SourceLocation &Loc : EllipsisLocs) {
+ if (&Loc != ExpectedEllipsisLoc && Loc.isValid()) {
+ DiagLoc = Loc;
+ break;
+ }
+ }
+ assert(DiagLoc.isValid() && "no location for diagnostic");
+
+ // Issue the diagnostic and produce fixits showing where the ellipsis
+ // should have been written.
+ auto &&D = Diag(DiagLoc, DiagID);
+ if (DiagID == diag::err_lambda_capture_misplaced_ellipsis) {
+ SourceLocation ExpectedLoc =
+ InitCapture ? Loc
+ : Lexer::getLocForEndOfToken(
+ Loc, 0, PP.getSourceManager(), getLangOpts());
+ D << InitCapture << FixItHint::CreateInsertion(ExpectedLoc, "...");
+ }
+ for (SourceLocation &Loc : EllipsisLocs) {
+ if (&Loc != ExpectedEllipsisLoc && Loc.isValid())
+ D << FixItHint::CreateRemoval(Loc);
+ }
+ });
+ }
+ }
+
+ // Process the init-capture initializers now rather than delaying until we
+ // form the lambda-expression so that they can be handled in the context
+ // enclosing the lambda-expression, rather than in the context of the
+ // lambda-expression itself.
ParsedType InitCaptureType;
- if (!Init.isInvalid())
+ if (Init.isUsable())
Init = Actions.CorrectDelayedTyposInExpr(Init.get());
if (Init.isUsable()) {
- // Get the pointer and store it in an lvalue, so we can use it as an
- // out argument.
- Expr *InitExpr = Init.get();
- // This performs any lvalue-to-rvalue conversions if necessary, which
- // can affect what gets captured in the containing decl-context.
- InitCaptureType = Actions.actOnLambdaInitCaptureInitialization(
- Loc, Kind == LCK_ByRef, Id, InitKind, InitExpr);
- Init = InitExpr;
+ NonTentativeAction([&] {
+ // Get the pointer and store it in an lvalue, so we can use it as an
+ // out argument.
+ Expr *InitExpr = Init.get();
+ // This performs any lvalue-to-rvalue conversions if necessary, which
+ // can affect what gets captured in the containing decl-context.
+ InitCaptureType = Actions.actOnLambdaInitCaptureInitialization(
+ Loc, Kind == LCK_ByRef, EllipsisLoc, Id, InitKind, InitExpr);
+ Init = InitExpr;
+ });
}
SourceLocation LocEnd = PrevTokLocation;
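The placement rule being enforced: in an init-capture the '...' precedes the identifier, otherwise it follows it. A sketch using C++20 pack captures, not part of the patch:

template <typename... Ts> auto keep(Ts... ts) {
  return [...copies = ts] { return sizeof...(copies); };   // OK: '...' before the name in an init-capture
  // [copies... = ts]  -> err_lambda_capture_misplaced_ellipsis, with a fix-it moving the '...'
  // [ts...]           -> OK: '...' follows the name for a non-init capture
}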
@@ -992,47 +1098,14 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
T.consumeClose();
Intro.Range.setEnd(T.getCloseLocation());
- return DiagResult();
-}
-
-/// TryParseLambdaIntroducer - Tentatively parse a lambda introducer.
-///
-/// Returns true if it hit something unexpected.
-bool Parser::TryParseLambdaIntroducer(LambdaIntroducer &Intro) {
- {
- bool SkippedInits = false;
- TentativeParsingAction PA1(*this);
-
- if (ParseLambdaIntroducer(Intro, &SkippedInits)) {
- PA1.Revert();
- return true;
- }
-
- if (!SkippedInits) {
- PA1.Commit();
- return false;
- }
-
- PA1.Revert();
- }
-
- // Try to parse it again, but this time parse the init-captures too.
- Intro = LambdaIntroducer();
- TentativeParsingAction PA2(*this);
-
- if (!ParseLambdaIntroducer(Intro)) {
- PA2.Commit();
- return false;
- }
-
- PA2.Revert();
- return true;
+ return false;
}
-static void
-tryConsumeMutableOrConstexprToken(Parser &P, SourceLocation &MutableLoc,
- SourceLocation &ConstexprLoc,
- SourceLocation &DeclEndLoc) {
+static void tryConsumeLambdaSpecifierToken(Parser &P,
+ SourceLocation &MutableLoc,
+ SourceLocation &ConstexprLoc,
+ SourceLocation &ConstevalLoc,
+ SourceLocation &DeclEndLoc) {
assert(MutableLoc.isInvalid());
assert(ConstexprLoc.isInvalid());
// Consume constexpr-opt mutable-opt in any sequence, and set the DeclEndLoc
@@ -1060,6 +1133,15 @@ tryConsumeMutableOrConstexprToken(Parser &P, SourceLocation &MutableLoc,
ConstexprLoc = P.ConsumeToken();
DeclEndLoc = ConstexprLoc;
break /*switch*/;
+ case tok::kw_consteval:
+ if (ConstevalLoc.isValid()) {
+ P.Diag(P.getCurToken().getLocation(),
+ diag::err_lambda_decl_specifier_repeated)
+ << 2 << FixItHint::CreateRemoval(P.getCurToken().getLocation());
+ }
+ ConstevalLoc = P.ConsumeToken();
+ DeclEndLoc = ConstevalLoc;
+ break /*switch*/;
default:
return;
}
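The new kw_consteval case lets a lambda declare an immediate (C++20 'consteval') call operator; a minimal sketch, not part of the patch:

constexpr auto square = [](int n) consteval { return n * n; };
static_assert(square(4) == 16);   // every call must be a constant expression
// A repeated 'consteval' here would be diagnosed via err_lambda_decl_specifier_repeated.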
@@ -1075,12 +1157,25 @@ addConstexprToLambdaDeclSpecifier(Parser &P, SourceLocation ConstexprLoc,
: diag::warn_cxx14_compat_constexpr_on_lambda);
const char *PrevSpec = nullptr;
unsigned DiagID = 0;
- DS.SetConstexprSpec(ConstexprLoc, PrevSpec, DiagID);
+ DS.SetConstexprSpec(CSK_constexpr, ConstexprLoc, PrevSpec, DiagID);
assert(PrevSpec == nullptr && DiagID == 0 &&
"Constexpr cannot have been set previously!");
}
}
+static void addConstevalToLambdaDeclSpecifier(Parser &P,
+ SourceLocation ConstevalLoc,
+ DeclSpec &DS) {
+ if (ConstevalLoc.isValid()) {
+ P.Diag(ConstevalLoc, diag::warn_cxx20_compat_consteval);
+ const char *PrevSpec = nullptr;
+ unsigned DiagID = 0;
+ DS.SetConstexprSpec(CSK_consteval, ConstevalLoc, PrevSpec, DiagID);
+ if (DiagID != 0)
+ P.Diag(ConstevalLoc, DiagID) << PrevSpec;
+ }
+}
+
/// ParseLambdaExpressionAfterIntroducer - Parse the rest of a lambda
/// expression.
ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
@@ -1122,6 +1217,33 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
<< A.getName()->getName();
};
+ // FIXME: Consider allowing this as an extension for GCC compatibility.
+ const bool HasExplicitTemplateParams = Tok.is(tok::less);
+ ParseScope TemplateParamScope(this, Scope::TemplateParamScope,
+ /*EnteredScope=*/HasExplicitTemplateParams);
+ if (HasExplicitTemplateParams) {
+ Diag(Tok, getLangOpts().CPlusPlus2a
+ ? diag::warn_cxx17_compat_lambda_template_parameter_list
+ : diag::ext_lambda_template_parameter_list);
+
+ SmallVector<NamedDecl*, 4> TemplateParams;
+ SourceLocation LAngleLoc, RAngleLoc;
+ if (ParseTemplateParameters(CurTemplateDepthTracker.getDepth(),
+ TemplateParams, LAngleLoc, RAngleLoc)) {
+ Actions.ActOnLambdaError(LambdaBeginLoc, getCurScope());
+ return ExprError();
+ }
+
+ if (TemplateParams.empty()) {
+ Diag(RAngleLoc,
+ diag::err_lambda_template_parameter_list_empty);
+ } else {
+ Actions.ActOnLambdaExplicitTemplateParameterList(
+ LAngleLoc, TemplateParams, RAngleLoc);
+ ++CurTemplateDepthTracker;
+ }
+ }
+
TypeResult TrailingReturnType;
if (Tok.is(tok::l_paren)) {
ParseScope PrototypeScope(this,
@@ -1138,13 +1260,20 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
SourceLocation EllipsisLoc;
if (Tok.isNot(tok::r_paren)) {
- Actions.RecordParsingTemplateParameterDepth(TemplateParameterDepth);
+ Actions.RecordParsingTemplateParameterDepth(
+ CurTemplateDepthTracker.getOriginalDepth());
+
ParseParameterDeclarationClause(D, Attr, ParamInfo, EllipsisLoc);
+
// For a generic lambda, each 'auto' within the parameter declaration
// clause creates a template type parameter, so increment the depth.
+ // If we've parsed any explicit template parameters, then the depth will
+ // have already been incremented. So we make sure that at most a single
+ // depth level is added.
if (Actions.getCurGenericLambda())
- ++CurTemplateDepthTracker;
+ CurTemplateDepthTracker.setAddedDepth(1);
}
+
T.consumeClose();
SourceLocation RParenLoc = T.getCloseLocation();
SourceLocation DeclEndLoc = RParenLoc;
@@ -1157,14 +1286,16 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
// compatible with MSVC.
MaybeParseMicrosoftDeclSpecs(Attr, &DeclEndLoc);
- // Parse mutable-opt and/or constexpr-opt, and update the DeclEndLoc.
+ // Parse mutable-opt and/or constexpr-opt or consteval-opt, and update the
+ // DeclEndLoc.
SourceLocation MutableLoc;
SourceLocation ConstexprLoc;
- tryConsumeMutableOrConstexprToken(*this, MutableLoc, ConstexprLoc,
- DeclEndLoc);
+ SourceLocation ConstevalLoc;
+ tryConsumeLambdaSpecifierToken(*this, MutableLoc, ConstexprLoc,
+ ConstevalLoc, DeclEndLoc);
addConstexprToLambdaDeclSpecifier(*this, ConstexprLoc, DS);
-
+ addConstevalToLambdaDeclSpecifier(*this, ConstevalLoc, DS);
// Parse exception-specification[opt].
ExceptionSpecificationType ESpecType = EST_None;
SourceRange ESpecRange;
@@ -1203,10 +1334,10 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
SourceLocation NoLoc;
D.AddTypeInfo(DeclaratorChunk::getFunction(
- /*hasProto=*/true,
- /*isAmbiguous=*/false, LParenLoc, ParamInfo.data(),
+ /*HasProto=*/true,
+ /*IsAmbiguous=*/false, LParenLoc, ParamInfo.data(),
ParamInfo.size(), EllipsisLoc, RParenLoc,
- /*RefQualifierIsLValueRef=*/true,
+ /*RefQualifierIsLvalueRef=*/true,
/*RefQualifierLoc=*/NoLoc, MutableLoc, ESpecType,
ESpecRange, DynamicExceptions.data(),
DynamicExceptionRanges.data(), DynamicExceptions.size(),
@@ -1216,7 +1347,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
TrailingReturnType),
std::move(Attr), DeclEndLoc);
} else if (Tok.isOneOf(tok::kw_mutable, tok::arrow, tok::kw___attribute,
- tok::kw_constexpr) ||
+ tok::kw_constexpr, tok::kw_consteval) ||
(Tok.is(tok::l_square) && NextToken().is(tok::l_square))) {
// It's common to forget that one needs '()' before 'mutable', an attribute
// specifier, or the result type. Deal with this.
@@ -1227,6 +1358,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
case tok::kw___attribute:
case tok::l_square: TokKind = 2; break;
case tok::kw_constexpr: TokKind = 3; break;
+ case tok::kw_consteval: TokKind = 4; break;
default: llvm_unreachable("Unknown token kind");
}
@@ -1262,14 +1394,14 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
SourceLocation NoLoc;
D.AddTypeInfo(DeclaratorChunk::getFunction(
- /*hasProto=*/true,
- /*isAmbiguous=*/false,
+ /*HasProto=*/true,
+ /*IsAmbiguous=*/false,
/*LParenLoc=*/NoLoc,
/*Params=*/nullptr,
/*NumParams=*/0,
/*EllipsisLoc=*/NoLoc,
/*RParenLoc=*/NoLoc,
- /*RefQualifierIsLValueRef=*/true,
+ /*RefQualifierIsLvalueRef=*/true,
/*RefQualifierLoc=*/NoLoc, MutableLoc, EST_None,
/*ESpecRange=*/SourceRange(),
/*Exceptions=*/nullptr,
@@ -1299,6 +1431,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
StmtResult Stmt(ParseCompoundStatementBody());
BodyScope.Exit();
+ TemplateParamScope.Exit();
if (!Stmt.isInvalid() && !TrailingReturnType.isInvalid())
return Actions.ActOnLambdaExpr(LambdaBeginLoc, Stmt.get(), getCurScope());
@@ -1568,7 +1701,7 @@ Parser::ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
ParseUnqualifiedIdTemplateId(SS, SourceLocation(),
Name, NameLoc,
false, ObjectType, SecondTypeName,
- /*AssumeTemplateName=*/true))
+ /*AssumeTemplateId=*/true))
return ExprError();
return Actions.ActOnPseudoDestructorExpr(getCurScope(), Base, OpLoc, OpKind,
@@ -1673,23 +1806,26 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
+ PreferredType.enterTypeCast(Tok.getLocation(), TypeRep.get());
+
ExprVector Exprs;
CommaLocsTy CommaLocs;
+ auto RunSignatureHelp = [&]() {
+ QualType PreferredType = Actions.ProduceConstructorSignatureHelp(
+ getCurScope(), TypeRep.get()->getCanonicalTypeInternal(),
+ DS.getEndLoc(), Exprs, T.getOpenLocation());
+ CalledSignatureHelp = true;
+ return PreferredType;
+ };
+
if (Tok.isNot(tok::r_paren)) {
if (ParseExpressionList(Exprs, CommaLocs, [&] {
- QualType PreferredType = Actions.ProduceConstructorSignatureHelp(
- getCurScope(), TypeRep.get()->getCanonicalTypeInternal(),
- DS.getEndLoc(), Exprs, T.getOpenLocation());
- CalledSignatureHelp = true;
- Actions.CodeCompleteExpression(getCurScope(), PreferredType);
+ PreferredType.enterFunctionArgument(Tok.getLocation(),
+ RunSignatureHelp);
})) {
- if (PP.isCodeCompletionReached() && !CalledSignatureHelp) {
- Actions.ProduceConstructorSignatureHelp(
- getCurScope(), TypeRep.get()->getCanonicalTypeInternal(),
- DS.getEndLoc(), Exprs, T.getOpenLocation());
- CalledSignatureHelp = true;
- }
+ if (PP.isCodeCompletionReached() && !CalledSignatureHelp)
+ RunSignatureHelp();
SkipUntil(tok::r_paren, StopAtSemi);
return ExprError();
}
@@ -1740,6 +1876,7 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
Sema::ConditionKind CK,
ForRangeInfo *FRI) {
ParenBraceBracketBalancer BalancerRAIIObj(*this);
+ PreferredType.enterCondition(Actions, Tok.getLocation());
if (Tok.is(tok::code_completion)) {
Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Condition);
@@ -1859,6 +1996,7 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
diag::warn_cxx98_compat_generalized_initializer_lists);
InitExpr = ParseBraceInitializer();
} else if (CopyInitialization) {
+ PreferredType.enterVariableInit(Tok.getLocation(), DeclOut);
InitExpr = ParseAssignmentExpression();
} else if (Tok.is(tok::l_paren)) {
// This was probably an attempt to initialize the variable.
@@ -1995,6 +2133,13 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
case tok::kw_bool:
DS.SetTypeSpecType(DeclSpec::TST_bool, Loc, PrevSpec, DiagID, Policy);
break;
+#define GENERIC_IMAGE_TYPE(ImgType, Id) \
+ case tok::kw_##ImgType##_t: \
+ DS.SetTypeSpecType(DeclSpec::TST_##ImgType##_t, Loc, PrevSpec, DiagID, \
+ Policy); \
+ break;
+#include "clang/Basic/OpenCLImageTypes.def"
+
case tok::annot_decltype:
case tok::kw_decltype:
DS.SetRangeEnd(ParseDecltypeSpecifier(DS));
@@ -2090,9 +2235,15 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
TemplateKWLoc.isValid(), Id,
ObjectType, EnteringContext, Template,
MemberOfUnknownSpecialization);
+ // If lookup found nothing but we're assuming that this is a template
+ // name, double-check that makes sense syntactically before committing
+ // to it.
+ if (TNK == TNK_Undeclared_template &&
+ isTemplateArgumentList(0) == TPResult::False)
+ return false;
if (TNK == TNK_Non_template && MemberOfUnknownSpecialization &&
- ObjectType && IsTemplateArgumentList()) {
+ ObjectType && isTemplateArgumentList(0) == TPResult::True) {
// We have something like t->getAs<T>(), where getAs is a
// member of an unknown specialization. However, this will only
// parse correctly as a template, so suggest the keyword 'template'
@@ -2196,11 +2347,9 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
ASTTemplateArgsPtr TemplateArgsPtr(TemplateArgs);
// Constructor and destructor names.
- TypeResult Type
- = Actions.ActOnTemplateIdType(SS, TemplateKWLoc,
- Template, Name, NameLoc,
- LAngleLoc, TemplateArgsPtr, RAngleLoc,
- /*IsCtorOrDtorName=*/true);
+ TypeResult Type = Actions.ActOnTemplateIdType(
+ getCurScope(), SS, TemplateKWLoc, Template, Name, NameLoc, LAngleLoc,
+ TemplateArgsPtr, RAngleLoc, /*IsCtorOrDtorName=*/true);
if (Type.isInvalid())
return true;
@@ -2836,23 +2985,21 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
ConstructorLParen = T.getOpenLocation();
if (Tok.isNot(tok::r_paren)) {
CommaLocsTy CommaLocs;
+ auto RunSignatureHelp = [&]() {
+ ParsedType TypeRep =
+ Actions.ActOnTypeName(getCurScope(), DeclaratorInfo).get();
+ QualType PreferredType = Actions.ProduceConstructorSignatureHelp(
+ getCurScope(), TypeRep.get()->getCanonicalTypeInternal(),
+ DeclaratorInfo.getEndLoc(), ConstructorArgs, ConstructorLParen);
+ CalledSignatureHelp = true;
+ return PreferredType;
+ };
if (ParseExpressionList(ConstructorArgs, CommaLocs, [&] {
- ParsedType TypeRep =
- Actions.ActOnTypeName(getCurScope(), DeclaratorInfo).get();
- QualType PreferredType = Actions.ProduceConstructorSignatureHelp(
- getCurScope(), TypeRep.get()->getCanonicalTypeInternal(),
- DeclaratorInfo.getEndLoc(), ConstructorArgs, ConstructorLParen);
- CalledSignatureHelp = true;
- Actions.CodeCompleteExpression(getCurScope(), PreferredType);
+ PreferredType.enterFunctionArgument(Tok.getLocation(),
+ RunSignatureHelp);
})) {
- if (PP.isCodeCompletionReached() && !CalledSignatureHelp) {
- ParsedType TypeRep =
- Actions.ActOnTypeName(getCurScope(), DeclaratorInfo).get();
- Actions.ProduceConstructorSignatureHelp(
- getCurScope(), TypeRep.get()->getCanonicalTypeInternal(),
- DeclaratorInfo.getEndLoc(), ConstructorArgs, ConstructorLParen);
- CalledSignatureHelp = true;
- }
+ if (PP.isCodeCompletionReached() && !CalledSignatureHelp)
+ RunSignatureHelp();
SkipUntil(tok::semi, StopAtSemi | StopBeforeMatch);
return ExprError();
}
@@ -2883,12 +3030,12 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
/// passed to ParseDeclaratorInternal.
///
/// direct-new-declarator:
-/// '[' expression ']'
+/// '[' expression[opt] ']'
/// direct-new-declarator '[' constant-expression ']'
///
void Parser::ParseDirectNewDeclarator(Declarator &D) {
// Parse the array dimensions.
- bool first = true;
+ bool First = true;
while (Tok.is(tok::l_square)) {
// An array-size expression can't start with a lambda.
if (CheckProhibitedCXX11Attribute())
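The relaxed production above ('[' expression[opt] ']') leaves the first bound optional, matching C++20's array bound deduction in new-expressions; an illustrative sketch, not part of the patch:

void demo() {
  int *p = new int[]{1, 2, 3};   // bound of the first dimension deduced as 3
  delete[] p;
}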
@@ -2897,14 +3044,15 @@ void Parser::ParseDirectNewDeclarator(Declarator &D) {
BalancedDelimiterTracker T(*this, tok::l_square);
T.consumeOpen();
- ExprResult Size(first ? ParseExpression()
- : ParseConstantExpression());
+ ExprResult Size =
+ First ? (Tok.is(tok::r_square) ? ExprResult() : ParseExpression())
+ : ParseConstantExpression();
if (Size.isInvalid()) {
// Recover
SkipUntil(tok::r_square, StopAtSemi);
return;
}
- first = false;
+ First = false;
T.consumeClose();
@@ -2913,7 +3061,7 @@ void Parser::ParseDirectNewDeclarator(Declarator &D) {
MaybeParseCXX11Attributes(Attrs);
D.AddTypeInfo(DeclaratorChunk::getArray(0,
- /*static=*/false, /*star=*/false,
+ /*isStatic=*/false, /*isStar=*/false,
Size.get(), T.getOpenLocation(),
T.getCloseLocation()),
std::move(Attrs), T.getCloseLocation());
@@ -2975,8 +3123,59 @@ Parser::ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start) {
// [Footnote: A lambda expression with a lambda-introducer that consists
// of empty square brackets can follow the delete keyword if
// the lambda expression is enclosed in parentheses.]
- // FIXME: Produce a better diagnostic if the '[]' is unambiguously a
- // lambda-introducer.
+
+ const Token Next = GetLookAheadToken(2);
+
+ // Basic lookahead to check if we have a lambda expression.
+ if (Next.isOneOf(tok::l_brace, tok::less) ||
+ (Next.is(tok::l_paren) &&
+ (GetLookAheadToken(3).is(tok::r_paren) ||
+ (GetLookAheadToken(3).is(tok::identifier) &&
+ GetLookAheadToken(4).is(tok::identifier))))) {
+ TentativeParsingAction TPA(*this);
+ SourceLocation LSquareLoc = Tok.getLocation();
+ SourceLocation RSquareLoc = NextToken().getLocation();
+
+ // SkipUntil can't skip pairs of </*...*/>; don't emit a FixIt in this
+ // case.
+ SkipUntil({tok::l_brace, tok::less}, StopBeforeMatch);
+ SourceLocation RBraceLoc;
+ bool EmitFixIt = false;
+ if (Tok.is(tok::l_brace)) {
+ ConsumeBrace();
+ SkipUntil(tok::r_brace, StopBeforeMatch);
+ RBraceLoc = Tok.getLocation();
+ EmitFixIt = true;
+ }
+
+ TPA.Revert();
+
+ if (EmitFixIt)
+ Diag(Start, diag::err_lambda_after_delete)
+ << SourceRange(Start, RSquareLoc)
+ << FixItHint::CreateInsertion(LSquareLoc, "(")
+ << FixItHint::CreateInsertion(
+ Lexer::getLocForEndOfToken(
+ RBraceLoc, 0, Actions.getSourceManager(), getLangOpts()),
+ ")");
+ else
+ Diag(Start, diag::err_lambda_after_delete)
+ << SourceRange(Start, RSquareLoc);
+
+ // Warn that the non-capturing lambda isn't surrounded by parentheses
+ // to disambiguate it from 'delete[]'.
+ ExprResult Lambda = ParseLambdaExpression();
+ if (Lambda.isInvalid())
+ return ExprError();
+
+ // Evaluate any postfix expressions used on the lambda.
+ Lambda = ParsePostfixExpressionSuffix(Lambda);
+ if (Lambda.isInvalid())
+ return ExprError();
+ return Actions.ActOnCXXDelete(Start, UseGlobal, /*ArrayForm=*/false,
+ Lambda.get());
+ }
+
ArrayDelete = true;
BalancedDelimiterTracker T(*this, tok::l_square);
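What the new lookahead diagnoses, sketched (not part of the patch): a '[]' after 'delete' that actually begins a lambda, alongside the parenthesized form the quoted footnote allows:

void f() {
  // delete []() { return new int(0); }();   // err_lambda_after_delete, with a "(...)" fix-it
  delete ([]() { return new int(0); }());    // OK: the lambda is enclosed in parentheses
}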
@@ -3241,7 +3440,8 @@ Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType,
Toks.push_back(Tok);
// Re-enter the stored parenthesized tokens into the token stream, so we may
// parse them now.
- PP.EnterTokenStream(Toks, true /*DisableMacroExpansion*/);
+ PP.EnterTokenStream(Toks, /*DisableMacroExpansion*/ true,
+ /*IsReinject*/ true);
// Drop the current token and bring the first cached one. It's the same token
// as when we entered this function.
ConsumeAnyToken();
@@ -3313,3 +3513,37 @@ Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType,
ConsumeAnyToken();
return Result;
}
+
+/// Parse a __builtin_bit_cast(T, E).
+ExprResult Parser::ParseBuiltinBitCast() {
+ SourceLocation KWLoc = ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume(diag::err_expected_lparen_after, "__builtin_bit_cast"))
+ return ExprError();
+
+ // Parse the common declaration-specifiers piece.
+ DeclSpec DS(AttrFactory);
+ ParseSpecifierQualifierList(DS);
+
+ // Parse the abstract-declarator, if present.
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
+ ParseDeclarator(DeclaratorInfo);
+
+ if (ExpectAndConsume(tok::comma)) {
+ Diag(Tok.getLocation(), diag::err_expected) << tok::comma;
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+
+ ExprResult Operand = ParseExpression();
+
+ if (T.consumeClose())
+ return ExprError();
+
+ if (Operand.isInvalid() || DeclaratorInfo.isInvalidType())
+ return ExprError();
+
+ return Actions.ActOnBuiltinBitCastExpr(KWLoc, DeclaratorInfo, Operand,
+ T.getCloseLocation());
+}
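A minimal use of the builtin this routine parses (a type followed by an expression, usable in constant evaluation), not part of the patch:

static_assert(sizeof(float) == sizeof(unsigned int), "");
constexpr unsigned int Bits = __builtin_bit_cast(unsigned int, 1.0f);   // 0x3f800000 on IEEE-754 targets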
diff --git a/lib/Parse/ParseInit.cpp b/lib/Parse/ParseInit.cpp
index 7742a5087cf0..7a455484b902 100644
--- a/lib/Parse/ParseInit.cpp
+++ b/lib/Parse/ParseInit.cpp
@@ -1,9 +1,8 @@
//===--- ParseInit.cpp - Initializer Parsing ------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -66,15 +65,28 @@ bool Parser::MayBeDesignationStart() {
// Parse up to (at most) the token after the closing ']' to determine
// whether this is a C99 designator or a lambda.
- TentativeParsingAction Tentative(*this);
+ RevertingTentativeParsingAction Tentative(*this);
LambdaIntroducer Intro;
- bool SkippedInits = false;
- Optional<unsigned> DiagID(ParseLambdaIntroducer(Intro, &SkippedInits));
+ LambdaIntroducerTentativeParse ParseResult;
+ if (ParseLambdaIntroducer(Intro, &ParseResult)) {
+ // Hit and diagnosed an error in a lambda.
+ // FIXME: Tell the caller this happened so they can recover.
+ return true;
+ }
+
+ switch (ParseResult) {
+ case LambdaIntroducerTentativeParse::Success:
+ case LambdaIntroducerTentativeParse::Incomplete:
+ // Might be a lambda-expression. Keep looking.
+ // FIXME: If our tentative parse was not incomplete, parse the lambda from
+ // here rather than throwing away then reparsing the LambdaIntroducer.
+ break;
- if (DiagID) {
- // If this can't be a lambda capture list, it's a designator.
- Tentative.Revert();
+ case LambdaIntroducerTentativeParse::MessageSend:
+ case LambdaIntroducerTentativeParse::Invalid:
+ // Can't be a lambda-expression. Treat it as a designator.
+ // FIXME: Should we disambiguate against a message-send?
return true;
}
@@ -83,11 +95,7 @@ bool Parser::MayBeDesignationStart() {
// lambda expression. This decision favors lambdas over the older
// GNU designator syntax, which allows one to omit the '=', but is
// consistent with GCC.
- tok::TokenKind Kind = Tok.getKind();
- // FIXME: If we didn't skip any inits, parse the lambda from here
- // rather than throwing away then reparsing the LambdaIntroducer.
- Tentative.Revert();
- return Kind == tok::equal;
+ return Tok.is(tok::equal);
}
static void CheckArrayDesignatorSyntax(Parser &P, SourceLocation Loc,
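The decision MayBeDesignationStart() is making, sketched in plain C++ (array designators are a GNU extension here), not part of the patch:

int nums[3] = { [0] = 1, [2] = 3 };           // '[' starts a C99-style designator
int (*fns[1])() = { []() { return 42; } };    // '[' starts a lambda-introducer instead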
diff --git a/lib/Parse/ParseObjc.cpp b/lib/Parse/ParseObjc.cpp
index bd55f7179399..8937a0986c95 100644
--- a/lib/Parse/ParseObjc.cpp
+++ b/lib/Parse/ParseObjc.cpp
@@ -1,9 +1,8 @@
//===--- ParseObjC.cpp - Objective C Parsing ------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -65,7 +64,7 @@ Parser::ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs) {
case tok::objc_protocol:
return ParseObjCAtProtocolDeclaration(AtLoc, Attrs);
case tok::objc_implementation:
- return ParseObjCAtImplementationDeclaration(AtLoc);
+ return ParseObjCAtImplementationDeclaration(AtLoc, Attrs);
case tok::objc_end:
return ParseObjCAtEndDeclaration(AtLoc);
case tok::objc_compatibility_alias:
@@ -624,6 +623,8 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
}
// Ignore excess semicolons.
if (Tok.is(tok::semi)) {
+ // FIXME: This should use ConsumeExtraSemi() for extraneous semicolons,
+ // to make -Wextra-semi diagnose them.
ConsumeToken();
continue;
}
@@ -647,7 +648,19 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
// erroneous r_brace would cause an infinite loop if not handled here.
if (Tok.is(tok::r_brace))
break;
+
ParsedAttributesWithRange attrs(AttrFactory);
+
+ // Since we call ParseDeclarationOrFunctionDefinition() instead of
+ // ParseExternalDeclaration() below (so that this doesn't parse nested
+ // @interfaces), this needs to duplicate some code from the latter.
+ if (Tok.isOneOf(tok::kw_static_assert, tok::kw__Static_assert)) {
+ SourceLocation DeclEnd;
+ allTUVariables.push_back(
+ ParseDeclaration(DeclaratorContext::FileContext, DeclEnd, attrs));
+ continue;
+ }
+
allTUVariables.push_back(ParseDeclarationOrFunctionDefinition(attrs));
continue;
}
@@ -1234,11 +1247,11 @@ ParsedType Parser::ParseObjCTypeName(ObjCDeclSpec &DS,
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
- SourceLocation TypeStartLoc = Tok.getLocation();
ObjCDeclContextSwitch ObjCDC(*this);
// Parse type qualifiers, in, inout, etc.
ParseObjCTypeQualifierList(DS, context);
+ SourceLocation TypeStartLoc = Tok.getLocation();
ParsedType Ty;
if (isTypeSpecifierQualifier() || isObjCInstancetype()) {
@@ -1876,6 +1889,7 @@ void Parser::HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocatio
/// ';'
/// objc-instance-variable-decl-list objc-visibility-spec
/// objc-instance-variable-decl-list objc-instance-variable-decl ';'
+/// objc-instance-variable-decl-list static_assert-declaration
/// objc-instance-variable-decl-list ';'
///
/// objc-visibility-spec:
@@ -1929,7 +1943,7 @@ void Parser::ParseObjCClassInstanceVariables(Decl *interfaceDecl,
Tok.setLocation(Tok.getLocation().getLocWithOffset(-1));
Tok.setKind(tok::at);
Tok.setLength(1);
- PP.EnterToken(Tok);
+ PP.EnterToken(Tok, /*IsReinject*/true);
HelperActionsForIvarDeclarations(interfaceDecl, atLoc,
T, AllIvarDecls, true);
return;
@@ -1946,6 +1960,15 @@ void Parser::ParseObjCClassInstanceVariables(Decl *interfaceDecl,
return cutOffParsing();
}
+ // This needs to duplicate a small amount of code from
+ // ParseStructUnionBody() for things that should work in both
+ // C struct and in Objective-C class instance variables.
+ if (Tok.isOneOf(tok::kw_static_assert, tok::kw__Static_assert)) {
+ SourceLocation DeclEnd;
+ ParseStaticAssertDeclaration(DeclEnd);
+ continue;
+ }
+
auto ObjCIvarCallback = [&](ParsingFieldDeclarator &FD) {
Actions.ActOnObjCContainerStartDefinition(interfaceDecl);
// Install the declarator into the interface decl.
@@ -2074,7 +2097,8 @@ Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
/// objc-category-implementation-prologue:
/// @implementation identifier ( identifier )
Parser::DeclGroupPtrTy
-Parser::ParseObjCAtImplementationDeclaration(SourceLocation AtLoc) {
+Parser::ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
+ ParsedAttributes &Attrs) {
assert(Tok.isObjCAtKeyword(tok::objc_implementation) &&
"ParseObjCAtImplementationDeclaration(): Expected @implementation");
CheckNestedObjCContexts(AtLoc);
@@ -2151,8 +2175,7 @@ Parser::ParseObjCAtImplementationDeclaration(SourceLocation AtLoc) {
/*consumeLastToken=*/true);
}
ObjCImpDecl = Actions.ActOnStartCategoryImplementation(
- AtLoc, nameId, nameLoc, categoryId,
- categoryLoc);
+ AtLoc, nameId, nameLoc, categoryId, categoryLoc, Attrs);
} else {
// We have a class implementation
@@ -2166,8 +2189,7 @@ Parser::ParseObjCAtImplementationDeclaration(SourceLocation AtLoc) {
superClassLoc = ConsumeToken(); // Consume super class name
}
ObjCImpDecl = Actions.ActOnStartClassImplementation(
- AtLoc, nameId, nameLoc,
- superClassId, superClassLoc);
+ AtLoc, nameId, nameLoc, superClassId, superClassLoc, Attrs);
if (Tok.is(tok::l_brace)) // we have ivars
ParseObjCClassInstanceVariables(ObjCImpDecl, tok::objc_private, AtLoc);
@@ -2704,7 +2726,8 @@ Decl *Parser::ParseObjCMethodDefinition() {
return MDecl;
}
-StmtResult Parser::ParseObjCAtStatement(SourceLocation AtLoc) {
+StmtResult Parser::ParseObjCAtStatement(SourceLocation AtLoc,
+ ParsedStmtContext StmtCtx) {
if (Tok.is(tok::code_completion)) {
Actions.CodeCompleteObjCAtStatement(getCurScope());
cutOffParsing();
@@ -2741,7 +2764,7 @@ StmtResult Parser::ParseObjCAtStatement(SourceLocation AtLoc) {
// Otherwise, eat the semicolon.
ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
- return Actions.ActOnExprStmt(Res, isExprValueDiscarded());
+ return handleExprStmt(Res, StmtCtx);
}
ExprResult Parser::ParseObjCAtExpression(SourceLocation AtLoc) {
@@ -3171,15 +3194,15 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
if (SuperLoc.isValid())
Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc,
KeyIdents,
- /*AtArgumentEpression=*/true);
+ /*AtArgumentExpression=*/true);
else if (ReceiverType)
Actions.CodeCompleteObjCClassMessage(getCurScope(), ReceiverType,
KeyIdents,
- /*AtArgumentEpression=*/true);
+ /*AtArgumentExpression=*/true);
else
Actions.CodeCompleteObjCInstanceMessage(getCurScope(), ReceiverExpr,
KeyIdents,
- /*AtArgumentEpression=*/true);
+ /*AtArgumentExpression=*/true);
cutOffParsing();
return ExprError();
@@ -3209,15 +3232,15 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
if (SuperLoc.isValid())
Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc,
KeyIdents,
- /*AtArgumentEpression=*/false);
+ /*AtArgumentExpression=*/false);
else if (ReceiverType)
Actions.CodeCompleteObjCClassMessage(getCurScope(), ReceiverType,
KeyIdents,
- /*AtArgumentEpression=*/false);
+ /*AtArgumentExpression=*/false);
else
Actions.CodeCompleteObjCInstanceMessage(getCurScope(), ReceiverExpr,
KeyIdents,
- /*AtArgumentEpression=*/false);
+ /*AtArgumentExpression=*/false);
cutOffParsing();
return ExprError();
}
@@ -3631,7 +3654,7 @@ void Parser::ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod) {
// Append the current token at the end of the new token stream so that it
// doesn't get lost.
LM.Toks.push_back(Tok);
- PP.EnterTokenStream(LM.Toks, true);
+ PP.EnterTokenStream(LM.Toks, true, /*IsReinject*/true);
// Consume the previously pushed token.
ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
diff --git a/lib/Parse/ParseOpenMP.cpp b/lib/Parse/ParseOpenMP.cpp
index dd2a8aae9f2f..52a68f6d6935 100644
--- a/lib/Parse/ParseOpenMP.cpp
+++ b/lib/Parse/ParseOpenMP.cpp
@@ -1,9 +1,8 @@
//===--- ParseOpenMP.cpp - OpenMP directives parsing ----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
@@ -41,18 +40,21 @@ enum OpenMPDirectiveKindEx {
OMPD_update,
OMPD_distribute_parallel,
OMPD_teams_distribute_parallel,
- OMPD_target_teams_distribute_parallel
+ OMPD_target_teams_distribute_parallel,
+ OMPD_mapper,
};
-class ThreadprivateListParserHelper final {
+class DeclDirectiveListParserHelper final {
SmallVector<Expr *, 4> Identifiers;
Parser *P;
+ OpenMPDirectiveKind Kind;
public:
- ThreadprivateListParserHelper(Parser *P) : P(P) {}
+ DeclDirectiveListParserHelper(Parser *P, OpenMPDirectiveKind Kind)
+ : P(P), Kind(Kind) {}
void operator()(CXXScopeSpec &SS, DeclarationNameInfo NameInfo) {
- ExprResult Res =
- P->getActions().ActOnOpenMPIdExpression(P->getCurScope(), SS, NameInfo);
+ ExprResult Res = P->getActions().ActOnOpenMPIdExpression(
+ P->getCurScope(), SS, NameInfo, Kind);
if (Res.isUsable())
Identifiers.push_back(Res.get());
}
@@ -77,6 +79,7 @@ static unsigned getOpenMPDirectiveKindEx(StringRef S) {
.Case("point", OMPD_point)
.Case("reduction", OMPD_reduction)
.Case("update", OMPD_update)
+ .Case("mapper", OMPD_mapper)
.Default(OMPD_unknown);
}
@@ -87,6 +90,7 @@ static OpenMPDirectiveKind parseOpenMPDirectiveKind(Parser &P) {
static const unsigned F[][3] = {
{OMPD_cancellation, OMPD_point, OMPD_cancellation_point},
{OMPD_declare, OMPD_reduction, OMPD_declare_reduction},
+ {OMPD_declare, OMPD_mapper, OMPD_declare_mapper},
{OMPD_declare, OMPD_simd, OMPD_declare_simd},
{OMPD_declare, OMPD_target, OMPD_declare_target},
{OMPD_distribute, OMPD_parallel, OMPD_distribute_parallel},
@@ -422,21 +426,19 @@ void Parser::ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm) {
CommaLocsTy CommaLocs;
SourceLocation LParLoc = T.getOpenLocation();
- if (ParseExpressionList(
- Exprs, CommaLocs, [this, OmpPrivParm, LParLoc, &Exprs] {
- QualType PreferredType = Actions.ProduceConstructorSignatureHelp(
- getCurScope(),
- OmpPrivParm->getType()->getCanonicalTypeInternal(),
- OmpPrivParm->getLocation(), Exprs, LParLoc);
- CalledSignatureHelp = true;
- Actions.CodeCompleteExpression(getCurScope(), PreferredType);
- })) {
- if (PP.isCodeCompletionReached() && !CalledSignatureHelp) {
- Actions.ProduceConstructorSignatureHelp(
- getCurScope(), OmpPrivParm->getType()->getCanonicalTypeInternal(),
- OmpPrivParm->getLocation(), Exprs, LParLoc);
- CalledSignatureHelp = true;
- }
+ auto RunSignatureHelp = [this, OmpPrivParm, LParLoc, &Exprs]() {
+ QualType PreferredType = Actions.ProduceConstructorSignatureHelp(
+ getCurScope(), OmpPrivParm->getType()->getCanonicalTypeInternal(),
+ OmpPrivParm->getLocation(), Exprs, LParLoc);
+ CalledSignatureHelp = true;
+ return PreferredType;
+ };
+ if (ParseExpressionList(Exprs, CommaLocs, [&] {
+ PreferredType.enterFunctionArgument(Tok.getLocation(),
+ RunSignatureHelp);
+ })) {
+ if (PP.isCodeCompletionReached() && !CalledSignatureHelp)
+ RunSignatureHelp();
Actions.ActOnInitializerError(OmpPrivParm);
SkipUntil(tok::r_paren, tok::annot_pragma_openmp_end, StopBeforeMatch);
} else {
@@ -470,6 +472,141 @@ void Parser::ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm) {
}
}
+/// Parses 'omp declare mapper' directive.
+///
+/// declare-mapper-directive:
+/// annot_pragma_openmp 'declare' 'mapper' '(' [<mapper-identifier> ':']
+/// <type> <var> ')' [<clause>[[,] <clause>] ... ]
+/// annot_pragma_openmp_end
+/// <mapper-identifier> and <var> are base language identifiers.
+///
+Parser::DeclGroupPtrTy
+Parser::ParseOpenMPDeclareMapperDirective(AccessSpecifier AS) {
+ bool IsCorrect = true;
+ // Parse '('
+ BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
+ if (T.expectAndConsume(diag::err_expected_lparen_after,
+ getOpenMPDirectiveName(OMPD_declare_mapper))) {
+ SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
+ return DeclGroupPtrTy();
+ }
+
+ // Parse <mapper-identifier>
+ auto &DeclNames = Actions.getASTContext().DeclarationNames;
+ DeclarationName MapperId;
+ if (PP.LookAhead(0).is(tok::colon)) {
+ if (Tok.isNot(tok::identifier) && Tok.isNot(tok::kw_default)) {
+ Diag(Tok.getLocation(), diag::err_omp_mapper_illegal_identifier);
+ IsCorrect = false;
+ } else {
+ MapperId = DeclNames.getIdentifier(Tok.getIdentifierInfo());
+ }
+ ConsumeToken();
+ // Consume ':'.
+ ExpectAndConsume(tok::colon);
+ } else {
+ // If no mapper identifier is provided, its name is "default" by default
+ MapperId =
+ DeclNames.getIdentifier(&Actions.getASTContext().Idents.get("default"));
+ }
+
+ if (!IsCorrect && Tok.is(tok::annot_pragma_openmp_end))
+ return DeclGroupPtrTy();
+
+ // Parse <type> <var>
+ DeclarationName VName;
+ QualType MapperType;
+ SourceRange Range;
+ TypeResult ParsedType = parseOpenMPDeclareMapperVarDecl(Range, VName, AS);
+ if (ParsedType.isUsable())
+ MapperType =
+ Actions.ActOnOpenMPDeclareMapperType(Range.getBegin(), ParsedType);
+ if (MapperType.isNull())
+ IsCorrect = false;
+ if (!IsCorrect) {
+ SkipUntil(tok::annot_pragma_openmp_end, Parser::StopBeforeMatch);
+ return DeclGroupPtrTy();
+ }
+
+ // Consume ')'.
+ IsCorrect &= !T.consumeClose();
+ if (!IsCorrect) {
+ SkipUntil(tok::annot_pragma_openmp_end, Parser::StopBeforeMatch);
+ return DeclGroupPtrTy();
+ }
+
+ // Enter scope.
+ OMPDeclareMapperDecl *DMD = Actions.ActOnOpenMPDeclareMapperDirectiveStart(
+ getCurScope(), Actions.getCurLexicalContext(), MapperId, MapperType,
+ Range.getBegin(), VName, AS);
+ DeclarationNameInfo DirName;
+ SourceLocation Loc = Tok.getLocation();
+ unsigned ScopeFlags = Scope::FnScope | Scope::DeclScope |
+ Scope::CompoundStmtScope | Scope::OpenMPDirectiveScope;
+ ParseScope OMPDirectiveScope(this, ScopeFlags);
+ Actions.StartOpenMPDSABlock(OMPD_declare_mapper, DirName, getCurScope(), Loc);
+
+ // Add the mapper variable declaration.
+ Actions.ActOnOpenMPDeclareMapperDirectiveVarDecl(
+ DMD, getCurScope(), MapperType, Range.getBegin(), VName);
+
+ // Parse map clauses.
+ SmallVector<OMPClause *, 6> Clauses;
+ while (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ OpenMPClauseKind CKind = Tok.isAnnotation()
+ ? OMPC_unknown
+ : getOpenMPClauseKind(PP.getSpelling(Tok));
+ Actions.StartOpenMPClause(CKind);
+ OMPClause *Clause =
+ ParseOpenMPClause(OMPD_declare_mapper, CKind, Clauses.size() == 0);
+ if (Clause)
+ Clauses.push_back(Clause);
+ else
+ IsCorrect = false;
+ // Skip ',' if any.
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ Actions.EndOpenMPClause();
+ }
+ if (Clauses.empty()) {
+ Diag(Tok, diag::err_omp_expected_clause)
+ << getOpenMPDirectiveName(OMPD_declare_mapper);
+ IsCorrect = false;
+ }
+
+ // Exit scope.
+ Actions.EndOpenMPDSABlock(nullptr);
+ OMPDirectiveScope.Exit();
+
+ DeclGroupPtrTy DGP =
+ Actions.ActOnOpenMPDeclareMapperDirectiveEnd(DMD, getCurScope(), Clauses);
+ if (!IsCorrect)
+ return DeclGroupPtrTy();
+ return DGP;
+}
+
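For orientation, the source-level shape this routine parses (OpenMP 5.0 'declare mapper', which requires at least one clause); a sketch with invented names, not part of the patch:

struct Vec { int len; double *data; };
#pragma omp declare mapper(vmap : Vec v) map(v.len, v.data[0:v.len])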
+TypeResult Parser::parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
+ DeclarationName &Name,
+ AccessSpecifier AS) {
+ // Parse the common declaration-specifiers piece.
+ Parser::DeclSpecContext DSC = Parser::DeclSpecContext::DSC_type_specifier;
+ DeclSpec DS(AttrFactory);
+ ParseSpecifierQualifierList(DS, AS, DSC);
+
+ // Parse the declarator.
+ DeclaratorContext Context = DeclaratorContext::PrototypeContext;
+ Declarator DeclaratorInfo(DS, Context);
+ ParseDeclarator(DeclaratorInfo);
+ Range = DeclaratorInfo.getSourceRange();
+ if (DeclaratorInfo.getIdentifier() == nullptr) {
+ Diag(Tok.getLocation(), diag::err_omp_mapper_expected_declarator);
+ return true;
+ }
+ Name = Actions.GetNameForDeclarator(DeclaratorInfo).getName();
+
+ return Actions.ActOnOpenMPDeclareMapperVarDecl(getCurScope(), DeclaratorInfo);
+}
+
namespace {
/// RAII that recreates function context for correct parsing of clauses of
/// 'declare simd' construct.
@@ -610,8 +747,9 @@ static bool parseDeclareSimdClauses(
Parser::DeclGroupPtrTy
Parser::ParseOMPDeclareSimdClauses(Parser::DeclGroupPtrTy Ptr,
CachedTokens &Toks, SourceLocation Loc) {
- PP.EnterToken(Tok);
- PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true);
+ PP.EnterToken(Tok, /*IsReinject*/ true);
+ PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true,
+ /*IsReinject*/ true);
// Consume the previously pushed token.
ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
@@ -704,10 +842,19 @@ void Parser::ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
/// annot_pragma_openmp 'threadprivate' simple-variable-list
/// annot_pragma_openmp_end
///
+/// allocate-directive:
+/// annot_pragma_openmp 'allocate' simple-variable-list [<clause>]
+/// annot_pragma_openmp_end
+///
/// declare-reduction-directive:
/// annot_pragma_openmp 'declare' 'reduction' [...]
/// annot_pragma_openmp_end
///
+/// declare-mapper-directive:
+/// annot_pragma_openmp 'declare' 'mapper' '(' [<mapper-identifier> ':']
+/// <type> <var> ')' [<clause>[[,] <clause>] ... ]
+/// annot_pragma_openmp_end
+///
/// declare-simd-directive:
/// annot_pragma_openmp 'declare simd' {<clause> [,]}
/// annot_pragma_openmp_end
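A sketch of the allocate-directive form documented above, with illustrative variables and one of the predefined OpenMP 5.0 allocators; the allocator clause is optional:

  #include <omp.h>

  int a, b;
  #pragma omp allocate(a)
  #pragma omp allocate(b) allocator(omp_high_bw_mem_alloc)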
@@ -729,13 +876,14 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
switch (DKind) {
case OMPD_threadprivate: {
ConsumeToken();
- ThreadprivateListParserHelper Helper(this);
- if (!ParseOpenMPSimpleVarList(OMPD_threadprivate, Helper, true)) {
+ DeclDirectiveListParserHelper Helper(this, DKind);
+ if (!ParseOpenMPSimpleVarList(DKind, Helper,
+ /*AllowScopeSpecifier=*/true)) {
// The last seen token is annot_pragma_openmp_end - need to check for
// extra tokens.
if (Tok.isNot(tok::annot_pragma_openmp_end)) {
Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(OMPD_threadprivate);
+ << getOpenMPDirectiveName(DKind);
SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
}
// Skip the last annot_pragma_openmp_end.
@@ -745,13 +893,59 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
}
break;
}
+ case OMPD_allocate: {
+ ConsumeToken();
+ DeclDirectiveListParserHelper Helper(this, DKind);
+ if (!ParseOpenMPSimpleVarList(DKind, Helper,
+ /*AllowScopeSpecifier=*/true)) {
+ SmallVector<OMPClause *, 1> Clauses;
+ if (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>,
+ OMPC_unknown + 1>
+ FirstClauses(OMPC_unknown + 1);
+ while (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ OpenMPClauseKind CKind =
+ Tok.isAnnotation() ? OMPC_unknown
+ : getOpenMPClauseKind(PP.getSpelling(Tok));
+ Actions.StartOpenMPClause(CKind);
+ OMPClause *Clause = ParseOpenMPClause(OMPD_allocate, CKind,
+ !FirstClauses[CKind].getInt());
+ SkipUntil(tok::comma, tok::identifier, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ FirstClauses[CKind].setInt(true);
+ if (Clause != nullptr)
+ Clauses.push_back(Clause);
+ if (Tok.is(tok::annot_pragma_openmp_end)) {
+ Actions.EndOpenMPClause();
+ break;
+ }
+ // Skip ',' if any.
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ Actions.EndOpenMPClause();
+ }
+ // The last seen token is annot_pragma_openmp_end - need to check for
+ // extra tokens.
+ if (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
+ << getOpenMPDirectiveName(DKind);
+ SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
+ }
+ }
+ // Skip the last annot_pragma_openmp_end.
+ ConsumeAnnotationToken();
+ return Actions.ActOnOpenMPAllocateDirective(Loc, Helper.getIdentifiers(),
+ Clauses);
+ }
+ break;
+ }
case OMPD_requires: {
SourceLocation StartLoc = ConsumeToken();
SmallVector<OMPClause *, 5> Clauses;
SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>, OMPC_unknown + 1>
FirstClauses(OMPC_unknown + 1);
if (Tok.is(tok::annot_pragma_openmp_end)) {
- Diag(Tok, diag::err_omp_expected_clause)
+ Diag(Tok, diag::err_omp_expected_clause)
<< getOpenMPDirectiveName(OMPD_requires);
break;
}
@@ -760,9 +954,10 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
? OMPC_unknown
: getOpenMPClauseKind(PP.getSpelling(Tok));
Actions.StartOpenMPClause(CKind);
- OMPClause *Clause =
- ParseOpenMPClause(OMPD_requires, CKind, !FirstClauses[CKind].getInt());
- SkipUntil(tok::comma, tok::identifier, tok::annot_pragma_openmp_end, StopBeforeMatch);
+ OMPClause *Clause = ParseOpenMPClause(OMPD_requires, CKind,
+ !FirstClauses[CKind].getInt());
+ SkipUntil(tok::comma, tok::identifier, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
FirstClauses[CKind].setInt(true);
if (Clause != nullptr)
Clauses.push_back(Clause);
@@ -801,6 +996,15 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
return Res;
}
break;
+ case OMPD_declare_mapper: {
+ ConsumeToken();
+ if (DeclGroupPtrTy Res = ParseOpenMPDeclareMapperDirective(AS)) {
+ // Skip the last annot_pragma_openmp_end.
+ ConsumeAnnotationToken();
+ return Res;
+ }
+ break;
+ }
case OMPD_declare_simd: {
// The syntax is:
// { #pragma omp declare simd }
@@ -949,12 +1153,21 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
/// annot_pragma_openmp 'threadprivate' simple-variable-list
/// annot_pragma_openmp_end
///
+/// allocate-directive:
+/// annot_pragma_openmp 'allocate' simple-variable-list
+/// annot_pragma_openmp_end
+///
/// declare-reduction-directive:
/// annot_pragma_openmp 'declare' 'reduction' '(' <reduction_id> ':'
/// <type> {',' <type>} ':' <expression> ')' ['initializer' '('
/// ('omp_priv' '=' <expression>|<function_call>) ')']
/// annot_pragma_openmp_end
///
+/// declare-mapper-directive:
+/// annot_pragma_openmp 'declare' 'mapper' '(' [<mapper-identifier> ':']
+/// <type> <var> ')' [<clause>[[,] <clause>] ... ]
+/// annot_pragma_openmp_end
+///
/// executable-directive:
/// annot_pragma_openmp 'parallel' | 'simd' | 'for' | 'sections' |
/// 'section' | 'single' | 'master' | 'critical' [ '(' <name> ')' ] |
@@ -976,8 +1189,8 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
/// 'target teams distribute simd' {clause}
/// annot_pragma_openmp_end
///
-StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
- AllowedConstructsKind Allowed) {
+StmtResult
+Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
assert(Tok.is(tok::annot_pragma_openmp) && "Not an OpenMP directive!");
ParenBraceBracketBalancer BalancerRAIIObj(*this);
SmallVector<OMPClause *, 5> Clauses;
@@ -996,18 +1209,21 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
switch (DKind) {
case OMPD_threadprivate: {
- if (Allowed != ACK_Any) {
+ // FIXME: Should this be permitted in C++?
+ if ((StmtCtx & ParsedStmtContext::AllowDeclarationsInC) ==
+ ParsedStmtContext()) {
Diag(Tok, diag::err_omp_immediate_directive)
<< getOpenMPDirectiveName(DKind) << 0;
}
ConsumeToken();
- ThreadprivateListParserHelper Helper(this);
- if (!ParseOpenMPSimpleVarList(OMPD_threadprivate, Helper, false)) {
+ DeclDirectiveListParserHelper Helper(this, DKind);
+ if (!ParseOpenMPSimpleVarList(DKind, Helper,
+ /*AllowScopeSpecifier=*/false)) {
// The last seen token is annot_pragma_openmp_end - need to check for
// extra tokens.
if (Tok.isNot(tok::annot_pragma_openmp_end)) {
Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(OMPD_threadprivate);
+ << getOpenMPDirectiveName(DKind);
SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
}
DeclGroupPtrTy Res = Actions.ActOnOpenMPThreadprivateDirective(
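Throughout this change, "flag not set" on the new ParsedStmtContext bitmask enum is written as a comparison against the default-constructed (empty) value. A self-contained sketch of that idiom with an illustrative enum, not the real definition from Parser.h:

  #include "llvm/ADT/BitmaskEnum.h"

  namespace demo {
  enum class Ctx {
    AllowDeclarationsInC = 0x1,
    AllowStandaloneOpenMPDirectives = 0x2,
    LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/AllowStandaloneOpenMPDirectives)
  };
  LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();

  // Mirrors the checks above: true when the flag is absent from StmtCtx.
  bool declarationsNotAllowed(Ctx StmtCtx) {
    return (StmtCtx & Ctx::AllowDeclarationsInC) == Ctx();
  }
  } // namespace demo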
@@ -1017,6 +1233,58 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
SkipUntil(tok::annot_pragma_openmp_end);
break;
}
+ case OMPD_allocate: {
+ // FIXME: Should this be permitted in C++?
+ if ((StmtCtx & ParsedStmtContext::AllowDeclarationsInC) ==
+ ParsedStmtContext()) {
+ Diag(Tok, diag::err_omp_immediate_directive)
+ << getOpenMPDirectiveName(DKind) << 0;
+ }
+ ConsumeToken();
+ DeclDirectiveListParserHelper Helper(this, DKind);
+ if (!ParseOpenMPSimpleVarList(DKind, Helper,
+ /*AllowScopeSpecifier=*/false)) {
+ SmallVector<OMPClause *, 1> Clauses;
+ if (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>,
+ OMPC_unknown + 1>
+ FirstClauses(OMPC_unknown + 1);
+ while (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ OpenMPClauseKind CKind =
+ Tok.isAnnotation() ? OMPC_unknown
+ : getOpenMPClauseKind(PP.getSpelling(Tok));
+ Actions.StartOpenMPClause(CKind);
+ OMPClause *Clause = ParseOpenMPClause(OMPD_allocate, CKind,
+ !FirstClauses[CKind].getInt());
+ SkipUntil(tok::comma, tok::identifier, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ FirstClauses[CKind].setInt(true);
+ if (Clause != nullptr)
+ Clauses.push_back(Clause);
+ if (Tok.is(tok::annot_pragma_openmp_end)) {
+ Actions.EndOpenMPClause();
+ break;
+ }
+ // Skip ',' if any.
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ Actions.EndOpenMPClause();
+ }
+ // The last seen token is annot_pragma_openmp_end - need to check for
+ // extra tokens.
+ if (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
+ << getOpenMPDirectiveName(DKind);
+ SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
+ }
+ }
+ DeclGroupPtrTy Res = Actions.ActOnOpenMPAllocateDirective(
+ Loc, Helper.getIdentifiers(), Clauses);
+ Directive = Actions.ActOnDeclStmt(Res, Loc, Tok.getLocation());
+ }
+ SkipUntil(tok::annot_pragma_openmp_end);
+ break;
+ }
case OMPD_declare_reduction:
ConsumeToken();
if (DeclGroupPtrTy Res =
@@ -1035,12 +1303,24 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
SkipUntil(tok::annot_pragma_openmp_end);
}
break;
+ case OMPD_declare_mapper: {
+ ConsumeToken();
+ if (DeclGroupPtrTy Res =
+ ParseOpenMPDeclareMapperDirective(/*AS=*/AS_none)) {
+ // Skip the last annot_pragma_openmp_end.
+ ConsumeAnnotationToken();
+ Directive = Actions.ActOnDeclStmt(Res, Loc, Tok.getLocation());
+ } else {
+ SkipUntil(tok::annot_pragma_openmp_end);
+ }
+ break;
+ }
case OMPD_flush:
if (PP.LookAhead(0).is(tok::l_paren)) {
FlushHasClause = true;
// Push copy of the current token back to stream to properly parse
// pseudo-clause OMPFlushClause.
- PP.EnterToken(Tok);
+ PP.EnterToken(Tok, /*IsReinject*/ true);
}
LLVM_FALLTHROUGH;
case OMPD_taskyield:
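For the OMPD_flush case above: when '(' follows the directive name, the current token is re-entered into the stream (flagged as a re-injected, previously seen token) so the parenthesized list can be parsed as the pseudo-clause OMPFlushClause. Both forms below are accepted; the variables are illustrative:

  void sync(int &x, int &y) {
  #pragma omp flush
  #pragma omp flush(x, y)
  }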
@@ -1051,7 +1331,8 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
case OMPD_target_enter_data:
case OMPD_target_exit_data:
case OMPD_target_update:
- if (Allowed == ACK_StatementsOpenMPNonStandalone) {
+ if ((StmtCtx & ParsedStmtContext::AllowStandaloneOpenMPDirectives) ==
+ ParsedStmtContext()) {
Diag(Tok, diag::err_omp_immediate_directive)
<< getOpenMPDirectiveName(DKind) << 0;
}
@@ -1154,7 +1435,8 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
// If the depend clause is specified, the ordered construct is a stand-alone
// directive.
if (DKind == OMPD_ordered && FirstClauses[OMPC_depend].getInt()) {
- if (Allowed == ACK_StatementsOpenMPNonStandalone) {
+ if ((StmtCtx & ParsedStmtContext::AllowStandaloneOpenMPDirectives) ==
+ ParsedStmtContext()) {
Diag(Loc, diag::err_omp_immediate_directive)
<< getOpenMPDirectiveName(DKind) << 1
<< getOpenMPClauseName(OMPC_depend);
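The depend check above classifies 'ordered' with a depend clause as a stand-alone directive, i.e. one with no associated block. A sketch of the doacross pattern this covers; the loop bounds and the work() call are illustrative:

  void work(int i);
  void doacross(int n) {
  #pragma omp for ordered(1)
    for (int i = 1; i < n; ++i) {
  #pragma omp ordered depend(sink: i - 1)   // stand-alone: no associated block
      work(i);
  #pragma omp ordered depend(source)        // stand-alone: no associated block
    }
  }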
@@ -1281,7 +1563,7 @@ bool Parser::ParseOpenMPSimpleVarList(
/// thread_limit-clause | priority-clause | grainsize-clause |
/// nogroup-clause | num_tasks-clause | hint-clause | to-clause |
/// from-clause | is_device_ptr-clause | task_reduction-clause |
-/// in_reduction-clause
+/// in_reduction-clause | allocator-clause | allocate-clause
///
OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause) {
@@ -1310,6 +1592,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_grainsize:
case OMPC_num_tasks:
case OMPC_hint:
+ case OMPC_allocator:
// OpenMP [2.5, Restrictions]
// At most one num_threads clause can appear on the directive.
// OpenMP [2.8.1, simd construct, Restrictions]
@@ -1330,6 +1613,8 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
// At most one grainsize clause can appear on the directive.
// OpenMP [2.9.2, taskloop Construct, Restrictions]
// At most one num_tasks clause can appear on the directive.
+ // OpenMP [2.11.3, allocate Directive, Restrictions]
+ // At most one allocator clause can appear on the directive.
if (!FirstClause) {
Diag(Tok, diag::err_omp_more_one_clause)
<< getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
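The FirstClauses bookkeeping feeding this check is what rejects a repeated unique clause, e.g. (illustrative input):

  #include <omp.h>

  int x;
  #pragma omp allocate(x) allocator(omp_default_mem_alloc) allocator(omp_default_mem_alloc)
  // rejected by the check above: at most one 'allocator' clause per allocate directive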
@@ -1424,6 +1709,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_from:
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
+ case OMPC_allocate:
Clause = ParseOpenMPVarListClause(DKind, CKind, WrongDirective);
break;
case OMPC_unknown:
@@ -1496,6 +1782,9 @@ ExprResult Parser::ParseOpenMPParensExpr(StringRef ClauseName,
/// hint-clause:
/// 'hint' '(' expression ')'
///
+/// allocator-clause:
+/// 'allocator' '(' expression ')'
+///
OMPClause *Parser::ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly) {
SourceLocation Loc = ConsumeToken();
@@ -1787,38 +2076,70 @@ static OpenMPMapModifierKind isMapModifier(Parser &P) {
return TypeModifier;
}
+/// Parse the mapper modifier in map, to, and from clauses.
+bool Parser::parseMapperModifier(OpenMPVarListDataTy &Data) {
+ // Parse '('.
+ BalancedDelimiterTracker T(*this, tok::l_paren, tok::colon);
+ if (T.expectAndConsume(diag::err_expected_lparen_after, "mapper")) {
+ SkipUntil(tok::colon, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ return true;
+ }
+ // Parse mapper-identifier
+ if (getLangOpts().CPlusPlus)
+ ParseOptionalCXXScopeSpecifier(Data.ReductionOrMapperIdScopeSpec,
+ /*ObjectType=*/nullptr,
+ /*EnteringContext=*/false);
+ if (Tok.isNot(tok::identifier) && Tok.isNot(tok::kw_default)) {
+ Diag(Tok.getLocation(), diag::err_omp_mapper_illegal_identifier);
+ SkipUntil(tok::colon, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ return true;
+ }
+ auto &DeclNames = Actions.getASTContext().DeclarationNames;
+ Data.ReductionOrMapperId = DeclarationNameInfo(
+ DeclNames.getIdentifier(Tok.getIdentifierInfo()), Tok.getLocation());
+ ConsumeToken();
+ // Parse ')'.
+ return T.consumeClose();
+}
+
/// Parse map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
-/// where, map-type-modifier ::= always | close
-static void parseMapTypeModifiers(Parser &P,
- Parser::OpenMPVarListDataTy &Data) {
- Preprocessor &PP = P.getPreprocessor();
- while (P.getCurToken().isNot(tok::colon)) {
- Token Tok = P.getCurToken();
- OpenMPMapModifierKind TypeModifier = isMapModifier(P);
+/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
+bool Parser::parseMapTypeModifiers(OpenMPVarListDataTy &Data) {
+ while (getCurToken().isNot(tok::colon)) {
+ OpenMPMapModifierKind TypeModifier = isMapModifier(*this);
if (TypeModifier == OMPC_MAP_MODIFIER_always ||
TypeModifier == OMPC_MAP_MODIFIER_close) {
Data.MapTypeModifiers.push_back(TypeModifier);
Data.MapTypeModifiersLoc.push_back(Tok.getLocation());
- P.ConsumeToken();
+ ConsumeToken();
+ } else if (TypeModifier == OMPC_MAP_MODIFIER_mapper) {
+ Data.MapTypeModifiers.push_back(TypeModifier);
+ Data.MapTypeModifiersLoc.push_back(Tok.getLocation());
+ ConsumeToken();
+ if (parseMapperModifier(Data))
+ return true;
} else {
// For the case of unknown map-type-modifier or a map-type.
// Map-type is followed by a colon; the function returns when it
// encounters a token followed by a colon.
if (Tok.is(tok::comma)) {
- P.Diag(Tok, diag::err_omp_map_type_modifier_missing);
- P.ConsumeToken();
+ Diag(Tok, diag::err_omp_map_type_modifier_missing);
+ ConsumeToken();
continue;
}
// Potential map-type token as it is followed by a colon.
if (PP.LookAhead(0).is(tok::colon))
- return;
- P.Diag(Tok, diag::err_omp_unknown_map_type_modifier);
- P.ConsumeToken();
+ return false;
+ Diag(Tok, diag::err_omp_unknown_map_type_modifier);
+ ConsumeToken();
}
- if (P.getCurToken().is(tok::comma))
- P.ConsumeToken();
+ if (getCurToken().is(tok::comma))
+ ConsumeToken();
}
+ return false;
}
/// Checks if the token is a valid map-type.
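With mapper() accepted as a map-type-modifier above, a map clause can name a user-defined mapper. A self-contained sketch; the type, mapper name, and function are illustrative:

  struct Vec { int len; double *data; };
  #pragma omp declare mapper(vec_map : Vec v) map(v.len) map(v.data[0:v.len])

  void use(Vec &v) {
  #pragma omp target map(mapper(vec_map), tofrom: v)
    { }
  #pragma omp target map(always, mapper(default), to: v)
    { }
  }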
@@ -1835,7 +2156,7 @@ static OpenMPMapClauseKind isMapType(Parser &P) {
/// Parse map-type in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
-/// where, map-type ::= to | from | tofrom | alloc | release | delete
+/// where, map-type ::= to | from | tofrom | alloc | release | delete
static void parseMapType(Parser &P, Parser::OpenMPVarListDataTy &Data) {
Token Tok = P.getCurToken();
if (Tok.is(tok::colon)) {
@@ -1855,6 +2176,7 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
OpenMPVarListDataTy &Data) {
UnqualifiedId UnqualifiedReductionId;
bool InvalidReductionId = false;
+ bool IsInvalidMapperModifier = false;
// Parse '('.
BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
@@ -1870,11 +2192,11 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
Kind == OMPC_in_reduction) {
ColonProtectionRAIIObject ColonRAII(*this);
if (getLangOpts().CPlusPlus)
- ParseOptionalCXXScopeSpecifier(Data.ReductionIdScopeSpec,
+ ParseOptionalCXXScopeSpecifier(Data.ReductionOrMapperIdScopeSpec,
/*ObjectType=*/nullptr,
/*EnteringContext=*/false);
- InvalidReductionId = ParseReductionId(*this, Data.ReductionIdScopeSpec,
- UnqualifiedReductionId);
+ InvalidReductionId = ParseReductionId(
+ *this, Data.ReductionOrMapperIdScopeSpec, UnqualifiedReductionId);
if (InvalidReductionId) {
SkipUntil(tok::colon, tok::r_paren, tok::annot_pragma_openmp_end,
StopBeforeMatch);
@@ -1884,7 +2206,7 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
else
Diag(Tok, diag::warn_pragma_expected_colon) << "reduction identifier";
if (!InvalidReductionId)
- Data.ReductionId =
+ Data.ReductionOrMapperId =
Actions.GetNameFromUnqualifiedId(UnqualifiedReductionId);
} else if (Kind == OMPC_depend) {
// Handle dependency type for depend clause.
@@ -1943,8 +2265,11 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
// Only parse map-type-modifier[s] and map-type if a colon is present in
// the map clause.
if (ColonPresent) {
- parseMapTypeModifiers(*this, Data);
- parseMapType(*this, Data);
+ IsInvalidMapperModifier = parseMapTypeModifiers(Data);
+ if (!IsInvalidMapperModifier)
+ parseMapType(*this, Data);
+ else
+ SkipUntil(tok::colon, tok::annot_pragma_openmp_end, StopBeforeMatch);
}
if (Data.MapType == OMPC_MAP_unknown) {
Data.MapType = OMPC_MAP_tofrom;
@@ -1953,6 +2278,60 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
if (Tok.is(tok::colon))
Data.ColonLoc = ConsumeToken();
+ } else if (Kind == OMPC_to || Kind == OMPC_from) {
+ if (Tok.is(tok::identifier)) {
+ bool IsMapperModifier = false;
+ if (Kind == OMPC_to) {
+ auto Modifier = static_cast<OpenMPToModifierKind>(
+ getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok)));
+ if (Modifier == OMPC_TO_MODIFIER_mapper)
+ IsMapperModifier = true;
+ } else {
+ auto Modifier = static_cast<OpenMPFromModifierKind>(
+ getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok)));
+ if (Modifier == OMPC_FROM_MODIFIER_mapper)
+ IsMapperModifier = true;
+ }
+ if (IsMapperModifier) {
+ // Parse the mapper modifier.
+ ConsumeToken();
+ IsInvalidMapperModifier = parseMapperModifier(Data);
+ if (Tok.isNot(tok::colon)) {
+ if (!IsInvalidMapperModifier)
+ Diag(Tok, diag::warn_pragma_expected_colon) << ")";
+ SkipUntil(tok::colon, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ }
+ // Consume ':'.
+ if (Tok.is(tok::colon))
+ ConsumeToken();
+ }
+ }
+ } else if (Kind == OMPC_allocate) {
+ // Handle optional allocator expression followed by colon delimiter.
+ ColonProtectionRAIIObject ColonRAII(*this);
+ TentativeParsingAction TPA(*this);
+ ExprResult Tail =
+ Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
+ Tail = Actions.ActOnFinishFullExpr(Tail.get(), T.getOpenLocation(),
+ /*DiscardedValue=*/false);
+ if (Tail.isUsable()) {
+ if (Tok.is(tok::colon)) {
+ Data.TailExpr = Tail.get();
+ Data.ColonLoc = ConsumeToken();
+ TPA.Commit();
+ } else {
+ // colon not found, no allocator specified, parse only list of
+ // variables.
+ TPA.Revert();
+ }
+ } else {
+ // Parsing was unsuccessful, revert and skip to the end of clause or
+ // directive.
+ TPA.Revert();
+ SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ }
}
bool IsComma =
@@ -2013,7 +2392,8 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
return (Kind == OMPC_depend && Data.DepKind != OMPC_DEPEND_unknown &&
Vars.empty()) ||
(Kind != OMPC_depend && Kind != OMPC_map && Vars.empty()) ||
- (MustHaveTail && !Data.TailExpr) || InvalidReductionId;
+ (MustHaveTail && !Data.TailExpr) || InvalidReductionId ||
+ IsInvalidMapperModifier;
}
/// Parsing of OpenMP clause 'private', 'firstprivate', 'lastprivate',
@@ -2046,15 +2426,18 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
/// 'depend' '(' in | out | inout : list | source ')'
/// map-clause:
/// 'map' '(' [ [ always [,] ] [ close [,] ]
+/// [ mapper '(' mapper-identifier ')' [,] ]
/// to | from | tofrom | alloc | release | delete ':' ] list ')';
/// to-clause:
-/// 'to' '(' list ')'
+/// 'to' '(' [ mapper '(' mapper-identifier ')' ':' ] list ')'
/// from-clause:
-/// 'from' '(' list ')'
+/// 'from' '(' [ mapper '(' mapper-identifier ')' ':' ] list ')'
/// use_device_ptr-clause:
/// 'use_device_ptr' '(' list ')'
/// is_device_ptr-clause:
/// 'is_device_ptr' '(' list ')'
+/// allocate-clause:
+/// 'allocate' '(' [ allocator ':' ] list ')'
///
/// For 'linear' clause linear-list may have the following forms:
/// list
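Sketches of the updated to-clause, from-clause, and allocate-clause forms documented above; the mapper name, allocator, and variables are illustrative:

  #include <omp.h>

  struct Vec { int len; double *data; };
  #pragma omp declare mapper(vec_map : Vec v) map(v.len) map(v.data[0:v.len])

  void update(Vec &v, int x) {
  #pragma omp target update to(mapper(vec_map): v)
  #pragma omp target update from(mapper(vec_map): v)
  #pragma omp parallel firstprivate(x) allocate(omp_low_lat_mem_alloc: x)
    { }
  }

For the allocate clause, the allocator expression before ':' is optional, and each list item must also appear in a data-sharing clause (here firstprivate) on the same directive.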
@@ -2073,10 +2456,11 @@ OMPClause *Parser::ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
if (ParseOnly)
return nullptr;
+ OMPVarListLocTy Locs(Loc, LOpen, Data.RLoc);
return Actions.ActOnOpenMPVarListClause(
- Kind, Vars, Data.TailExpr, Loc, LOpen, Data.ColonLoc, Data.RLoc,
- Data.ReductionIdScopeSpec, Data.ReductionId, Data.DepKind, Data.LinKind,
- Data.MapTypeModifiers, Data.MapTypeModifiersLoc, Data.MapType,
- Data.IsMapTypeImplicit, Data.DepLinMapLoc);
+ Kind, Vars, Data.TailExpr, Locs, Data.ColonLoc,
+ Data.ReductionOrMapperIdScopeSpec, Data.ReductionOrMapperId, Data.DepKind,
+ Data.LinKind, Data.MapTypeModifiers, Data.MapTypeModifiersLoc,
+ Data.MapType, Data.IsMapTypeImplicit, Data.DepLinMapLoc);
}
diff --git a/lib/Parse/ParsePragma.cpp b/lib/Parse/ParsePragma.cpp
index 380eb64997a7..f81ecc738c28 100644
--- a/lib/Parse/ParsePragma.cpp
+++ b/lib/Parse/ParsePragma.cpp
@@ -1,9 +1,8 @@
//===--- ParsePragma.cpp - Language specific pragma parsing ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -27,71 +26,72 @@ namespace {
struct PragmaAlignHandler : public PragmaHandler {
explicit PragmaAlignHandler() : PragmaHandler("align") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
struct PragmaGCCVisibilityHandler : public PragmaHandler {
explicit PragmaGCCVisibilityHandler() : PragmaHandler("visibility") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
struct PragmaOptionsHandler : public PragmaHandler {
explicit PragmaOptionsHandler() : PragmaHandler("options") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
struct PragmaPackHandler : public PragmaHandler {
explicit PragmaPackHandler() : PragmaHandler("pack") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
struct PragmaClangSectionHandler : public PragmaHandler {
explicit PragmaClangSectionHandler(Sema &S)
: PragmaHandler("section"), Actions(S) {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
+
private:
Sema &Actions;
};
struct PragmaMSStructHandler : public PragmaHandler {
explicit PragmaMSStructHandler() : PragmaHandler("ms_struct") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
struct PragmaUnusedHandler : public PragmaHandler {
PragmaUnusedHandler() : PragmaHandler("unused") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
struct PragmaWeakHandler : public PragmaHandler {
explicit PragmaWeakHandler() : PragmaHandler("weak") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
struct PragmaRedefineExtnameHandler : public PragmaHandler {
explicit PragmaRedefineExtnameHandler() : PragmaHandler("redefine_extname") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
struct PragmaOpenCLExtensionHandler : public PragmaHandler {
PragmaOpenCLExtensionHandler() : PragmaHandler("EXTENSION") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
struct PragmaFPContractHandler : public PragmaHandler {
PragmaFPContractHandler() : PragmaHandler("FP_CONTRACT") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
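All handlers in this file switch from a bare PragmaIntroducerKind to the new PragmaIntroducer argument, which carries both the introducer kind and its source location. A minimal sketch of a handler written against the updated interface; the handler name and body are illustrative only, not part of this change:

  struct PragmaExampleHandler : public PragmaHandler {
    PragmaExampleHandler() : PragmaHandler("example") {}
    void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
                      Token &FirstToken) override {
      // Introducer.Loc is the location of the '#pragma' (or _Pragma) itself;
      // Introducer.Kind distinguishes the spellings.
      Token Tok;
      do {
        PP.Lex(Tok); // discard the rest of the pragma line
      } while (Tok.isNot(tok::eod));
    }
  };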
@@ -101,7 +101,7 @@ struct PragmaFPContractHandler : public PragmaHandler {
struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler {
PragmaSTDC_FENV_ACCESSHandler() : PragmaHandler("FENV_ACCESS") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &Tok) override {
tok::OnOffSwitch OOS;
if (PP.LexOnOffSwitch(OOS))
@@ -118,7 +118,8 @@ struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler {
Toks[0].setAnnotationEndLoc(Tok.getLocation());
Toks[0].setAnnotationValue(reinterpret_cast<void*>(
static_cast<uintptr_t>(OOS)));
- PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true);
+ PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true,
+ /*IsReinject=*/false);
}
};
@@ -126,7 +127,7 @@ struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler {
struct PragmaSTDC_CX_LIMITED_RANGEHandler : public PragmaHandler {
PragmaSTDC_CX_LIMITED_RANGEHandler() : PragmaHandler("CX_LIMITED_RANGE") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &Tok) override {
tok::OnOffSwitch OOS;
PP.LexOnOffSwitch(OOS);
@@ -137,7 +138,7 @@ struct PragmaSTDC_CX_LIMITED_RANGEHandler : public PragmaHandler {
struct PragmaSTDC_UnknownHandler : public PragmaHandler {
PragmaSTDC_UnknownHandler() = default;
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &UnknownTok) override {
// C99 6.10.6p2, unknown forms are not allowed.
PP.Diag(UnknownTok, diag::ext_stdc_pragma_ignored);
@@ -146,19 +147,19 @@ struct PragmaSTDC_UnknownHandler : public PragmaHandler {
struct PragmaFPHandler : public PragmaHandler {
PragmaFPHandler() : PragmaHandler("fp") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
struct PragmaNoOpenMPHandler : public PragmaHandler {
PragmaNoOpenMPHandler() : PragmaHandler("omp") { }
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
struct PragmaOpenMPHandler : public PragmaHandler {
PragmaOpenMPHandler() : PragmaHandler("omp") { }
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
@@ -166,8 +167,9 @@ struct PragmaOpenMPHandler : public PragmaHandler {
struct PragmaCommentHandler : public PragmaHandler {
PragmaCommentHandler(Sema &Actions)
: PragmaHandler("comment"), Actions(Actions) {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
+
private:
Sema &Actions;
};
@@ -175,27 +177,28 @@ private:
struct PragmaDetectMismatchHandler : public PragmaHandler {
PragmaDetectMismatchHandler(Sema &Actions)
: PragmaHandler("detect_mismatch"), Actions(Actions) {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
+
private:
Sema &Actions;
};
struct PragmaMSPointersToMembers : public PragmaHandler {
explicit PragmaMSPointersToMembers() : PragmaHandler("pointers_to_members") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
struct PragmaMSVtorDisp : public PragmaHandler {
explicit PragmaMSVtorDisp() : PragmaHandler("vtordisp") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
struct PragmaMSPragma : public PragmaHandler {
explicit PragmaMSPragma(const char *name) : PragmaHandler(name) {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
@@ -203,21 +206,22 @@ struct PragmaMSPragma : public PragmaHandler {
struct PragmaOptimizeHandler : public PragmaHandler {
PragmaOptimizeHandler(Sema &S)
: PragmaHandler("optimize"), Actions(S) {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
+
private:
Sema &Actions;
};
struct PragmaLoopHintHandler : public PragmaHandler {
PragmaLoopHintHandler() : PragmaHandler("loop") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
struct PragmaUnrollHintHandler : public PragmaHandler {
PragmaUnrollHintHandler(const char *name) : PragmaHandler(name) {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
@@ -227,20 +231,20 @@ struct PragmaMSRuntimeChecksHandler : public EmptyPragmaHandler {
struct PragmaMSIntrinsicHandler : public PragmaHandler {
PragmaMSIntrinsicHandler() : PragmaHandler("intrinsic") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
struct PragmaMSOptimizeHandler : public PragmaHandler {
PragmaMSOptimizeHandler() : PragmaHandler("optimize") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
struct PragmaForceCUDAHostDeviceHandler : public PragmaHandler {
PragmaForceCUDAHostDeviceHandler(Sema &Actions)
: PragmaHandler("force_cuda_host_device"), Actions(Actions) {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
private:
@@ -251,7 +255,7 @@ private:
struct PragmaAttributeHandler : public PragmaHandler {
PragmaAttributeHandler(AttributeFactory &AttrFactory)
: PragmaHandler("attribute"), AttributesForPragmaAttribute(AttrFactory) {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
/// A pool of attributes that were parsed in \#pragma clang attribute.
@@ -693,13 +697,12 @@ void Parser::HandlePragmaOpenCLExtension() {
if (Name == "all") {
if (State == Disable) {
Opt.disableAll();
- Opt.enableSupportedCore(getLangOpts().OpenCLVersion);
+ Opt.enableSupportedCore(getLangOpts());
} else {
PP.Diag(NameLoc, diag::warn_pragma_expected_predicate) << 1;
}
} else if (State == Begin) {
- if (!Opt.isKnown(Name) ||
- !Opt.isSupported(Name, getLangOpts().OpenCLVersion)) {
+ if (!Opt.isKnown(Name) || !Opt.isSupported(Name, getLangOpts())) {
Opt.support(Name);
}
Actions.setCurrentOpenCLExtension(Name);
@@ -709,9 +712,9 @@ void Parser::HandlePragmaOpenCLExtension() {
Actions.setCurrentOpenCLExtension("");
} else if (!Opt.isKnown(Name))
PP.Diag(NameLoc, diag::warn_pragma_unknown_extension) << Ident;
- else if (Opt.isSupportedExtension(Name, getLangOpts().OpenCLVersion))
+ else if (Opt.isSupportedExtension(Name, getLangOpts()))
Opt.enable(Name, State == Enable);
- else if (Opt.isSupportedCore(Name, getLangOpts().OpenCLVersion))
+ else if (Opt.isSupportedCore(Name, getLangOpts()))
PP.Diag(NameLoc, diag::warn_pragma_extension_is_core) << Ident;
else
PP.Diag(NameLoc, diag::warn_pragma_unsupported_extension) << Ident;
@@ -741,7 +744,8 @@ void Parser::HandlePragmaMSPragma() {
// Grab the tokens out of the annotation and enter them into the stream.
auto TheTokens =
(std::pair<std::unique_ptr<Token[]>, size_t> *)Tok.getAnnotationValue();
- PP.EnterTokenStream(std::move(TheTokens->first), TheTokens->second, true);
+ PP.EnterTokenStream(std::move(TheTokens->first), TheTokens->second, true,
+ /*IsReinject=*/true);
SourceLocation PragmaLocation = ConsumeAnnotationToken();
assert(Tok.isAnyIdentifier());
StringRef PragmaName = Tok.getIdentifierInfo()->getName();
@@ -1113,7 +1117,8 @@ bool Parser::HandlePragmaLoopHint(LoopHint &Hint) {
Hint.StateLoc = IdentifierLoc::create(Actions.Context, StateLoc, StateInfo);
} else {
// Enter constant expression including eof terminator into token stream.
- PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/false);
+ PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/false,
+ /*IsReinject=*/false);
ConsumeAnnotationToken();
ExprResult R = ParseConstantExpression();
@@ -1416,7 +1421,8 @@ void Parser::HandlePragmaAttribute() {
return;
}
- PP.EnterTokenStream(Info->Tokens, /*DisableMacroExpansion=*/false);
+ PP.EnterTokenStream(Info->Tokens, /*DisableMacroExpansion=*/false,
+ /*IsReinject=*/false);
ConsumeAnnotationToken();
ParsedAttributes &Attrs = Info->Attributes;
@@ -1574,7 +1580,7 @@ void Parser::HandlePragmaAttribute() {
// 'push' '(' [visibility] ')'
// 'pop'
void PragmaGCCVisibilityHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
+ PragmaIntroducer Introducer,
Token &VisTok) {
SourceLocation VisLoc = VisTok.getLocation();
@@ -1625,8 +1631,9 @@ void PragmaGCCVisibilityHandler::HandlePragma(Preprocessor &PP,
Toks[0].setLocation(VisLoc);
Toks[0].setAnnotationEndLoc(EndLoc);
Toks[0].setAnnotationValue(
- const_cast<void*>(static_cast<const void*>(VisType)));
- PP.EnterTokenStream(std::move(Toks), 1, /*DisableMacroExpansion=*/true);
+ const_cast<void *>(static_cast<const void *>(VisType)));
+ PP.EnterTokenStream(std::move(Toks), 1, /*DisableMacroExpansion=*/true,
+ /*IsReinject=*/false);
}
// #pragma pack(...) comes in the following delicious flavors:
@@ -1634,7 +1641,7 @@ void PragmaGCCVisibilityHandler::HandlePragma(Preprocessor &PP,
// pack '(' 'show' ')'
// pack '(' ('push' | 'pop') [',' identifier] [, integer] ')'
void PragmaPackHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
+ PragmaIntroducer Introducer,
Token &PackTok) {
SourceLocation PackLoc = PackTok.getLocation();
@@ -1739,13 +1746,14 @@ void PragmaPackHandler::HandlePragma(Preprocessor &PP,
Toks[0].setLocation(PackLoc);
Toks[0].setAnnotationEndLoc(RParenLoc);
Toks[0].setAnnotationValue(static_cast<void*>(Info));
- PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true);
+ PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true,
+ /*IsReinject=*/false);
}
// #pragma ms_struct on
// #pragma ms_struct off
void PragmaMSStructHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
+ PragmaIntroducer Introducer,
Token &MSStructTok) {
PragmaMSStructKind Kind = PMSST_OFF;
@@ -1782,12 +1790,14 @@ void PragmaMSStructHandler::HandlePragma(Preprocessor &PP,
Toks[0].setAnnotationEndLoc(EndLoc);
Toks[0].setAnnotationValue(reinterpret_cast<void*>(
static_cast<uintptr_t>(Kind)));
- PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true);
+ PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true,
+ /*IsReinject=*/false);
}
// #pragma clang section bss="abc" data="" rodata="def" text=""
void PragmaClangSectionHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer, Token &FirstToken) {
+ PragmaIntroducer Introducer,
+ Token &FirstToken) {
Token Tok;
auto SecKind = Sema::PragmaClangSectionKind::PCSK_Invalid;
@@ -1895,24 +1905,25 @@ static void ParseAlignPragma(Preprocessor &PP, Token &FirstTok,
Toks[0].setAnnotationEndLoc(EndLoc);
Toks[0].setAnnotationValue(reinterpret_cast<void*>(
static_cast<uintptr_t>(Kind)));
- PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true);
+ PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true,
+ /*IsReinject=*/false);
}
void PragmaAlignHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
+ PragmaIntroducer Introducer,
Token &AlignTok) {
ParseAlignPragma(PP, AlignTok, /*IsOptions=*/false);
}
void PragmaOptionsHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
+ PragmaIntroducer Introducer,
Token &OptionsTok) {
ParseAlignPragma(PP, OptionsTok, /*IsOptions=*/true);
}
// #pragma unused(identifier)
void PragmaUnusedHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
+ PragmaIntroducer Introducer,
Token &UnusedTok) {
// FIXME: Should we be expanding macros here? My guess is no.
SourceLocation UnusedLoc = UnusedTok.getLocation();
@@ -1987,13 +1998,14 @@ void PragmaUnusedHandler::HandlePragma(Preprocessor &PP,
pragmaUnusedTok.setLocation(UnusedLoc);
idTok = Identifiers[i];
}
- PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true);
+ PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true,
+ /*IsReinject=*/false);
}
// #pragma weak identifier
// #pragma weak identifier '=' identifier
void PragmaWeakHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
+ PragmaIntroducer Introducer,
Token &WeakTok) {
SourceLocation WeakLoc = WeakTok.getLocation();
@@ -2036,7 +2048,8 @@ void PragmaWeakHandler::HandlePragma(Preprocessor &PP,
pragmaUnusedTok.setAnnotationEndLoc(AliasName.getLocation());
Toks[1] = WeakName;
Toks[2] = AliasName;
- PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true);
+ PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true,
+ /*IsReinject=*/false);
} else {
MutableArrayRef<Token> Toks(
PP.getPreprocessorAllocator().Allocate<Token>(2), 2);
@@ -2046,13 +2059,14 @@ void PragmaWeakHandler::HandlePragma(Preprocessor &PP,
pragmaUnusedTok.setLocation(WeakLoc);
pragmaUnusedTok.setAnnotationEndLoc(WeakLoc);
Toks[1] = WeakName;
- PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true);
+ PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true,
+ /*IsReinject=*/false);
}
}
// #pragma redefine_extname identifier identifier
void PragmaRedefineExtnameHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
+ PragmaIntroducer Introducer,
Token &RedefToken) {
SourceLocation RedefLoc = RedefToken.getLocation();
@@ -2091,14 +2105,13 @@ void PragmaRedefineExtnameHandler::HandlePragma(Preprocessor &PP,
pragmaRedefTok.setAnnotationEndLoc(AliasName.getLocation());
Toks[1] = RedefName;
Toks[2] = AliasName;
- PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true);
+ PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true,
+ /*IsReinject=*/false);
}
-
-void
-PragmaFPContractHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
- Token &Tok) {
+void PragmaFPContractHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducer Introducer,
+ Token &Tok) {
tok::OnOffSwitch OOS;
if (PP.LexOnOffSwitch(OOS))
return;
@@ -2111,13 +2124,13 @@ PragmaFPContractHandler::HandlePragma(Preprocessor &PP,
Toks[0].setAnnotationEndLoc(Tok.getLocation());
Toks[0].setAnnotationValue(reinterpret_cast<void*>(
static_cast<uintptr_t>(OOS)));
- PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true);
+ PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true,
+ /*IsReinject=*/false);
}
-void
-PragmaOpenCLExtensionHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
- Token &Tok) {
+void PragmaOpenCLExtensionHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducer Introducer,
+ Token &Tok) {
PP.LexUnexpandedToken(Tok);
if (Tok.isNot(tok::identifier)) {
PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier) <<
@@ -2173,7 +2186,8 @@ PragmaOpenCLExtensionHandler::HandlePragma(Preprocessor &PP,
Toks[0].setLocation(NameLoc);
Toks[0].setAnnotationValue(static_cast<void*>(Info));
Toks[0].setAnnotationEndLoc(StateLoc);
- PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true);
+ PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true,
+ /*IsReinject=*/false);
if (PP.getPPCallbacks())
PP.getPPCallbacks()->PragmaOpenCLExtension(NameLoc, Ext,
@@ -2182,10 +2196,9 @@ PragmaOpenCLExtensionHandler::HandlePragma(Preprocessor &PP,
/// Handle '#pragma omp ...' when OpenMP is disabled.
///
-void
-PragmaNoOpenMPHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
- Token &FirstTok) {
+void PragmaNoOpenMPHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducer Introducer,
+ Token &FirstTok) {
if (!PP.getDiagnostics().isIgnored(diag::warn_pragma_omp_ignored,
FirstTok.getLocation())) {
PP.Diag(FirstTok, diag::warn_pragma_omp_ignored);
@@ -2197,15 +2210,14 @@ PragmaNoOpenMPHandler::HandlePragma(Preprocessor &PP,
/// Handle '#pragma omp ...' when OpenMP is enabled.
///
-void
-PragmaOpenMPHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
- Token &FirstTok) {
+void PragmaOpenMPHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducer Introducer,
+ Token &FirstTok) {
SmallVector<Token, 16> Pragma;
Token Tok;
Tok.startToken();
Tok.setKind(tok::annot_pragma_openmp);
- Tok.setLocation(FirstTok.getLocation());
+ Tok.setLocation(Introducer.Loc);
while (Tok.isNot(tok::eod) && Tok.isNot(tok::eof)) {
Pragma.push_back(Tok);
@@ -2232,7 +2244,7 @@ PragmaOpenMPHandler::HandlePragma(Preprocessor &PP,
auto Toks = llvm::make_unique<Token[]>(Pragma.size());
std::copy(Pragma.begin(), Pragma.end(), Toks.get());
PP.EnterTokenStream(std::move(Toks), Pragma.size(),
- /*DisableMacroExpansion=*/false);
+ /*DisableMacroExpansion=*/false, /*IsReinject=*/false);
}
/// Handle '#pragma pointers_to_members'
@@ -2244,7 +2256,7 @@ PragmaOpenMPHandler::HandlePragma(Preprocessor &PP,
// #pragma pointers_to_members '(' 'full_generality' [',' inheritance-model] ')'
// #pragma pointers_to_members '(' inheritance-model ')'
void PragmaMSPointersToMembers::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
+ PragmaIntroducer Introducer,
Token &Tok) {
SourceLocation PointersToMembersLoc = Tok.getLocation();
PP.Lex(Tok);
@@ -2330,7 +2342,7 @@ void PragmaMSPointersToMembers::HandlePragma(Preprocessor &PP,
AnnotTok.setAnnotationEndLoc(EndLoc);
AnnotTok.setAnnotationValue(
reinterpret_cast<void *>(static_cast<uintptr_t>(RepresentationMethod)));
- PP.EnterToken(AnnotTok);
+ PP.EnterToken(AnnotTok, /*IsReinject=*/true);
}
/// Handle '#pragma vtordisp'
@@ -2342,8 +2354,7 @@ void PragmaMSPointersToMembers::HandlePragma(Preprocessor &PP,
// #pragma vtordisp '(' 'pop' ')'
// #pragma vtordisp '(' ')'
void PragmaMSVtorDisp::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
- Token &Tok) {
+ PragmaIntroducer Introducer, Token &Tok) {
SourceLocation VtorDispLoc = Tok.getLocation();
PP.Lex(Tok);
if (Tok.isNot(tok::l_paren)) {
@@ -2423,14 +2434,13 @@ void PragmaMSVtorDisp::HandlePragma(Preprocessor &PP,
AnnotTok.setAnnotationEndLoc(EndLoc);
AnnotTok.setAnnotationValue(reinterpret_cast<void *>(
static_cast<uintptr_t>((Action << 16) | (Value & 0xFFFF))));
- PP.EnterToken(AnnotTok);
+ PP.EnterToken(AnnotTok, /*IsReinject=*/false);
}
/// Handle all MS pragmas. Simply forwards the tokens after inserting
/// an annotation token.
void PragmaMSPragma::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
- Token &Tok) {
+ PragmaIntroducer Introducer, Token &Tok) {
Token EoF, AnnotTok;
EoF.startToken();
EoF.setKind(tok::eof);
@@ -2454,7 +2464,7 @@ void PragmaMSPragma::HandlePragma(Preprocessor &PP,
std::pair<std::unique_ptr<Token[]>, size_t>(std::move(TokenArray),
TokenVector.size());
AnnotTok.setAnnotationValue(Value);
- PP.EnterToken(AnnotTok);
+ PP.EnterToken(AnnotTok, /*IsReinject*/ false);
}
/// Handle the Microsoft \#pragma detect_mismatch extension.
@@ -2468,7 +2478,7 @@ void PragmaMSPragma::HandlePragma(Preprocessor &PP,
/// mismatch in the object file's values for the given name, a LNK2038 error
/// is emitted. See MSDN for more details.
void PragmaDetectMismatchHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
+ PragmaIntroducer Introducer,
Token &Tok) {
SourceLocation DetectMismatchLoc = Tok.getLocation();
PP.Lex(Tok);
@@ -2481,7 +2491,7 @@ void PragmaDetectMismatchHandler::HandlePragma(Preprocessor &PP,
std::string NameString;
if (!PP.LexStringLiteral(Tok, NameString,
"pragma detect_mismatch",
- /*MacroExpansion=*/true))
+ /*AllowMacroExpansion=*/true))
return;
// Read the comma followed by a second string literal.
@@ -2492,7 +2502,7 @@ void PragmaDetectMismatchHandler::HandlePragma(Preprocessor &PP,
}
if (!PP.LexStringLiteral(Tok, ValueString, "pragma detect_mismatch",
- /*MacroExpansion=*/true))
+ /*AllowMacroExpansion=*/true))
return;
if (Tok.isNot(tok::r_paren)) {
@@ -2524,7 +2534,7 @@ void PragmaDetectMismatchHandler::HandlePragma(Preprocessor &PP,
/// "foo" is a string, which is fully macro expanded, and permits string
/// concatenation, embedded escape characters etc. See MSDN for more details.
void PragmaCommentHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
+ PragmaIntroducer Introducer,
Token &Tok) {
SourceLocation CommentLoc = Tok.getLocation();
PP.Lex(Tok);
@@ -2574,7 +2584,7 @@ void PragmaCommentHandler::HandlePragma(Preprocessor &PP,
std::string ArgumentString;
if (Tok.is(tok::comma) && !PP.LexStringLiteral(Tok, ArgumentString,
"pragma comment",
- /*MacroExpansion=*/true))
+ /*AllowMacroExpansion=*/true))
return;
// FIXME: warn that 'exestr' is deprecated.
@@ -2605,8 +2615,8 @@ void PragmaCommentHandler::HandlePragma(Preprocessor &PP,
// #pragma clang optimize off
// #pragma clang optimize on
void PragmaOptimizeHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
- Token &FirstToken) {
+ PragmaIntroducer Introducer,
+ Token &FirstToken) {
Token Tok;
PP.Lex(Tok);
if (Tok.is(tok::eod)) {
@@ -2652,8 +2662,7 @@ struct TokFPAnnotValue {
} // end anonymous namespace
void PragmaFPHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
- Token &Tok) {
+ PragmaIntroducer Introducer, Token &Tok) {
// fp
Token PragmaName = Tok;
SmallVector<Token, 1> TokenList;
@@ -2738,7 +2747,7 @@ void PragmaFPHandler::HandlePragma(Preprocessor &PP,
std::copy(TokenList.begin(), TokenList.end(), TokenArray.get());
PP.EnterTokenStream(std::move(TokenArray), TokenList.size(),
- /*DisableMacroExpansion=*/false);
+ /*DisableMacroExpansion=*/false, /*IsReinject=*/false);
}
void Parser::HandlePragmaFP() {
@@ -2853,7 +2862,7 @@ static bool ParseLoopHintValue(Preprocessor &PP, Token &Tok, Token PragmaName,
/// loop. Specifying unroll_count(_value_) instructs llvm to try to unroll the
/// loop the number of times indicated by the value.
void PragmaLoopHintHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
+ PragmaIntroducer Introducer,
Token &Tok) {
// Incoming token is "loop" from "#pragma clang loop".
Token PragmaName = Tok;
@@ -2921,7 +2930,7 @@ void PragmaLoopHintHandler::HandlePragma(Preprocessor &PP,
std::copy(TokenList.begin(), TokenList.end(), TokenArray.get());
PP.EnterTokenStream(std::move(TokenArray), TokenList.size(),
- /*DisableMacroExpansion=*/false);
+ /*DisableMacroExpansion=*/false, /*IsReinject=*/false);
}
/// Handle the loop unroll optimization pragmas.
@@ -2946,7 +2955,7 @@ void PragmaLoopHintHandler::HandlePragma(Preprocessor &PP,
/// specified with or without parentheses. Specifying '#pragma nounroll'
/// disables unrolling of the loop.
void PragmaUnrollHintHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
+ PragmaIntroducer Introducer,
Token &Tok) {
// Incoming token is "unroll" for "#pragma unroll", or "nounroll" for
// "#pragma nounroll".
@@ -2996,7 +3005,7 @@ void PragmaUnrollHintHandler::HandlePragma(Preprocessor &PP,
TokenArray[0].setAnnotationEndLoc(PragmaName.getLocation());
TokenArray[0].setAnnotationValue(static_cast<void *>(Info));
PP.EnterTokenStream(std::move(TokenArray), 1,
- /*DisableMacroExpansion=*/false);
+ /*DisableMacroExpansion=*/false, /*IsReinject=*/false);
}
/// Handle the Microsoft \#pragma intrinsic extension.
@@ -3012,7 +3021,7 @@ void PragmaUnrollHintHandler::HandlePragma(Preprocessor &PP,
/// Anyway, we emit a warning if the function specified in \#pragma intrinsic
/// isn't an intrinsic in clang and suggest to include intrin.h.
void PragmaMSIntrinsicHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
+ PragmaIntroducer Introducer,
Token &Tok) {
PP.Lex(Tok);
@@ -3051,7 +3060,7 @@ void PragmaMSIntrinsicHandler::HandlePragma(Preprocessor &PP,
// #pragma optimize("gsty", on|off)
void PragmaMSOptimizeHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
+ PragmaIntroducer Introducer,
Token &Tok) {
SourceLocation StartLoc = Tok.getLocation();
PP.Lex(Tok);
@@ -3104,7 +3113,7 @@ void PragmaMSOptimizeHandler::HandlePragma(Preprocessor &PP,
}
void PragmaForceCUDAHostDeviceHandler::HandlePragma(
- Preprocessor &PP, PragmaIntroducerKind Introducer, Token &Tok) {
+ Preprocessor &PP, PragmaIntroducer Introducer, Token &Tok) {
Token FirstTok = Tok;
PP.Lex(Tok);
@@ -3155,7 +3164,7 @@ void PragmaForceCUDAHostDeviceHandler::HandlePragma(
/// attribute to the set of attribute-specific declarations in the active range
/// of the pragma.
void PragmaAttributeHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducerKind Introducer,
+ PragmaIntroducer Introducer,
Token &FirstToken) {
Token Tok;
PP.Lex(Tok);
@@ -3268,5 +3277,5 @@ void PragmaAttributeHandler::HandlePragma(Preprocessor &PP,
TokenArray[0].setAnnotationEndLoc(FirstToken.getLocation());
TokenArray[0].setAnnotationValue(static_cast<void *>(Info));
PP.EnterTokenStream(std::move(TokenArray), 1,
- /*DisableMacroExpansion=*/false);
+ /*DisableMacroExpansion=*/false, /*IsReinject=*/false);
}
diff --git a/lib/Parse/ParseStmt.cpp b/lib/Parse/ParseStmt.cpp
index 2974e6a245b0..bf04253ab7fd 100644
--- a/lib/Parse/ParseStmt.cpp
+++ b/lib/Parse/ParseStmt.cpp
@@ -1,9 +1,8 @@
//===--- ParseStmt.cpp - Statement and Block Parser -----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -30,17 +29,14 @@ using namespace clang;
/// Parse a standalone statement (for instance, as the body of an 'if',
/// 'while', or 'for').
StmtResult Parser::ParseStatement(SourceLocation *TrailingElseLoc,
- bool AllowOpenMPStandalone) {
+ ParsedStmtContext StmtCtx) {
StmtResult Res;
// We may get back a null statement if we found a #pragma. Keep going until
// we get an actual statement.
do {
StmtVector Stmts;
- Res = ParseStatementOrDeclaration(
- Stmts, AllowOpenMPStandalone ? ACK_StatementsOpenMPAnyExecutable
- : ACK_StatementsOpenMPNonStandalone,
- TrailingElseLoc);
+ Res = ParseStatementOrDeclaration(Stmts, StmtCtx, TrailingElseLoc);
} while (!Res.isInvalid() && !Res.get());
return Res;
@@ -97,7 +93,7 @@ StmtResult Parser::ParseStatement(SourceLocation *TrailingElseLoc,
///
StmtResult
Parser::ParseStatementOrDeclaration(StmtVector &Stmts,
- AllowedConstructsKind Allowed,
+ ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc) {
ParenBraceBracketBalancer BalancerRAIIObj(*this);
@@ -108,7 +104,7 @@ Parser::ParseStatementOrDeclaration(StmtVector &Stmts,
return StmtError();
StmtResult Res = ParseStatementOrDeclarationAfterAttributes(
- Stmts, Allowed, TrailingElseLoc, Attrs);
+ Stmts, StmtCtx, TrailingElseLoc, Attrs);
assert((Attrs.empty() || Res.isInvalid() || Res.isUsable()) &&
"attributes on empty statement");
@@ -120,7 +116,7 @@ Parser::ParseStatementOrDeclaration(StmtVector &Stmts,
}
namespace {
-class StatementFilterCCC : public CorrectionCandidateCallback {
+class StatementFilterCCC final : public CorrectionCandidateCallback {
public:
StatementFilterCCC(Token nextTok) : NextToken(nextTok) {
WantTypeSpecifiers = nextTok.isOneOf(tok::l_paren, tok::less, tok::l_square,
@@ -143,15 +139,18 @@ public:
return CorrectionCandidateCallback::ValidateCandidate(candidate);
}
+ std::unique_ptr<CorrectionCandidateCallback> clone() override {
+ return llvm::make_unique<StatementFilterCCC>(*this);
+ }
+
private:
Token NextToken;
};
}
-StmtResult
-Parser::ParseStatementOrDeclarationAfterAttributes(StmtVector &Stmts,
- AllowedConstructsKind Allowed, SourceLocation *TrailingElseLoc,
- ParsedAttributesWithRange &Attrs) {
+StmtResult Parser::ParseStatementOrDeclarationAfterAttributes(
+ StmtVector &Stmts, ParsedStmtContext StmtCtx,
+ SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs) {
const char *SemiError = nullptr;
StmtResult Res;
@@ -166,7 +165,7 @@ Retry:
{
ProhibitAttributes(Attrs); // TODO: is it correct?
AtLoc = ConsumeToken(); // consume @
- return ParseObjCAtStatement(AtLoc);
+ return ParseObjCAtStatement(AtLoc, StmtCtx);
}
case tok::code_completion:
@@ -178,7 +177,7 @@ Retry:
Token Next = NextToken();
if (Next.is(tok::colon)) { // C99 6.8.1: labeled-statement
// identifier ':' statement
- return ParseLabeledStatement(Attrs);
+ return ParseLabeledStatement(Attrs, StmtCtx);
}
// Look up the identifier, and typo-correct it to a keyword if it's not
@@ -186,9 +185,8 @@ Retry:
if (Next.isNot(tok::coloncolon)) {
// Try to limit which sets of keywords should be included in typo
// correction based on what the next token is.
- if (TryAnnotateName(/*IsAddressOfOperand*/ false,
- llvm::make_unique<StatementFilterCCC>(Next)) ==
- ANK_Error) {
+ StatementFilterCCC CCC(Next);
+ if (TryAnnotateName(/*IsAddressOfOperand*/ false, &CCC) == ANK_Error) {
// Handle errors here by skipping up to the next semicolon or '}', and
// eat the semicolon if that's what stopped us.
SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
@@ -208,7 +206,8 @@ Retry:
default: {
if ((getLangOpts().CPlusPlus || getLangOpts().MicrosoftExt ||
- Allowed == ACK_Any) &&
+ (StmtCtx & ParsedStmtContext::AllowDeclarationsInC) !=
+ ParsedStmtContext()) &&
isDeclarationStatement()) {
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
DeclGroupPtrTy Decl = ParseDeclaration(DeclaratorContext::BlockContext,
@@ -221,13 +220,13 @@ Retry:
return StmtError();
}
- return ParseExprStatement();
+ return ParseExprStatement(StmtCtx);
}
case tok::kw_case: // C99 6.8.1: labeled-statement
- return ParseCaseStatement();
+ return ParseCaseStatement(StmtCtx);
case tok::kw_default: // C99 6.8.1: labeled-statement
- return ParseDefaultStatement();
+ return ParseDefaultStatement(StmtCtx);
case tok::l_brace: // C99 6.8.2: compound-statement
return ParseCompoundStatement();
@@ -364,7 +363,7 @@ Retry:
case tok::annot_pragma_openmp:
ProhibitAttributes(Attrs);
- return ParseOpenMPDeclarativeOrExecutableDirective(Allowed);
+ return ParseOpenMPDeclarativeOrExecutableDirective(StmtCtx);
case tok::annot_pragma_ms_pointers_to_members:
ProhibitAttributes(Attrs);
@@ -383,7 +382,7 @@ Retry:
case tok::annot_pragma_loop_hint:
ProhibitAttributes(Attrs);
- return ParsePragmaLoopHint(Stmts, Allowed, TrailingElseLoc, Attrs);
+ return ParsePragmaLoopHint(Stmts, StmtCtx, TrailingElseLoc, Attrs);
case tok::annot_pragma_dump:
HandlePragmaDump();
@@ -408,7 +407,7 @@ Retry:
}
/// Parse an expression statement.
-StmtResult Parser::ParseExprStatement() {
+StmtResult Parser::ParseExprStatement(ParsedStmtContext StmtCtx) {
// If a case keyword is missing, this is where it should be inserted.
Token OldToken = Tok;
@@ -434,12 +433,12 @@ StmtResult Parser::ParseExprStatement() {
<< FixItHint::CreateInsertion(OldToken.getLocation(), "case ");
// Recover parsing as a case statement.
- return ParseCaseStatement(/*MissingCase=*/true, Expr);
+ return ParseCaseStatement(StmtCtx, /*MissingCase=*/true, Expr);
}
// Otherwise, eat the semicolon.
ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
- return Actions.ActOnExprStmt(Expr, isExprValueDiscarded());
+ return handleExprStmt(Expr, StmtCtx);
}
/// ParseSEHTryBlockCommon
@@ -578,10 +577,15 @@ StmtResult Parser::ParseSEHLeaveStatement() {
/// identifier ':' statement
/// [GNU] identifier ':' attributes[opt] statement
///
-StmtResult Parser::ParseLabeledStatement(ParsedAttributesWithRange &attrs) {
+StmtResult Parser::ParseLabeledStatement(ParsedAttributesWithRange &attrs,
+ ParsedStmtContext StmtCtx) {
assert(Tok.is(tok::identifier) && Tok.getIdentifierInfo() &&
"Not an identifier!");
+ // The substatement is always a 'statement', not a 'declaration', but is
+ // otherwise in the same context as the labeled-statement.
+ StmtCtx &= ~ParsedStmtContext::AllowDeclarationsInC;
+
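(For illustration of what clearing AllowDeclarationsInC means for the substatement, a minimal C sketch with placeholder names; in C17 a label must be followed by a statement, and a declaration is not one, whereas C++ accepts it:)

void f(int n) {
again:
  int i = n;        /* ill-formed in C17 (diagnosed), accepted in C++ */
  if (i--) goto again;
}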
Token IdentTok = Tok; // Save the whole token.
ConsumeToken(); // eat the identifier.
@@ -611,9 +615,8 @@ StmtResult Parser::ParseLabeledStatement(ParsedAttributesWithRange &attrs) {
// statement, but that doesn't work correctly (because ProhibitAttributes
// can't handle GNU attributes), so only call it in the one case where
// GNU attributes are allowed.
- SubStmt = ParseStatementOrDeclarationAfterAttributes(
- Stmts, /*Allowed=*/ACK_StatementsOpenMPNonStandalone, nullptr,
- TempAttrs);
+ SubStmt = ParseStatementOrDeclarationAfterAttributes(Stmts, StmtCtx,
+ nullptr, TempAttrs);
if (!TempAttrs.empty() && !SubStmt.isInvalid())
SubStmt = Actions.ProcessStmtAttributes(SubStmt.get(), TempAttrs,
TempAttrs.Range);
@@ -624,7 +627,7 @@ StmtResult Parser::ParseLabeledStatement(ParsedAttributesWithRange &attrs) {
// If we've not parsed a statement yet, parse one now.
if (!SubStmt.isInvalid() && !SubStmt.isUsable())
- SubStmt = ParseStatement();
+ SubStmt = ParseStatement(nullptr, StmtCtx);
// Broken substmt shouldn't prevent the label from being added to the AST.
if (SubStmt.isInvalid())
@@ -644,9 +647,14 @@ StmtResult Parser::ParseLabeledStatement(ParsedAttributesWithRange &attrs) {
/// 'case' constant-expression ':' statement
/// [GNU] 'case' constant-expression '...' constant-expression ':' statement
///
-StmtResult Parser::ParseCaseStatement(bool MissingCase, ExprResult Expr) {
+StmtResult Parser::ParseCaseStatement(ParsedStmtContext StmtCtx,
+ bool MissingCase, ExprResult Expr) {
assert((MissingCase || Tok.is(tok::kw_case)) && "Not a case stmt!");
+ // The substatement is always a 'statement', not a 'declaration', but is
+ // otherwise in the same context as the labeled-statement.
+ StmtCtx &= ~ParsedStmtContext::AllowDeclarationsInC;
+
// It is very very common for code to contain many case statements recursively
// nested, as in (but usually without indentation):
// case 1:
@@ -738,8 +746,7 @@ StmtResult Parser::ParseCaseStatement(bool MissingCase, ExprResult Expr) {
// continue parsing the sub-stmt.
if (Case.isInvalid()) {
if (TopLevelCase.isInvalid()) // No parsed case stmts.
- return ParseStatement(/*TrailingElseLoc=*/nullptr,
- /*AllowOpenMPStandalone=*/true);
+ return ParseStatement(/*TrailingElseLoc=*/nullptr, StmtCtx);
// Otherwise, just don't add it as a nested case.
} else {
// If this is the first case statement we parsed, it becomes TopLevelCase.
@@ -759,8 +766,7 @@ StmtResult Parser::ParseCaseStatement(bool MissingCase, ExprResult Expr) {
StmtResult SubStmt;
if (Tok.isNot(tok::r_brace)) {
- SubStmt = ParseStatement(/*TrailingElseLoc=*/nullptr,
- /*AllowOpenMPStandalone=*/true);
+ SubStmt = ParseStatement(/*TrailingElseLoc=*/nullptr, StmtCtx);
} else {
// Nicely diagnose the common error "switch (X) { case 4: }", which is
// not valid. If ColonLoc doesn't point to a valid text location, there was
@@ -790,8 +796,13 @@ StmtResult Parser::ParseCaseStatement(bool MissingCase, ExprResult Expr) {
/// 'default' ':' statement
/// Note that this does not parse the 'statement' at the end.
///
-StmtResult Parser::ParseDefaultStatement() {
+StmtResult Parser::ParseDefaultStatement(ParsedStmtContext StmtCtx) {
assert(Tok.is(tok::kw_default) && "Not a default stmt!");
+
+ // The substatement is always a 'statement', not a 'declaration', but is
+ // otherwise in the same context as the labeled-statement.
+ StmtCtx &= ~ParsedStmtContext::AllowDeclarationsInC;
+
SourceLocation DefaultLoc = ConsumeToken(); // eat the 'default'.
SourceLocation ColonLoc;
@@ -812,8 +823,7 @@ StmtResult Parser::ParseDefaultStatement() {
StmtResult SubStmt;
if (Tok.isNot(tok::r_brace)) {
- SubStmt = ParseStatement(/*TrailingElseLoc=*/nullptr,
- /*AllowOpenMPStandalone=*/true);
+ SubStmt = ParseStatement(/*TrailingElseLoc=*/nullptr, StmtCtx);
} else {
// Diagnose the common error "switch (X) {... default: }", which is
// not valid.
@@ -944,7 +954,8 @@ bool Parser::ConsumeNullStmt(StmtVector &Stmts) {
EndLoc = Tok.getLocation();
// Don't just ConsumeToken() this tok::semi, do store it in AST.
- StmtResult R = ParseStatementOrDeclaration(Stmts, ACK_Any);
+ StmtResult R =
+ ParseStatementOrDeclaration(Stmts, ParsedStmtContext::SubStmt);
if (R.isUsable())
Stmts.push_back(R.get());
}
@@ -958,14 +969,24 @@ bool Parser::ConsumeNullStmt(StmtVector &Stmts) {
return true;
}
-bool Parser::isExprValueDiscarded() {
- if (Actions.isCurCompoundStmtAStmtExpr()) {
- // Look to see if the next two tokens close the statement expression;
- // if so, this expression statement is the last statement in a
- // statment expression.
- return Tok.isNot(tok::r_brace) || NextToken().isNot(tok::r_paren);
+StmtResult Parser::handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx) {
+ bool IsStmtExprResult = false;
+ if ((StmtCtx & ParsedStmtContext::InStmtExpr) != ParsedStmtContext()) {
+ // For GCC compatibility we skip past NullStmts.
+ unsigned LookAhead = 0;
+ while (GetLookAheadToken(LookAhead).is(tok::semi)) {
+ ++LookAhead;
+ }
+ // Then look to see if the next two tokens close the statement expression;
+    // if so, this expression statement is the last statement in a statement
+ // expression.
+ IsStmtExprResult = GetLookAheadToken(LookAhead).is(tok::r_brace) &&
+ GetLookAheadToken(LookAhead + 1).is(tok::r_paren);
}
- return true;
+
+ if (IsStmtExprResult)
+ E = Actions.ActOnStmtExprResult(E);
+ return Actions.ActOnExprStmt(E, /*DiscardedValue=*/!IsStmtExprResult);
}
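(For context, a minimal GNU statement-expression example, with illustrative values, of the GCC-compatibility behaviour the new handleExprStmt implements: trailing null statements before '})' are skipped when picking the result expression:)

int x = ({ int t = 6; t * 7;; });   /* x == 42; the trailing ';' no longer
                                       discards the result, matching GCC */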
/// ParseCompoundStatementBody - Parse a sequence of statements and invoke the
@@ -1023,6 +1044,10 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
Stmts.push_back(R.get());
}
+ ParsedStmtContext SubStmtCtx =
+ ParsedStmtContext::Compound |
+ (isStmtExpr ? ParsedStmtContext::InStmtExpr : ParsedStmtContext());
+
while (!tryParseMisplacedModuleImport() && Tok.isNot(tok::r_brace) &&
Tok.isNot(tok::eof)) {
if (Tok.is(tok::annot_pragma_unused)) {
@@ -1035,7 +1060,7 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
StmtResult R;
if (Tok.isNot(tok::kw___extension__)) {
- R = ParseStatementOrDeclaration(Stmts, ACK_Any);
+ R = ParseStatementOrDeclaration(Stmts, SubStmtCtx);
} else {
// __extension__ can start declarations and it can also be a unary
// operator for expressions. Consume multiple __extension__ markers here
@@ -1068,11 +1093,12 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
continue;
}
- // FIXME: Use attributes?
// Eat the semicolon at the end of stmt and convert the expr into a
// statement.
ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
- R = Actions.ActOnExprStmt(Res, isExprValueDiscarded());
+ R = handleExprStmt(Res, SubStmtCtx);
+ if (R.isUsable())
+ R = Actions.ProcessStmtAttributes(R.get(), attrs, attrs.Range);
}
}
@@ -1971,9 +1997,12 @@ StmtResult Parser::ParseReturnStatement() {
ExprResult R;
if (Tok.isNot(tok::semi)) {
+ if (!IsCoreturn)
+ PreferredType.enterReturn(Actions, Tok.getLocation());
// FIXME: Code completion for co_return.
if (Tok.is(tok::code_completion) && !IsCoreturn) {
- Actions.CodeCompleteReturn(getCurScope());
+ Actions.CodeCompleteExpression(getCurScope(),
+ PreferredType.get(Tok.getLocation()));
cutOffParsing();
return StmtError();
}
@@ -1999,7 +2028,7 @@ StmtResult Parser::ParseReturnStatement() {
}
StmtResult Parser::ParsePragmaLoopHint(StmtVector &Stmts,
- AllowedConstructsKind Allowed,
+ ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs) {
// Create temporary attribute list.
@@ -2022,7 +2051,7 @@ StmtResult Parser::ParsePragmaLoopHint(StmtVector &Stmts,
MaybeParseCXX11Attributes(Attrs);
StmtResult S = ParseStatementOrDeclarationAfterAttributes(
- Stmts, Allowed, TrailingElseLoc, Attrs);
+ Stmts, StmtCtx, TrailingElseLoc, Attrs);
Attrs.takeAllFrom(TempAttrs);
return S;
@@ -2241,7 +2270,8 @@ StmtResult Parser::ParseCXXCatchBlock(bool FnCatch) {
// The name in a catch exception-declaration is local to the handler and
// shall not be redeclared in the outermost block of the handler.
ParseScope CatchScope(this, Scope::DeclScope | Scope::ControlScope |
- (FnCatch ? Scope::FnTryCatchScope : 0));
+ Scope::CatchScope |
+ (FnCatch ? Scope::FnTryCatchScope : 0));
// exception-declaration is equivalent to '...' or a parameter-declaration
// without default arguments.
@@ -2327,7 +2357,8 @@ void Parser::ParseMicrosoftIfExistsStatement(StmtVector &Stmts) {
// Condition is true, parse the statements.
while (Tok.isNot(tok::r_brace)) {
- StmtResult R = ParseStatementOrDeclaration(Stmts, ACK_Any);
+ StmtResult R =
+ ParseStatementOrDeclaration(Stmts, ParsedStmtContext::Compound);
if (R.isUsable())
Stmts.push_back(R.get());
}
diff --git a/lib/Parse/ParseStmtAsm.cpp b/lib/Parse/ParseStmtAsm.cpp
index 9b96c5150e56..1153c2510b05 100644
--- a/lib/Parse/ParseStmtAsm.cpp
+++ b/lib/Parse/ParseStmtAsm.cpp
@@ -1,9 +1,8 @@
//===---- ParseStmtAsm.cpp - Assembly Statement Parser --------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -145,8 +144,8 @@ void ClangAsmParserCallback::findTokensForString(
// Try to find a token whose offset matches the first token.
unsigned FirstCharOffset = Str.begin() - AsmString.begin();
- const unsigned *FirstTokOffset = std::lower_bound(
- AsmTokOffsets.begin(), AsmTokOffsets.end(), FirstCharOffset);
+ const unsigned *FirstTokOffset =
+ llvm::lower_bound(AsmTokOffsets, FirstCharOffset);
// For now, assert that the start of the string exactly
// corresponds to the start of a token.
@@ -175,8 +174,7 @@ ClangAsmParserCallback::translateLocation(const llvm::SourceMgr &LSM,
unsigned Offset = SMLoc.getPointer() - LBuf->getBufferStart();
// Figure out which token that offset points into.
- const unsigned *TokOffsetPtr =
- std::lower_bound(AsmTokOffsets.begin(), AsmTokOffsets.end(), Offset);
+ const unsigned *TokOffsetPtr = llvm::lower_bound(AsmTokOffsets, Offset);
unsigned TokIndex = TokOffsetPtr - AsmTokOffsets.begin();
unsigned TokOffset = *TokOffsetPtr;
@@ -214,7 +212,8 @@ ExprResult Parser::ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
// Also copy the current token over.
LineToks.push_back(Tok);
- PP.EnterTokenStream(LineToks, /*DisableMacroExpansions*/ true);
+ PP.EnterTokenStream(LineToks, /*DisableMacroExpansions*/ true,
+ /*IsReinject*/ true);
// Clear the current token and advance to the first token in LineToks.
ConsumeAnyToken();
@@ -637,7 +636,7 @@ StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
// Filter out "fpsw" and "mxcsr". They aren't valid GCC asm clobber
// constraints. Clang always adds fpsr to the clobber list anyway.
llvm::erase_if(Clobbers, [](const std::string &C) {
- return C == "fpsw" || C == "mxcsr";
+ return C == "fpsr" || C == "mxcsr";
});
// Build the vector of clobber StringRefs.
@@ -710,12 +709,12 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
// Remember if this was a volatile asm.
bool isVolatile = DS.getTypeQualifiers() & DeclSpec::TQ_volatile;
+ // Remember if this was a goto asm.
+ bool isGotoAsm = false;
- // TODO: support "asm goto" constructs (PR#9295).
if (Tok.is(tok::kw_goto)) {
- Diag(Tok, diag::err_asm_goto_not_supported_yet);
- SkipUntil(tok::r_paren, StopAtSemi);
- return StmtError();
+ isGotoAsm = true;
+ ConsumeToken();
}
if (Tok.isNot(tok::l_paren)) {
@@ -753,7 +752,8 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
return Actions.ActOnGCCAsmStmt(AsmLoc, /*isSimple*/ true, isVolatile,
/*NumOutputs*/ 0, /*NumInputs*/ 0, nullptr,
Constraints, Exprs, AsmString.get(),
- Clobbers, T.getCloseLocation());
+ Clobbers, /*NumLabels*/ 0,
+ T.getCloseLocation());
}
// Parse Outputs, if present.
@@ -763,6 +763,12 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
AteExtraColon = Tok.is(tok::coloncolon);
ConsumeToken();
+ if (!AteExtraColon && isGotoAsm && Tok.isNot(tok::colon)) {
+ Diag(Tok, diag::err_asm_goto_cannot_have_output);
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return StmtError();
+ }
+
if (!AteExtraColon && ParseAsmOperandsOpt(Names, Constraints, Exprs))
return StmtError();
}
@@ -789,12 +795,15 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
unsigned NumInputs = Names.size() - NumOutputs;
// Parse the clobbers, if present.
- if (AteExtraColon || Tok.is(tok::colon)) {
- if (!AteExtraColon)
+ if (AteExtraColon || Tok.is(tok::colon) || Tok.is(tok::coloncolon)) {
+ if (AteExtraColon)
+ AteExtraColon = false;
+ else {
+ AteExtraColon = Tok.is(tok::coloncolon);
ConsumeToken();
-
+ }
// Parse the asm-string list for clobbers if present.
- if (Tok.isNot(tok::r_paren)) {
+ if (!AteExtraColon && isTokenStringLiteral()) {
while (1) {
ExprResult Clobber(ParseAsmStringLiteral());
@@ -808,11 +817,49 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
}
}
}
+ if (!isGotoAsm && (Tok.isNot(tok::r_paren) || AteExtraColon)) {
+ Diag(Tok, diag::err_expected) << tok::r_paren;
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return StmtError();
+ }
+ // Parse the goto label, if present.
+ unsigned NumLabels = 0;
+ if (AteExtraColon || Tok.is(tok::colon)) {
+ if (!AteExtraColon)
+ ConsumeToken();
+
+ while (true) {
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return StmtError();
+ }
+ LabelDecl *LD = Actions.LookupOrCreateLabel(Tok.getIdentifierInfo(),
+ Tok.getLocation());
+ Names.push_back(Tok.getIdentifierInfo());
+ if (!LD) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return StmtError();
+ }
+ ExprResult Res =
+ Actions.ActOnAddrLabel(Tok.getLocation(), Tok.getLocation(), LD);
+ Exprs.push_back(Res.get());
+ NumLabels++;
+ ConsumeToken();
+ if (!TryConsumeToken(tok::comma))
+ break;
+ }
+ } else if (isGotoAsm) {
+ Diag(Tok, diag::err_expected) << tok::colon;
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return StmtError();
+ }
T.consumeClose();
return Actions.ActOnGCCAsmStmt(
AsmLoc, false, isVolatile, NumOutputs, NumInputs, Names.data(),
- Constraints, Exprs, AsmString.get(), Clobbers, T.getCloseLocation());
+ Constraints, Exprs, AsmString.get(), Clobbers, NumLabels,
+ T.getCloseLocation());
}
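(A minimal sketch of the GNU C 'asm goto' form these hunks start accepting; the constraint strings and label name are illustrative. Per the new checks, outputs are still rejected and the label list follows the clobbers:)

int f(int x) {
  asm goto("testl %0, %0; jnz %l1"
           :              /* no outputs: would hit err_asm_goto_cannot_have_output */
           : "r"(x)       /* inputs */
           : "cc"         /* clobbers */
           : on_error);   /* labels, lowered via ActOnAddrLabel above */
  return 0;
on_error:
  return 1;
}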
/// ParseAsmOperands - Parse the asm-operands production as used by
diff --git a/lib/Parse/ParseTemplate.cpp b/lib/Parse/ParseTemplate.cpp
index e0a7cc6e856d..9bb5b6eac37e 100644
--- a/lib/Parse/ParseTemplate.cpp
+++ b/lib/Parse/ParseTemplate.cpp
@@ -1,9 +1,8 @@
//===--- ParseTemplate.cpp - Template Parsing -----------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -19,6 +18,7 @@
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
+#include "llvm/Support/TimeProfiler.h"
using namespace clang;
/// Parse a template declaration, explicit instantiation, or
@@ -49,6 +49,15 @@ Decl *Parser::ParseDeclarationStartingWithTemplate(
/// template-declaration: [C++ temp]
/// 'export'[opt] 'template' '<' template-parameter-list '>' declaration
///
+/// template-declaration: [C++2a]
+/// template-head declaration
+/// template-head concept-definition
+///
+/// TODO: requires-clause
+/// template-head: [C++2a]
+/// 'template' '<' template-parameter-list '>'
+/// requires-clause[opt]
+///
/// explicit-specialization: [ C++ temp.expl.spec]
/// 'template' '<' '>' declaration
Decl *Parser::ParseTemplateDeclarationOrSpecialization(
@@ -142,6 +151,12 @@ Decl *Parser::ParseTemplateDeclarationOrSpecialization(
ParseScopeFlags TemplateScopeFlags(this, NewFlags, isSpecialization);
// Parse the actual template declaration.
+ if (Tok.is(tok::kw_concept))
+ return ParseConceptDefinition(
+ ParsedTemplateInfo(&ParamLists, isSpecialization,
+ LastParamListWasEmpty),
+ DeclEnd);
+
return ParseSingleDeclarationAfterTemplate(
Context,
ParsedTemplateInfo(&ParamLists, isSpecialization, LastParamListWasEmpty),
@@ -232,6 +247,12 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
return nullptr;
}
+ llvm::TimeTraceScope TimeScope("ParseTemplate", [&]() {
+ return DeclaratorInfo.getIdentifier() != nullptr
+ ? DeclaratorInfo.getIdentifier()->getName()
+ : "<unknown>";
+ });
+
LateParsedAttrList LateParsedAttrs(true);
if (DeclaratorInfo.isFunctionDeclarator())
MaybeParseGNUAttributes(DeclaratorInfo, &LateParsedAttrs);
@@ -282,7 +303,7 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
return ParseFunctionDefinition(
DeclaratorInfo, ParsedTemplateInfo(&FakedParamLists,
/*isSpecialization=*/true,
- /*LastParamListWasEmpty=*/true),
+ /*lastParameterListWasEmpty=*/true),
&LateParsedAttrs);
}
}
@@ -309,6 +330,85 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
return ThisDecl;
}
+/// \brief Parse a single declaration that declares a concept.
+///
+/// \param DeclEnd will receive the source location of the last token
+/// within this declaration.
+///
+/// \returns the new declaration.
+Decl *
+Parser::ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
+ SourceLocation &DeclEnd) {
+ assert(TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate &&
+ "Template information required");
+ assert(Tok.is(tok::kw_concept) &&
+ "ParseConceptDefinition must be called when at a 'concept' keyword");
+
+ ConsumeToken(); // Consume 'concept'
+
+ SourceLocation BoolKWLoc;
+ if (TryConsumeToken(tok::kw_bool, BoolKWLoc))
+ Diag(Tok.getLocation(), diag::ext_concept_legacy_bool_keyword) <<
+ FixItHint::CreateRemoval(SourceLocation(BoolKWLoc));
+
+ DiagnoseAndSkipCXX11Attributes();
+
+ CXXScopeSpec SS;
+ if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
+ /*EnteringContext=*/false, /*MayBePseudoDestructor=*/nullptr,
+ /*IsTypename=*/false, /*LastII=*/nullptr, /*OnlyNamespace=*/true) ||
+ SS.isInvalid()) {
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
+
+ if (SS.isNotEmpty())
+ Diag(SS.getBeginLoc(),
+ diag::err_concept_definition_not_identifier);
+
+ UnqualifiedId Result;
+ if (ParseUnqualifiedId(SS, /*EnteringContext=*/false,
+ /*AllowDestructorName=*/false,
+ /*AllowConstructorName=*/false,
+ /*AllowDeductionGuide=*/false,
+ /*ObjectType=*/ParsedType(), /*TemplateKWLoc=*/nullptr,
+ Result)) {
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
+
+ if (Result.getKind() != UnqualifiedIdKind::IK_Identifier) {
+ Diag(Result.getBeginLoc(), diag::err_concept_definition_not_identifier);
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
+
+ IdentifierInfo *Id = Result.Identifier;
+ SourceLocation IdLoc = Result.getBeginLoc();
+
+ DiagnoseAndSkipCXX11Attributes();
+
+ if (!TryConsumeToken(tok::equal)) {
+ Diag(Tok.getLocation(), diag::err_expected) << tok::equal;
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
+
+ ExprResult ConstraintExprResult =
+ Actions.CorrectDelayedTyposInExpr(ParseConstraintExpression());
+ if (ConstraintExprResult.isInvalid()) {
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
+
+ DeclEnd = Tok.getLocation();
+ ExpectAndConsumeSemi(diag::err_expected_semi_declaration);
+ Expr *ConstraintExpr = ConstraintExprResult.get();
+ return Actions.ActOnConceptDefinition(getCurScope(),
+ *TemplateInfo.TemplateParams,
+ Id, IdLoc, ConstraintExpr);
+}
+
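(A minimal concept-definition of the shape ParseConceptDefinition handles; the constraint-expression is illustrative. The legacy 'concept bool' spelling is accepted with the compatibility warning and removal fixit emitted above:)

template<typename T>
concept Small = sizeof(T) <= sizeof(void *);

template<typename T>
concept bool Legacy = true;   // diagnosed via ext_concept_legacy_bool_keyword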
/// ParseTemplateParameters - Parses a template-parameter-list enclosed in
/// angle brackets. Depth is the depth of this template-parameter-list, which
/// is the number of template headers directly enclosing this template header.
@@ -913,7 +1013,7 @@ bool Parser::ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
PrevTokLocation = RAngleLoc;
} else {
PrevTokLocation = TokBeforeGreaterLoc;
- PP.EnterToken(Tok);
+ PP.EnterToken(Tok, /*IsReinject=*/true);
Tok = Greater;
}
@@ -1025,6 +1125,8 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
// If we failed to parse the template ID but skipped ahead to a >, we're not
// going to be able to form a token annotation. Eat the '>' if present.
TryConsumeToken(tok::greater);
+ // FIXME: Annotate the token stream so we don't produce the same errors
+ // again if we're doing this annotation as part of a tentative parse.
return true;
}
@@ -1033,13 +1135,15 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
// Build the annotation token.
if (TNK == TNK_Type_template && AllowTypeAnnotation) {
TypeResult Type = Actions.ActOnTemplateIdType(
- SS, TemplateKWLoc, Template, TemplateName.Identifier,
+ getCurScope(), SS, TemplateKWLoc, Template, TemplateName.Identifier,
TemplateNameLoc, LAngleLoc, TemplateArgsPtr, RAngleLoc);
if (Type.isInvalid()) {
// If we failed to parse the template ID but skipped ahead to a >, we're
// not going to be able to form a token annotation. Eat the '>' if
// present.
TryConsumeToken(tok::greater);
+ // FIXME: Annotate the token stream so we don't produce the same errors
+ // again if we're doing this annotation as part of a tentative parse.
return true;
}
@@ -1102,14 +1206,16 @@ void Parser::AnnotateTemplateIdTokenAsType(bool IsClassName) {
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
assert((TemplateId->Kind == TNK_Type_template ||
- TemplateId->Kind == TNK_Dependent_template_name) &&
+ TemplateId->Kind == TNK_Dependent_template_name ||
+ TemplateId->Kind == TNK_Undeclared_template) &&
"Only works for type and dependent templates");
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
TypeResult Type
- = Actions.ActOnTemplateIdType(TemplateId->SS,
+ = Actions.ActOnTemplateIdType(getCurScope(),
+ TemplateId->SS,
TemplateId->TemplateKWLoc,
TemplateId->Template,
TemplateId->Name,
@@ -1266,36 +1372,6 @@ ParsedTemplateArgument Parser::ParseTemplateArgument() {
ExprArg.get(), Loc);
}
-/// Determine whether the current tokens can only be parsed as a
-/// template argument list (starting with the '<') and never as a '<'
-/// expression.
-bool Parser::IsTemplateArgumentList(unsigned Skip) {
- struct AlwaysRevertAction : TentativeParsingAction {
- AlwaysRevertAction(Parser &P) : TentativeParsingAction(P) { }
- ~AlwaysRevertAction() { Revert(); }
- } Tentative(*this);
-
- while (Skip) {
- ConsumeAnyToken();
- --Skip;
- }
-
- // '<'
- if (!TryConsumeToken(tok::less))
- return false;
-
- // An empty template argument list.
- if (Tok.is(tok::greater))
- return true;
-
- // See whether we have declaration specifiers, which indicate a type.
- while (isCXXDeclarationSpecifier() == TPResult::True)
- ConsumeAnyToken();
-
- // If we have a '>' or a ',' then this is a template argument list.
- return Tok.isOneOf(tok::greater, tok::comma);
-}
-
/// ParseTemplateArgumentList - Parse a C++ template-argument-list
/// (C++ [temp.names]). Returns true if there was an error.
///
@@ -1420,7 +1496,7 @@ void Parser::ParseLateTemplatedFuncDef(LateParsedTemplate &LPT) {
// Append the current token at the end of the new token stream so that it
// doesn't get lost.
LPT.Toks.push_back(Tok);
- PP.EnterTokenStream(LPT.Toks, true);
+ PP.EnterTokenStream(LPT.Toks, true, /*IsReinject*/true);
// Consume the previously pushed token.
ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
diff --git a/lib/Parse/ParseTentative.cpp b/lib/Parse/ParseTentative.cpp
index de39e0675fdb..a413f9a94148 100644
--- a/lib/Parse/ParseTentative.cpp
+++ b/lib/Parse/ParseTentative.cpp
@@ -1,9 +1,8 @@
//===--- ParseTentative.cpp - Ambiguity Resolution Parsing ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -591,9 +590,11 @@ bool Parser::isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous) {
} else if (Context == TypeIdAsTemplateArgument &&
(Tok.isOneOf(tok::greater, tok::comma) ||
(getLangOpts().CPlusPlus11 &&
- (Tok.is(tok::greatergreater) ||
+ (Tok.isOneOf(tok::greatergreater,
+ tok::greatergreatergreater) ||
(Tok.is(tok::ellipsis) &&
NextToken().isOneOf(tok::greater, tok::greatergreater,
+ tok::greatergreatergreater,
tok::comma)))))) {
TPR = TPResult::True;
isAmbiguous = true;
@@ -652,12 +653,15 @@ Parser::isCXX11AttributeSpecifier(bool Disambiguate,
if (!Disambiguate && !getLangOpts().ObjC)
return CAK_AttributeSpecifier;
+ // '[[using ns: ...]]' is an attribute.
+ if (GetLookAheadToken(2).is(tok::kw_using))
+ return CAK_AttributeSpecifier;
+
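(For illustration, the C++17 attribute-using-prefix form that this lookahead now classifies without tentative parsing; the attribute names are just examples:)

[[using gnu: always_inline, hot]] inline int f() { return 0; }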
RevertingTentativeParsingAction PA(*this);
// Opening brackets were checked for above.
ConsumeBracket();
- // Outside Obj-C++11, treat anything with a matching ']]' as an attribute.
if (!getLangOpts().ObjC) {
ConsumeBracket();
@@ -676,24 +680,45 @@ Parser::isCXX11AttributeSpecifier(bool Disambiguate,
// 4) [[obj]{ return self; }() doStuff]; Lambda in message send.
// (1) is an attribute, (2) is ill-formed, and (3) and (4) are accepted.
- // If we have a lambda-introducer, then this is definitely not a message send.
+ // Check to see if this is a lambda-expression.
// FIXME: If this disambiguation is too slow, fold the tentative lambda parse
// into the tentative attribute parse below.
- LambdaIntroducer Intro;
- if (!TryParseLambdaIntroducer(Intro)) {
- // A lambda cannot end with ']]', and an attribute must.
- bool IsAttribute = Tok.is(tok::r_square);
-
- if (IsAttribute)
- // Case 1: C++11 attribute.
- return CAK_AttributeSpecifier;
+ {
+ RevertingTentativeParsingAction LambdaTPA(*this);
+ LambdaIntroducer Intro;
+ LambdaIntroducerTentativeParse Tentative;
+ if (ParseLambdaIntroducer(Intro, &Tentative)) {
+ // We hit a hard error after deciding this was not an attribute.
+ // FIXME: Don't parse and annotate expressions when disambiguating
+ // against an attribute.
+ return CAK_NotAttributeSpecifier;
+ }
- if (OuterMightBeMessageSend)
- // Case 4: Lambda in message send.
+ switch (Tentative) {
+ case LambdaIntroducerTentativeParse::MessageSend:
+ // Case 3: The inner construct is definitely a message send, so the
+ // outer construct is definitely not an attribute.
return CAK_NotAttributeSpecifier;
- // Case 2: Lambda in array size / index.
- return CAK_InvalidAttributeSpecifier;
+ case LambdaIntroducerTentativeParse::Success:
+ case LambdaIntroducerTentativeParse::Incomplete:
+ // This is a lambda-introducer or attribute-specifier.
+ if (Tok.is(tok::r_square))
+ // Case 1: C++11 attribute.
+ return CAK_AttributeSpecifier;
+
+ if (OuterMightBeMessageSend)
+ // Case 4: Lambda in message send.
+ return CAK_NotAttributeSpecifier;
+
+ // Case 2: Lambda in array size / index.
+ return CAK_InvalidAttributeSpecifier;
+
+ case LambdaIntroducerTentativeParse::Invalid:
+ // No idea what this is; we couldn't parse it as a lambda-introducer.
+ // Might still be an attribute-specifier or a message send.
+ break;
+ }
}
ConsumeBracket();
@@ -1148,7 +1173,7 @@ bool Parser::isTentativelyDeclared(IdentifierInfo *II) {
}
namespace {
-class TentativeParseCCC : public CorrectionCandidateCallback {
+class TentativeParseCCC final : public CorrectionCandidateCallback {
public:
TentativeParseCCC(const Token &Next) {
WantRemainingKeywords = false;
@@ -1166,6 +1191,10 @@ public:
return CorrectionCandidateCallback::ValidateCandidate(Candidate);
}
+
+ std::unique_ptr<CorrectionCandidateCallback> clone() override {
+ return llvm::make_unique<TentativeParseCCC>(*this);
+ }
};
}
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a declaration
@@ -1173,12 +1202,17 @@ public:
/// be either a decl-specifier or a function-style cast, and TPResult::Error
/// if a parsing error was found and reported.
///
-/// If HasMissingTypename is provided, a name with a dependent scope specifier
-/// will be treated as ambiguous if the 'typename' keyword is missing. If this
-/// happens, *HasMissingTypename will be set to 'true'. This will also be used
-/// as an indicator that undeclared identifiers (which will trigger a later
-/// parse error) should be treated as types. Returns TPResult::Ambiguous in
-/// such cases.
+/// If InvalidAsDeclSpec is not null, some cases that would be ill-formed as
+/// declaration specifiers but possibly valid as some other kind of construct
+/// return TPResult::Ambiguous instead of TPResult::False. When this happens,
+/// the intent is to keep trying to disambiguate, on the basis that we might
+/// find a better reason to treat this construct as a declaration later on.
+/// When this happens and the name could possibly be valid in some other
+/// syntactic context, *InvalidAsDeclSpec is set to 'true'. The current cases
+/// that trigger this are:
+///
+/// * When parsing X::Y (with no 'typename') where X is dependent
+/// * When parsing X<Y> where X is undeclared
///
/// decl-specifier:
/// storage-class-specifier
@@ -1187,6 +1221,7 @@ public:
/// 'friend'
/// 'typedef'
/// [C++11] 'constexpr'
+/// [C++20] 'consteval'
/// [GNU] attributes declaration-specifiers[opt]
///
/// storage-class-specifier:
@@ -1276,7 +1311,7 @@ public:
///
Parser::TPResult
Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
- bool *HasMissingTypename) {
+ bool *InvalidAsDeclSpec) {
switch (Tok.getKind()) {
case tok::identifier: {
// Check for need to substitute AltiVec __vector keyword
@@ -1294,8 +1329,8 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
// a parse error one way or another. In that case, tell the caller that
// this is ambiguous. Typo-correct to type and expression keywords and
// to types and identifiers, in order to try to recover from errors.
- switch (TryAnnotateName(false /* no nested name specifier */,
- llvm::make_unique<TentativeParseCCC>(Next))) {
+ TentativeParseCCC CCC(Next);
+ switch (TryAnnotateName(false /* no nested name specifier */, &CCC)) {
case ANK_Error:
return TPResult::Error;
case ANK_TentativeDecl:
@@ -1316,7 +1351,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
// argument is an error, and was probably intended to be a type.
return GreaterThanIsOperator ? TPResult::True : TPResult::False;
case ANK_Unresolved:
- return HasMissingTypename ? TPResult::Ambiguous : TPResult::False;
+ return InvalidAsDeclSpec ? TPResult::Ambiguous : TPResult::False;
case ANK_Success:
break;
}
@@ -1337,7 +1372,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
}
// We annotated this token as something. Recurse to handle whatever we got.
- return isCXXDeclarationSpecifier(BracedCastResult, HasMissingTypename);
+ return isCXXDeclarationSpecifier(BracedCastResult, InvalidAsDeclSpec);
}
case tok::kw_typename: // typename T::type
@@ -1345,7 +1380,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
// recurse to handle whatever we get.
if (TryAnnotateTypeOrScopeToken())
return TPResult::Error;
- return isCXXDeclarationSpecifier(BracedCastResult, HasMissingTypename);
+ return isCXXDeclarationSpecifier(BracedCastResult, InvalidAsDeclSpec);
case tok::coloncolon: { // ::foo::bar
const Token &Next = NextToken();
@@ -1360,7 +1395,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
// recurse to handle whatever we get.
if (TryAnnotateTypeOrScopeToken())
return TPResult::Error;
- return isCXXDeclarationSpecifier(BracedCastResult, HasMissingTypename);
+ return isCXXDeclarationSpecifier(BracedCastResult, InvalidAsDeclSpec);
// decl-specifier:
// storage-class-specifier
@@ -1372,6 +1407,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case tok::kw_friend:
case tok::kw_typedef:
case tok::kw_constexpr:
+ case tok::kw_consteval:
// storage-class-specifier
case tok::kw_register:
case tok::kw_static:
@@ -1411,11 +1447,24 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
// cv-qualifier
case tok::kw_const:
case tok::kw_volatile:
+ return TPResult::True;
+
+ // OpenCL address space qualifiers
+ case tok::kw_private:
+ if (!getLangOpts().OpenCL)
+ return TPResult::False;
+ LLVM_FALLTHROUGH;
case tok::kw___private:
case tok::kw___local:
case tok::kw___global:
case tok::kw___constant:
case tok::kw___generic:
+ // OpenCL access qualifiers
+ case tok::kw___read_only:
+ case tok::kw___write_only:
+ case tok::kw___read_write:
+ // OpenCL pipe
+ case tok::kw_pipe:
// GNU
case tok::kw_restrict:
@@ -1455,6 +1504,16 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case tok::annot_template_id: {
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ // If lookup for the template-name found nothing, don't assume we have a
+ // definitive disambiguation result yet.
+ if (TemplateId->Kind == TNK_Undeclared_template && InvalidAsDeclSpec) {
+ // 'template-id(' can be a valid expression but not a valid decl spec if
+ // the template-name is not declared, but we don't consider this to be a
+ // definitive disambiguation. In any other context, it's an error either
+ // way.
+ *InvalidAsDeclSpec = NextToken().is(tok::l_paren);
+ return TPResult::Ambiguous;
+ }
if (TemplateId->Kind != TNK_Type_template)
return TPResult::False;
CXXScopeSpec SS;
@@ -1483,17 +1542,28 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
TPResult TPR = TPResult::False;
if (!isIdentifier)
TPR = isCXXDeclarationSpecifier(BracedCastResult,
- HasMissingTypename);
+ InvalidAsDeclSpec);
if (isIdentifier ||
TPR == TPResult::True || TPR == TPResult::Error)
return TPResult::Error;
- if (HasMissingTypename) {
+ if (InvalidAsDeclSpec) {
// We can't tell whether this is a missing 'typename' or a valid
// expression.
- *HasMissingTypename = true;
+ *InvalidAsDeclSpec = true;
return TPResult::Ambiguous;
+ } else {
+ // In MS mode, if InvalidAsDeclSpec is not provided, and the tokens
+          // are of the form *) or &) *> or &> &&>, this can't be an expression.
+ // The typename must be missing.
+ if (getLangOpts().MSVCCompat) {
+ if (((Tok.is(tok::amp) || Tok.is(tok::star)) &&
+ (NextToken().is(tok::r_paren) ||
+ NextToken().is(tok::greater))) ||
+ (Tok.is(tok::ampamp) && NextToken().is(tok::greater)))
+ return TPResult::True;
+ }
}
} else {
// Try to resolve the name. If it doesn't exist, assume it was
@@ -1520,8 +1590,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
? TPResult::True
: TPResult::False;
case ANK_Unresolved:
- return HasMissingTypename ? TPResult::Ambiguous
- : TPResult::False;
+ return InvalidAsDeclSpec ? TPResult::Ambiguous : TPResult::False;
case ANK_Success:
break;
}
@@ -1529,8 +1598,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
// Annotated it, check again.
assert(Tok.isNot(tok::annot_cxxscope) ||
NextToken().isNot(tok::identifier));
- return isCXXDeclarationSpecifier(BracedCastResult,
- HasMissingTypename);
+ return isCXXDeclarationSpecifier(BracedCastResult, InvalidAsDeclSpec);
}
}
return TPResult::False;
@@ -1601,6 +1669,8 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case tok::kw___float128:
case tok::kw_void:
case tok::annot_decltype:
+#define GENERIC_IMAGE_TYPE(ImgType, Id) case tok::kw_##ImgType##_t:
+#include "clang/Basic/OpenCLImageTypes.def"
if (NextToken().is(tok::l_paren))
return TPResult::Ambiguous;
@@ -1694,6 +1764,8 @@ bool Parser::isCXXDeclarationSpecifierAType() {
case tok::kw_void:
case tok::kw___unknown_anytype:
case tok::kw___auto_type:
+#define GENERIC_IMAGE_TYPE(ImgType, Id) case tok::kw_##ImgType##_t:
+#include "clang/Basic/OpenCLImageTypes.def"
return true;
case tok::kw_auto:
@@ -1855,31 +1927,31 @@ Parser::TryParseParameterDeclarationClause(bool *InvalidAsDeclaration,
// decl-specifier-seq '{' is not a parameter in C++11.
TPResult TPR = isCXXDeclarationSpecifier(TPResult::False,
InvalidAsDeclaration);
+ // A declaration-specifier (not followed by '(' or '{') means this can't be
+ // an expression, but it could still be a template argument.
+ if (TPR != TPResult::Ambiguous &&
+ !(VersusTemplateArgument && TPR == TPResult::True))
+ return TPR;
- if (VersusTemplateArgument && TPR == TPResult::True) {
- // Consume the decl-specifier-seq. We have to look past it, since a
- // type-id might appear here in a template argument.
- bool SeenType = false;
- do {
- SeenType |= isCXXDeclarationSpecifierAType();
- if (TryConsumeDeclarationSpecifier() == TPResult::Error)
- return TPResult::Error;
-
- // If we see a parameter name, this can't be a template argument.
- if (SeenType && Tok.is(tok::identifier))
- return TPResult::True;
-
- TPR = isCXXDeclarationSpecifier(TPResult::False,
- InvalidAsDeclaration);
- if (TPR == TPResult::Error)
- return TPR;
- } while (TPR != TPResult::False);
- } else if (TPR == TPResult::Ambiguous) {
- // Disambiguate what follows the decl-specifier.
+ bool SeenType = false;
+ do {
+ SeenType |= isCXXDeclarationSpecifierAType();
if (TryConsumeDeclarationSpecifier() == TPResult::Error)
return TPResult::Error;
- } else
- return TPR;
+
+ // If we see a parameter name, this can't be a template argument.
+ if (SeenType && Tok.is(tok::identifier))
+ return TPResult::True;
+
+ TPR = isCXXDeclarationSpecifier(TPResult::False,
+ InvalidAsDeclaration);
+ if (TPR == TPResult::Error)
+ return TPR;
+
+ // Two declaration-specifiers means this can't be an expression.
+ if (TPR == TPResult::True && !VersusTemplateArgument)
+ return TPR;
+ } while (TPR != TPResult::False);
// declarator
// abstract-declarator[opt]
@@ -1998,3 +2070,54 @@ Parser::TPResult Parser::TryParseBracketDeclarator() {
return TPResult::Ambiguous;
}
+
+/// Determine whether we might be looking at the '<' template-argument-list '>'
+/// of a template-id or simple-template-id, rather than a less-than comparison.
+/// This will often fail and produce an ambiguity, but should never be wrong
+/// if it returns True or False.
+Parser::TPResult Parser::isTemplateArgumentList(unsigned TokensToSkip) {
+ if (!TokensToSkip) {
+ if (Tok.isNot(tok::less))
+ return TPResult::False;
+ if (NextToken().is(tok::greater))
+ return TPResult::True;
+ }
+
+ RevertingTentativeParsingAction PA(*this);
+
+ while (TokensToSkip) {
+ ConsumeAnyToken();
+ --TokensToSkip;
+ }
+
+ if (!TryConsumeToken(tok::less))
+ return TPResult::False;
+
+ // We can't do much to tell an expression apart from a template-argument,
+ // but one good distinguishing factor is that a "decl-specifier" not
+ // followed by '(' or '{' can't appear in an expression.
+ bool InvalidAsTemplateArgumentList = false;
+ if (isCXXDeclarationSpecifier(TPResult::False,
+ &InvalidAsTemplateArgumentList) ==
+ TPResult::True)
+ return TPResult::True;
+ if (InvalidAsTemplateArgumentList)
+ return TPResult::False;
+
+ // FIXME: In many contexts, X<thing1, Type> can only be a
+ // template-argument-list. But that's not true in general:
+ //
+ // using b = int;
+ // void f() {
+ // int a = A<B, b, c = C>D; // OK, declares b, not a template-id.
+ //
+ // X<Y<0, int> // ', int>' might be end of X's template argument list
+ //
+ // We might be able to disambiguate a few more cases if we're careful.
+
+ // A template-argument-list must be terminated by a '>'.
+ if (SkipUntil({tok::greater, tok::greatergreater, tok::greatergreatergreater},
+ StopAtSemi | StopBeforeMatch))
+ return TPResult::Ambiguous;
+ return TPResult::False;
+}
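(A rough sketch of the heuristic, with placeholder names; the exact recovery behaviour depends on the caller. A decl-specifier right after '<' that is not followed by '(' or '{' can only start a template-argument-list, while plain expressions stay ambiguous until a terminating '>' is or is not found:)

undeclared<unsigned, 4> x;   // 'unsigned' after '<' => treated as a template-id
                             // during error recovery
int a = 1, b = 2;
bool c = a < b;              // no '>' before ';' => not a template-argument-list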
diff --git a/lib/Parse/Parser.cpp b/lib/Parse/Parser.cpp
index a93db799f8fe..9124f1558664 100644
--- a/lib/Parse/Parser.cpp
+++ b/lib/Parse/Parser.cpp
@@ -1,9 +1,8 @@
//===--- Parser.cpp - C Language Family Parser ----------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -316,6 +315,14 @@ bool Parser::SkipUntil(ArrayRef<tok::TokenKind> Toks, SkipUntilFlags Flags) {
else
SkipUntil(tok::r_brace);
break;
+ case tok::question:
+ // Recursively skip ? ... : pairs; these function as brackets. But
+ // still stop at a semicolon if requested.
+ ConsumeToken();
+ SkipUntil(tok::colon,
+ SkipUntilFlags(unsigned(Flags) &
+ unsigned(StopAtCodeCompletion | StopAtSemi)));
+ break;
// Okay, we found a ']' or '}' or ')', which we think should be balanced.
// Since the user wasn't looking for this token (if they were, it would
@@ -461,6 +468,8 @@ void Parser::Initialize() {
Ident_sealed = nullptr;
Ident_override = nullptr;
Ident_GNU_final = nullptr;
+ Ident_import = nullptr;
+ Ident_module = nullptr;
Ident_super = &PP.getIdentifierTable().get("super");
@@ -513,6 +522,11 @@ void Parser::Initialize() {
PP.SetPoisonReason(Ident_AbnormalTermination,diag::err_seh___finally_block);
}
+ if (getLangOpts().CPlusPlusModules) {
+ Ident_import = PP.getIdentifierInfo("import");
+ Ident_module = PP.getIdentifierInfo("module");
+ }
+
Actions.Initialize();
// Prime the lexer look-ahead.
@@ -526,6 +540,16 @@ void Parser::LateTemplateParserCleanupCallback(void *P) {
DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(((Parser *)P)->TemplateIds);
}
+/// Parse the first top-level declaration in a translation unit.
+///
+/// translation-unit:
+/// [C] external-declaration
+/// [C] translation-unit external-declaration
+/// [C++] top-level-declaration-seq[opt]
+/// [C++20] global-module-fragment[opt] module-declaration
+/// top-level-declaration-seq[opt] private-module-fragment[opt]
+///
+/// Note that in C, it is an error if there is no first declaration.
bool Parser::ParseFirstTopLevelDecl(DeclGroupPtrTy &Result) {
Actions.ActOnStartOfTranslationUnit();
@@ -533,7 +557,7 @@ bool Parser::ParseFirstTopLevelDecl(DeclGroupPtrTy &Result) {
// declaration. C++ doesn't have this restriction. We also don't want to
// complain if we have a precompiled header, although technically if the PCH
// is empty we should still emit the (pedantic) diagnostic.
- bool NoTopLevelDecls = ParseTopLevelDecl(Result);
+ bool NoTopLevelDecls = ParseTopLevelDecl(Result, true);
if (NoTopLevelDecls && !Actions.getASTContext().getExternalSource() &&
!getLangOpts().CPlusPlus)
Diag(diag::ext_empty_translation_unit);
@@ -543,7 +567,11 @@ bool Parser::ParseFirstTopLevelDecl(DeclGroupPtrTy &Result) {
/// ParseTopLevelDecl - Parse one top-level declaration, return whatever the
/// action tells us to. This returns true if the EOF was encountered.
-bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result) {
+///
+/// top-level-declaration:
+/// declaration
+/// [C++20] module-import-declaration
+bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl) {
DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(TemplateIds);
// Skip over the EOF token, flagging end of previous input for incremental
@@ -558,13 +586,46 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result) {
return false;
case tok::kw_export:
- if (NextToken().isNot(tok::kw_module))
+ switch (NextToken().getKind()) {
+ case tok::kw_module:
+ goto module_decl;
+
+ // Note: no need to handle kw_import here. We only form kw_import under
+ // the Modules TS, and in that case 'export import' is parsed as an
+ // export-declaration containing an import-declaration.
+
+ // Recognize context-sensitive C++20 'export module' and 'export import'
+ // declarations.
+ case tok::identifier: {
+ IdentifierInfo *II = NextToken().getIdentifierInfo();
+ if ((II == Ident_module || II == Ident_import) &&
+ GetLookAheadToken(2).isNot(tok::coloncolon)) {
+ if (II == Ident_module)
+ goto module_decl;
+ else
+ goto import_decl;
+ }
break;
- LLVM_FALLTHROUGH;
+ }
+
+ default:
+ break;
+ }
+ break;
+
case tok::kw_module:
- Result = ParseModuleDecl();
+ module_decl:
+ Result = ParseModuleDecl(IsFirstDecl);
return false;
+ // tok::kw_import is handled by ParseExternalDeclaration. (Under the Modules
+ // TS, an import can occur within an export block.)
+ import_decl: {
+ Decl *ImportDecl = ParseModuleImport(SourceLocation());
+ Result = Actions.ConvertDeclToDeclGroup(ImportDecl);
+ return false;
+ }
+
case tok::annot_module_include:
Actions.ActOnModuleInclude(Tok.getLocation(),
reinterpret_cast<Module *>(
@@ -584,10 +645,6 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result) {
ConsumeAnnotationToken();
return false;
- case tok::annot_pragma_attribute:
- HandlePragmaAttribute();
- return false;
-
case tok::eof:
// Late template parsing can begin.
if (getLangOpts().DelayedTemplateParsing)
@@ -600,6 +657,21 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result) {
//else don't tell Sema that we ended parsing: more input might come.
return true;
+ case tok::identifier:
+ // C++2a [basic.link]p3:
+ // A token sequence beginning with 'export[opt] module' or
+ // 'export[opt] import' and not immediately followed by '::'
+ // is never interpreted as the declaration of a top-level-declaration.
+ if ((Tok.getIdentifierInfo() == Ident_module ||
+ Tok.getIdentifierInfo() == Ident_import) &&
+ NextToken().isNot(tok::coloncolon)) {
+ if (Tok.getIdentifierInfo() == Ident_module)
+ goto module_decl;
+ else
+ goto import_decl;
+ }
+ break;
+
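(A short sketch of the context-sensitive keywords handled here, assuming a C++20 translation unit; module names are placeholders:)

export module m;   // 'module' is an ordinary identifier token, but not being
                   // followed by '::' makes this a module-declaration
import other;      // likewise parsed as a module-import-declaration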
default:
break;
}
@@ -699,6 +771,9 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
case tok::annot_pragma_dump:
HandlePragmaDump();
return nullptr;
+ case tok::annot_pragma_attribute:
+ HandlePragmaAttribute();
+ return nullptr;
case tok::semi:
// Either a C++11 empty-declaration or attribute-declaration.
SingleDecl =
@@ -770,7 +845,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
SingleDecl = ParseModuleImport(SourceLocation());
break;
case tok::kw_export:
- if (getLangOpts().ModulesTS) {
+ if (getLangOpts().CPlusPlusModules || getLangOpts().ModulesTS) {
SingleDecl = ParseExportDeclaration();
break;
}
@@ -914,7 +989,8 @@ bool Parser::isStartOfFunctionDefinition(const ParsingDeclarator &Declarator) {
/// declaration: [C99 6.7]
/// declaration-specifiers init-declarator-list[opt] ';'
/// [!C99] init-declarator-list ';' [TODO: warn in c99 mode]
-/// [OMP] threadprivate-directive [TODO]
+/// [OMP] threadprivate-directive
+/// [OMP] allocate-directive [TODO]
///
Parser::DeclGroupPtrTy
Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
@@ -981,9 +1057,10 @@ Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
if (getLangOpts().ObjC && Tok.is(tok::at)) {
SourceLocation AtLoc = ConsumeToken(); // the "@"
if (!Tok.isObjCAtKeyword(tok::objc_interface) &&
- !Tok.isObjCAtKeyword(tok::objc_protocol)) {
+ !Tok.isObjCAtKeyword(tok::objc_protocol) &&
+ !Tok.isObjCAtKeyword(tok::objc_implementation)) {
Diag(Tok, diag::err_objc_unexpected_attr);
- SkipUntil(tok::semi); // FIXME: better skip?
+ SkipUntil(tok::semi);
return nullptr;
}
@@ -998,6 +1075,9 @@ Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
if (Tok.isObjCAtKeyword(tok::objc_protocol))
return ParseObjCAtProtocolDeclaration(AtLoc, DS.getAttributes());
+ if (Tok.isObjCAtKeyword(tok::objc_implementation))
+ return ParseObjCAtImplementationDeclaration(AtLoc, DS.getAttributes());
+
return Actions.ConvertDeclToDeclGroup(
ParseObjCAtInterfaceDeclaration(AtLoc, DS.getAttributes()));
}
@@ -1465,7 +1545,7 @@ void Parser::AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation) {
if (PP.isBacktrackEnabled())
PP.RevertCachedTokens(1);
else
- PP.EnterToken(Tok);
+ PP.EnterToken(Tok, /*IsReinject=*/true);
Tok.setKind(tok::annot_cxxscope);
Tok.setAnnotationValue(Actions.SaveNestedNameSpecifierAnnotation(SS));
Tok.setAnnotationRange(SS.getRange());
@@ -1488,7 +1568,7 @@ void Parser::AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation) {
/// no typo correction will be performed.
Parser::AnnotatedNameKind
Parser::TryAnnotateName(bool IsAddressOfOperand,
- std::unique_ptr<CorrectionCandidateCallback> CCC) {
+ CorrectionCandidateCallback *CCC) {
assert(Tok.is(tok::identifier) || Tok.is(tok::annot_cxxscope));
const bool EnteringContext = false;
@@ -1524,9 +1604,23 @@ Parser::TryAnnotateName(bool IsAddressOfOperand,
// after a scope specifier, because in general we can't recover from typos
// there (eg, after correcting 'A::template B<X>::C' [sic], we would need to
// jump back into scope specifier parsing).
- Sema::NameClassification Classification = Actions.ClassifyName(
- getCurScope(), SS, Name, NameLoc, Next, IsAddressOfOperand,
- SS.isEmpty() ? std::move(CCC) : nullptr);
+ Sema::NameClassification Classification =
+ Actions.ClassifyName(getCurScope(), SS, Name, NameLoc, Next,
+ IsAddressOfOperand, SS.isEmpty() ? CCC : nullptr);
+
+ // If name lookup found nothing and we guessed that this was a template name,
+ // double-check before committing to that interpretation. C++20 requires that
+ // we interpret this as a template-id if it can be, but if it can't be, then
+ // this is an error recovery case.
+ if (Classification.getKind() == Sema::NC_UndeclaredTemplate &&
+ isTemplateArgumentList(1) == TPResult::False) {
+ // It's not a template-id; re-classify without the '<' as a hint.
+ Token FakeNext = Next;
+ FakeNext.setKind(tok::unknown);
+ Classification =
+ Actions.ClassifyName(getCurScope(), SS, Name, NameLoc, FakeNext,
+ IsAddressOfOperand, SS.isEmpty() ? CCC : nullptr);
+ }
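(A rough illustration of the double-check; both names are undeclared, so these are error-recovery cases and the exact diagnostics may vary:)

undeclared_a<int> x;       // '<int>' parses as a template-argument-list:
                           // keep the template-id interpretation
int y = undeclared_b < 1;  // '< 1;' cannot be one: re-classified without the '<' hint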
switch (Classification.getKind()) {
case Sema::NC_Error:
@@ -1596,7 +1690,8 @@ Parser::TryAnnotateName(bool IsAddressOfOperand,
}
LLVM_FALLTHROUGH;
case Sema::NC_VarTemplate:
- case Sema::NC_FunctionTemplate: {
+ case Sema::NC_FunctionTemplate:
+ case Sema::NC_UndeclaredTemplate: {
// We have a type, variable or function template followed by '<'.
ConsumeToken();
UnqualifiedId Id;
@@ -1669,7 +1764,7 @@ bool Parser::TryAnnotateTypeOrScopeToken() {
Token TypedefToken;
PP.Lex(TypedefToken);
bool Result = TryAnnotateTypeOrScopeToken();
- PP.EnterToken(Tok);
+ PP.EnterToken(Tok, /*IsReinject=*/true);
Tok = TypedefToken;
if (!Result)
Diag(Tok.getLocation(), diag::warn_expected_qualified_after_typename);
@@ -1719,7 +1814,8 @@ bool Parser::TryAnnotateTypeOrScopeToken() {
} else if (Tok.is(tok::annot_template_id)) {
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
if (TemplateId->Kind != TNK_Type_template &&
- TemplateId->Kind != TNK_Dependent_template_name) {
+ TemplateId->Kind != TNK_Dependent_template_name &&
+ TemplateId->Kind != TNK_Undeclared_template) {
Diag(Tok, diag::err_typename_refers_to_non_type_template)
<< Tok.getAnnotationRange();
return true;
@@ -1818,6 +1914,8 @@ bool Parser::TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
}
// If this is a template-id, annotate with a template-id or type token.
+ // FIXME: This appears to be dead code. We already have formed template-id
+ // tokens when parsing the scope specifier; this can never form a new one.
if (NextToken().is(tok::less)) {
TemplateTy Template;
UnqualifiedId TemplateName;
@@ -1828,14 +1926,19 @@ bool Parser::TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
/*hasTemplateKeyword=*/false, TemplateName,
/*ObjectType=*/nullptr, /*EnteringContext*/false, Template,
MemberOfUnknownSpecialization)) {
- // Consume the identifier.
- ConsumeToken();
- if (AnnotateTemplateIdToken(Template, TNK, SS, SourceLocation(),
- TemplateName)) {
- // If an unrecoverable error occurred, we need to return true here,
- // because the token stream is in a damaged state. We may not return
- // a valid identifier.
- return true;
+ // Only annotate an undeclared template name as a template-id if the
+ // following tokens have the form of a template argument list.
+ if (TNK != TNK_Undeclared_template ||
+ isTemplateArgumentList(1) != TPResult::False) {
+ // Consume the identifier.
+ ConsumeToken();
+ if (AnnotateTemplateIdToken(Template, TNK, SS, SourceLocation(),
+ TemplateName)) {
+ // If an unrecoverable error occurred, we need to return true here,
+ // because the token stream is in a damaged state. We may not
+ // return a valid identifier.
+ return true;
+ }
}
}
}
@@ -2071,38 +2174,82 @@ void Parser::ParseMicrosoftIfExistsExternalDeclaration() {
Braces.consumeClose();
}
-/// Parse a C++ Modules TS module declaration, which appears at the beginning
-/// of a module interface, module partition, or module implementation file.
+/// Parse a declaration beginning with the 'module' keyword or C++20
+/// context-sensitive keyword (optionally preceded by 'export').
///
-/// module-declaration: [Modules TS + P0273R0 + P0629R0]
-/// 'export'[opt] 'module' 'partition'[opt]
-/// module-name attribute-specifier-seq[opt] ';'
+/// module-declaration: [Modules TS + P0629R0]
+/// 'export'[opt] 'module' module-name attribute-specifier-seq[opt] ';'
///
-/// Note that 'partition' is a context-sensitive keyword.
-Parser::DeclGroupPtrTy Parser::ParseModuleDecl() {
+/// global-module-fragment: [C++2a]
+/// 'module' ';' top-level-declaration-seq[opt]
+/// module-declaration: [C++2a]
+/// 'export'[opt] 'module' module-name module-partition[opt]
+/// attribute-specifier-seq[opt] ';'
+/// private-module-fragment: [C++2a]
+/// 'module' ':' 'private' ';' top-level-declaration-seq[opt]
+Parser::DeclGroupPtrTy Parser::ParseModuleDecl(bool IsFirstDecl) {
SourceLocation StartLoc = Tok.getLocation();
Sema::ModuleDeclKind MDK = TryConsumeToken(tok::kw_export)
? Sema::ModuleDeclKind::Interface
: Sema::ModuleDeclKind::Implementation;
- assert(Tok.is(tok::kw_module) && "not a module declaration");
+ assert(
+ (Tok.is(tok::kw_module) ||
+ (Tok.is(tok::identifier) && Tok.getIdentifierInfo() == Ident_module)) &&
+ "not a module declaration");
SourceLocation ModuleLoc = ConsumeToken();
- if (Tok.is(tok::identifier) && NextToken().is(tok::identifier) &&
- Tok.getIdentifierInfo()->isStr("partition")) {
- // If 'partition' is present, this must be a module interface unit.
- if (MDK != Sema::ModuleDeclKind::Interface)
- Diag(Tok.getLocation(), diag::err_module_implementation_partition)
- << FixItHint::CreateInsertion(ModuleLoc, "export ");
- MDK = Sema::ModuleDeclKind::Partition;
+ // Attributes appear after the module name, not before.
+ // FIXME: Suggest moving the attributes later with a fixit.
+ DiagnoseAndSkipCXX11Attributes();
+
+ // Parse a global-module-fragment, if present.
+ if (getLangOpts().CPlusPlusModules && Tok.is(tok::semi)) {
+ SourceLocation SemiLoc = ConsumeToken();
+ if (!IsFirstDecl) {
+ Diag(StartLoc, diag::err_global_module_introducer_not_at_start)
+ << SourceRange(StartLoc, SemiLoc);
+ return nullptr;
+ }
+ if (MDK == Sema::ModuleDeclKind::Interface) {
+ Diag(StartLoc, diag::err_module_fragment_exported)
+ << /*global*/0 << FixItHint::CreateRemoval(StartLoc);
+ }
+ return Actions.ActOnGlobalModuleFragmentDecl(ModuleLoc);
+ }
+
+ // Parse a private-module-fragment, if present.
+ if (getLangOpts().CPlusPlusModules && Tok.is(tok::colon) &&
+ NextToken().is(tok::kw_private)) {
+ if (MDK == Sema::ModuleDeclKind::Interface) {
+ Diag(StartLoc, diag::err_module_fragment_exported)
+ << /*private*/1 << FixItHint::CreateRemoval(StartLoc);
+ }
ConsumeToken();
+ SourceLocation PrivateLoc = ConsumeToken();
+ DiagnoseAndSkipCXX11Attributes();
+ ExpectAndConsumeSemi(diag::err_private_module_fragment_expected_semi);
+ return Actions.ActOnPrivateModuleFragmentDecl(ModuleLoc, PrivateLoc);
}
SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Path;
if (ParseModuleName(ModuleLoc, Path, /*IsImport*/false))
return nullptr;
+ // Parse the optional module-partition.
+ if (Tok.is(tok::colon)) {
+ SourceLocation ColonLoc = ConsumeToken();
+ SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Partition;
+ if (ParseModuleName(ModuleLoc, Partition, /*IsImport*/false))
+ return nullptr;
+
+ // FIXME: Support module partition declarations.
+ Diag(ColonLoc, diag::err_unsupported_module_partition)
+ << SourceRange(ColonLoc, Partition.back().second);
+ // Recover by parsing as a non-partition.
+ }
+
// We don't support any module attributes yet; just parse them and diagnose.
ParsedAttributesWithRange Attrs(AttrFactory);
MaybeParseCXX11Attributes(Attrs);
@@ -2110,7 +2257,7 @@ Parser::DeclGroupPtrTy Parser::ParseModuleDecl() {
ExpectAndConsumeSemi(diag::err_module_expected_semi);
- return Actions.ActOnModuleDecl(StartLoc, ModuleLoc, MDK, Path);
+ return Actions.ActOnModuleDecl(StartLoc, ModuleLoc, MDK, Path, IsFirstDecl);
}
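For reference, a minimal sketch of the declaration forms the grammar comment above describes, as this parser accepts them (illustrative file layout and names, not part of the patch; partitions are still rejected per the FIXME below):

// C++2a module interface unit (illustrative)
module;                        // global-module-fragment introducer (first decl only)
#include <cstddef>             // preprocessor-only declarations live here
export module widgets;         // module-declaration, MDK == Interface
export int area(int w, int h);
module :private;               // private-module-fragment (must not be exported)
int area(int w, int h) { return w * h; }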
/// Parse a module import declaration. This is essentially the same for
@@ -2121,17 +2268,50 @@ Parser::DeclGroupPtrTy Parser::ParseModuleDecl() {
/// '@' 'import' module-name ';'
/// [ModTS] module-import-declaration:
/// 'import' module-name attribute-specifier-seq[opt] ';'
+/// [C++2a] module-import-declaration:
+/// 'export'[opt] 'import' module-name
+/// attribute-specifier-seq[opt] ';'
+/// 'export'[opt] 'import' module-partition
+/// attribute-specifier-seq[opt] ';'
+/// 'export'[opt] 'import' header-name
+/// attribute-specifier-seq[opt] ';'
Decl *Parser::ParseModuleImport(SourceLocation AtLoc) {
- assert((AtLoc.isInvalid() ? Tok.is(tok::kw_import)
+ SourceLocation StartLoc = AtLoc.isInvalid() ? Tok.getLocation() : AtLoc;
+
+ SourceLocation ExportLoc;
+ TryConsumeToken(tok::kw_export, ExportLoc);
+
+ assert((AtLoc.isInvalid() ? Tok.isOneOf(tok::kw_import, tok::identifier)
: Tok.isObjCAtKeyword(tok::objc_import)) &&
"Improper start to module import");
bool IsObjCAtImport = Tok.isObjCAtKeyword(tok::objc_import);
SourceLocation ImportLoc = ConsumeToken();
- SourceLocation StartLoc = AtLoc.isInvalid() ? ImportLoc : AtLoc;
SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Path;
- if (ParseModuleName(ImportLoc, Path, /*IsImport*/true))
+ Module *HeaderUnit = nullptr;
+
+ if (Tok.is(tok::header_name)) {
+ // This is a header import that the preprocessor decided we should skip
+ // because it was malformed in some way. Parse and ignore it; it's already
+ // been diagnosed.
+ ConsumeToken();
+ } else if (Tok.is(tok::annot_header_unit)) {
+ // This is a header import that the preprocessor mapped to a module import.
+ HeaderUnit = reinterpret_cast<Module *>(Tok.getAnnotationValue());
+ ConsumeAnnotationToken();
+ } else if (getLangOpts().CPlusPlusModules && Tok.is(tok::colon)) {
+ SourceLocation ColonLoc = ConsumeToken();
+ if (ParseModuleName(ImportLoc, Path, /*IsImport*/true))
+ return nullptr;
+
+ // FIXME: Support module partition import.
+ Diag(ColonLoc, diag::err_unsupported_module_partition)
+ << SourceRange(ColonLoc, Path.back().second);
return nullptr;
+ } else {
+ if (ParseModuleName(ImportLoc, Path, /*IsImport*/true))
+ return nullptr;
+ }
ParsedAttributesWithRange Attrs(AttrFactory);
MaybeParseCXX11Attributes(Attrs);
@@ -2144,7 +2324,12 @@ Decl *Parser::ParseModuleImport(SourceLocation AtLoc) {
return nullptr;
}
- DeclResult Import = Actions.ActOnModuleImport(StartLoc, ImportLoc, Path);
+ DeclResult Import;
+ if (HeaderUnit)
+ Import =
+ Actions.ActOnModuleImport(StartLoc, ExportLoc, ImportLoc, HeaderUnit);
+ else if (!Path.empty())
+ Import = Actions.ActOnModuleImport(StartLoc, ExportLoc, ImportLoc, Path);
ExpectAndConsumeSemi(diag::err_module_expected_semi);
if (Import.isInvalid())
return nullptr;
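A hedged sketch of the import forms covered by the grammar comment above (module and header names are illustrative; header-unit imports depend on how the preprocessor maps them):

export module widgets;        // importing context: an interface unit
import widgets.core;          // plain module-import-declaration
export import widgets.text;   // re-exported import; 'export' is consumed above
import <vector>;              // header-name import the preprocessor may map to
                              // a header unit (tok::annot_header_unit)
import :part;                 // partition import: currently diagnosed with
                              // err_unsupported_module_partition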
diff --git a/lib/Rewrite/DeltaTree.cpp b/lib/Rewrite/DeltaTree.cpp
index 06f3b4fb4a66..d27795c2f479 100644
--- a/lib/Rewrite/DeltaTree.cpp
+++ b/lib/Rewrite/DeltaTree.cpp
@@ -1,9 +1,8 @@
//===- DeltaTree.cpp - B-Tree for Rewrite Delta tracking ------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Rewrite/HTMLRewrite.cpp b/lib/Rewrite/HTMLRewrite.cpp
index 2088d4571aad..e304fbbed729 100644
--- a/lib/Rewrite/HTMLRewrite.cpp
+++ b/lib/Rewrite/HTMLRewrite.cpp
@@ -1,9 +1,8 @@
//== HTMLRewrite.cpp - Translate source code into prettified HTML --*- C++ -*-//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -307,14 +306,16 @@ h1 { font-size:14pt }
.keyword { color: blue }
.string_literal { color: red }
.directive { color: darkmagenta }
-/* Macro expansions. */
-.expansion { display: none; }
-.macro:hover .expansion {
+
+/* Macros and variables could have pop-up notes hidden by default.
+ - Macro pop-up: expansion of the macro
+ - Variable pop-up: value (table) of the variable */
+.macro_popup, .variable_popup { display: none; }
+
+/* Pop-up appears on mouse-hover event. */
+.macro:hover .macro_popup, .variable:hover .variable_popup {
display: block;
- border: 2px solid #FF0000;
padding: 2px;
- background-color:#FFF0F0;
- font-weight: normal;
-webkit-border-radius:5px;
-webkit-box-shadow:1px 1px 7px #000;
border-radius:5px;
@@ -325,6 +326,27 @@ h1 { font-size:14pt }
z-index: 1
}
+.macro_popup {
+ border: 2px solid red;
+ background-color:#FFF0F0;
+ font-weight: normal;
+}
+
+.variable_popup {
+ border: 2px solid blue;
+ background-color:#F0F0FF;
+ font-weight: bold;
+ font-family: Helvetica, sans-serif;
+ font-size: 9pt;
+}
+
+/* Pop-up notes need a relative position as a base from which they pop up. */
+.macro, .variable {
+ background-color: PaleGoldenRod;
+ position: relative;
+}
+.macro { color: DarkMagenta; }
+
#tooltiphint {
position: fixed;
width: 50em;
@@ -337,12 +359,6 @@ h1 { font-size:14pt }
background-color: #c0c0c0;
z-index: 2;
}
-.macro {
- color: darkmagenta;
- background-color:LemonChiffon;
- /* Macros are position: relative to provide base for expansions. */
- position: relative;
-}
.num { width:2.5em; padding-right:2ex; background-color:#eeeeee }
.num { text-align:right; font-size:8pt }
@@ -370,6 +386,7 @@ h1 { font-size:14pt }
.PathIndex { border-radius:8px }
.PathIndexEvent { background-color:#bfba87 }
.PathIndexControl { background-color:#8c8c8c }
+.PathIndexPopUp { background-color: #879abc; }
.PathNav a { text-decoration:none; font-size: larger }
.CodeInsertionHint { font-weight: bold; background-color: #10dd10 }
.CodeRemovalHint { background-color:#de1010 }
@@ -573,7 +590,7 @@ void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
// Enter the tokens we just lexed. This will cause them to be macro expanded
// but won't enter sub-files (because we removed #'s).
- TmpPP.EnterTokenStream(TokenStream, false);
+ TmpPP.EnterTokenStream(TokenStream, false, /*IsReinject=*/false);
TokenConcatenation ConcatInfo(TmpPP);
@@ -637,10 +654,9 @@ void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
TmpPP.Lex(Tok);
}
-
- // Insert the expansion as the end tag, so that multi-line macros all get
- // highlighted.
- Expansion = "<span class='expansion'>" + Expansion + "</span></span>";
+ // Insert the 'macro_popup' as the end tag, so that multi-line macros all
+ // get highlighted.
+ Expansion = "<span class='macro_popup'>" + Expansion + "</span></span>";
HighlightRange(R, LLoc.getBegin(), LLoc.getEnd(), "<span class='macro'>",
Expansion.c_str(), LLoc.isTokenRange());
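To make the CSS change above concrete, the start/end tags in this hunk produce markup roughly of this shape for a macro use (illustrative macro; the .macro_popup span stays display:none until the enclosing .macro element is hovered):

<span class='macro'>SQUARE(n)<span class='macro_popup'>((n) * (n))</span></span>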
diff --git a/lib/Rewrite/RewriteRope.cpp b/lib/Rewrite/RewriteRope.cpp
index e3b47a1c52f8..980d0f01e277 100644
--- a/lib/Rewrite/RewriteRope.cpp
+++ b/lib/Rewrite/RewriteRope.cpp
@@ -1,9 +1,8 @@
//===- RewriteRope.cpp - Rope specialized for rewriter --------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Rewrite/Rewriter.cpp b/lib/Rewrite/Rewriter.cpp
index a5421ec807b7..881399e98e33 100644
--- a/lib/Rewrite/Rewriter.cpp
+++ b/lib/Rewrite/Rewriter.cpp
@@ -1,9 +1,8 @@
//===- Rewriter.cpp - Code rewriting interface ----------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -171,7 +170,7 @@ int Rewriter::getRangeSize(SourceRange Range, RewriteOptions opts) const {
/// in different buffers, this returns an empty string.
///
/// Note that this method is not particularly efficient.
-std::string Rewriter::getRewrittenText(SourceRange Range) const {
+std::string Rewriter::getRewrittenText(CharSourceRange Range) const {
if (!isRewritable(Range.getBegin()) ||
!isRewritable(Range.getEnd()))
return {};
@@ -194,7 +193,9 @@ std::string Rewriter::getRewrittenText(SourceRange Range) const {
// Adjust the end offset to the end of the last token, instead of being the
// start of the last token.
- EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts);
+ if (Range.isTokenRange())
+ EndOff +=
+ Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts);
return std::string(Ptr, Ptr+EndOff-StartOff);
}
@@ -204,7 +205,8 @@ std::string Rewriter::getRewrittenText(SourceRange Range) const {
// Adjust the end offset to the end of the last token, instead of being the
// start of the last token.
- EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts);
+ if (Range.isTokenRange())
+ EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts);
// Advance the iterators to the right spot, yay for linear time algorithms.
RewriteBuffer::iterator Start = RB.begin();
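A minimal caller-side sketch for the widened overload (helper name is illustrative, not part of the patch):

#include "clang/AST/Stmt.h"
#include "clang/Rewrite/Core/Rewriter.h"

// Returns the current (possibly rewritten) text for a statement. With a token
// range, the length of the final token is added as in the branches above; a
// char range is used as-is.
std::string textFor(const clang::Rewriter &R, const clang::Stmt *S) {
  return R.getRewrittenText(
      clang::CharSourceRange::getTokenRange(S->getSourceRange()));
}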
diff --git a/lib/Rewrite/TokenRewriter.cpp b/lib/Rewrite/TokenRewriter.cpp
index 1f5dec499c92..538622e36b38 100644
--- a/lib/Rewrite/TokenRewriter.cpp
+++ b/lib/Rewrite/TokenRewriter.cpp
@@ -1,9 +1,8 @@
//===- TokenRewriter.cpp - Token-based code rewriting interface -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Sema/AnalysisBasedWarnings.cpp b/lib/Sema/AnalysisBasedWarnings.cpp
index c818d40c7771..ce01909f1858 100644
--- a/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/lib/Sema/AnalysisBasedWarnings.cpp
@@ -1,9 +1,8 @@
//=- AnalysisBasedWarnings.cpp - Sema warnings based on libAnalysis -*- C++ -*-=//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -250,6 +249,10 @@ static void checkRecursiveFunction(Sema &S, const FunctionDecl *FD,
CFG *cfg = AC.getCFG();
if (!cfg) return;
+ // If the exit block is unreachable, skip processing the function.
+ if (cfg->getExit().pred_empty())
+ return;
+
// Emit diagnostic if a recursive function call is detected for all paths.
if (checkForRecursiveFunctionCall(FD, cfg))
S.Diag(Body->getBeginLoc(), diag::warn_infinite_recursive_function);
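For illustration, the kind of code this check targets and the case the new early return skips (hedged example, not from the patch):

int countdown(int n) {
  return countdown(n - 1);   // -Winfinite-recursion: every path through the
}                            // body calls the function again

void spin() {
  for (;;) {                 // exit block unreachable: the recursion check is
  }                          // now skipped for this body
}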
@@ -395,7 +398,8 @@ static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
for (const auto *B : *cfg) {
if (!live[B->getBlockID()]) {
if (B->pred_begin() == B->pred_end()) {
- if (B->getTerminator() && isa<CXXTryStmt>(B->getTerminator()))
+ const Stmt *Term = B->getTerminatorStmt();
+ if (Term && isa<CXXTryStmt>(Term))
// When not adding EH edges from calls, catch clauses
// can otherwise seem dead. Avoid noting them as dead.
count += reachable_code::ScanReachableFromBlock(B, live);
@@ -443,7 +447,8 @@ static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
// No more CFGElements in the block?
if (ri == re) {
- if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
+ const Stmt *Term = B.getTerminatorStmt();
+ if (Term && isa<CXXTryStmt>(Term)) {
HasAbnormalEdge = true;
continue;
}
@@ -615,7 +620,7 @@ struct CheckFallThroughDiagnostics {
/// of a noreturn function. We assume that functions and blocks not marked
/// noreturn will return.
static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
- const BlockExpr *blkExpr,
+ QualType BlockType,
const CheckFallThroughDiagnostics &CD,
AnalysisDeclContext &AC,
sema::FunctionScopeInfo *FSI) {
@@ -636,9 +641,8 @@ static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
HasNoReturn = MD->hasAttr<NoReturnAttr>();
}
else if (isa<BlockDecl>(D)) {
- QualType BlockTy = blkExpr->getType();
if (const FunctionType *FT =
- BlockTy->getPointeeType()->getAs<FunctionType>()) {
+ BlockType->getPointeeType()->getAs<FunctionType>()) {
if (FT->getReturnType()->isVoidType())
ReturnsVoid = true;
if (FT->getNoReturnAttr())
@@ -995,7 +999,8 @@ static bool DiagnoseUninitializedUse(Sema &S, const VarDecl *VD,
if (VD->getType()->isBlockPointerType() && !VD->hasAttr<BlocksAttr>())
S.Diag(BE->getBeginLoc(),
diag::warn_uninit_byref_blockvar_captured_by_block)
- << VD->getDeclName();
+ << VD->getDeclName()
+ << VD->getType().getQualifiers().hasObjCLifetime();
else
DiagUninitUse(S, VD, Use, true);
}
@@ -1073,7 +1078,7 @@ namespace {
BlockQueue.pop_front();
if (!P) continue;
- const Stmt *Term = P->getTerminator();
+ const Stmt *Term = P->getTerminatorStmt();
if (Term && isa<SwitchStmt>(Term))
continue; // Switch statement, good.
@@ -1171,7 +1176,7 @@ namespace {
}
static const Stmt *getLastStmt(const CFGBlock &B) {
- if (const Stmt *Term = B.getTerminator())
+ if (const Stmt *Term = B.getTerminatorStmt())
return Term;
for (CFGBlock::const_reverse_iterator ElemIt = B.rbegin(),
ElemEnd = B.rend();
@@ -1277,11 +1282,11 @@ static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC,
if (L.isMacroID())
continue;
if (S.getLangOpts().CPlusPlus11) {
- const Stmt *Term = B->getTerminator();
+ const Stmt *Term = B->getTerminatorStmt();
// Skip empty cases.
while (B->empty() && !Term && B->succ_size() == 1) {
B = *B->succ_begin();
- Term = B->getTerminator();
+ Term = B->getTerminatorStmt();
}
if (!(B->empty() && Term && isa<BreakStmt>(Term))) {
Preprocessor &PP = S.getPreprocessor();
@@ -1639,15 +1644,11 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
return ONS;
}
- // Helper functions
- void warnLockMismatch(unsigned DiagID, StringRef Kind, Name LockName,
- SourceLocation Loc) {
- // Gracefully handle rare cases when the analysis can't get a more
- // precise source location.
- if (!Loc.isValid())
- Loc = FunLocation;
- PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind << LockName);
- Warnings.emplace_back(std::move(Warning), getNotes());
+ OptionalNotes makeLockedHereNote(SourceLocation LocLocked, StringRef Kind) {
+ return LocLocked.isValid()
+ ? getNotes(PartialDiagnosticAt(
+ LocLocked, S.PDiag(diag::note_locked_here) << Kind))
+ : getNotes();
}
public:
@@ -1678,22 +1679,34 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
void handleUnmatchedUnlock(StringRef Kind, Name LockName,
SourceLocation Loc) override {
- warnLockMismatch(diag::warn_unlock_but_no_lock, Kind, LockName, Loc);
- }
-
- void handleIncorrectUnlockKind(StringRef Kind, Name LockName,
- LockKind Expected, LockKind Received,
- SourceLocation Loc) override {
if (Loc.isInvalid())
Loc = FunLocation;
- PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_unlock_kind_mismatch)
- << Kind << LockName << Received
- << Expected);
+ PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_unlock_but_no_lock)
+ << Kind << LockName);
Warnings.emplace_back(std::move(Warning), getNotes());
}
- void handleDoubleLock(StringRef Kind, Name LockName, SourceLocation Loc) override {
- warnLockMismatch(diag::warn_double_lock, Kind, LockName, Loc);
+ void handleIncorrectUnlockKind(StringRef Kind, Name LockName,
+ LockKind Expected, LockKind Received,
+ SourceLocation LocLocked,
+ SourceLocation LocUnlock) override {
+ if (LocUnlock.isInvalid())
+ LocUnlock = FunLocation;
+ PartialDiagnosticAt Warning(
+ LocUnlock, S.PDiag(diag::warn_unlock_kind_mismatch)
+ << Kind << LockName << Received << Expected);
+ Warnings.emplace_back(std::move(Warning),
+ makeLockedHereNote(LocLocked, Kind));
+ }
+
+ void handleDoubleLock(StringRef Kind, Name LockName, SourceLocation LocLocked,
+ SourceLocation LocDoubleLock) override {
+ if (LocDoubleLock.isInvalid())
+ LocDoubleLock = FunLocation;
+ PartialDiagnosticAt Warning(LocDoubleLock, S.PDiag(diag::warn_double_lock)
+ << Kind << LockName);
+ Warnings.emplace_back(std::move(Warning),
+ makeLockedHereNote(LocLocked, Kind));
}
void handleMutexHeldEndOfScope(StringRef Kind, Name LockName,
@@ -1720,13 +1733,8 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
PartialDiagnosticAt Warning(LocEndOfScope, S.PDiag(DiagID) << Kind
<< LockName);
- if (LocLocked.isValid()) {
- PartialDiagnosticAt Note(LocLocked, S.PDiag(diag::note_locked_here)
- << Kind);
- Warnings.emplace_back(std::move(Warning), getNotes(Note));
- return;
- }
- Warnings.emplace_back(std::move(Warning), getNotes());
+ Warnings.emplace_back(std::move(Warning),
+ makeLockedHereNote(LocLocked, Kind));
}
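A hedged illustration of the reports these handlers feed (the capability attributes are the public -Wthread-safety annotations; 'Mutex' is a stand-in type for the example):

struct __attribute__((capability("mutex"))) Mutex {
  void lock() __attribute__((acquire_capability()));
  void unlock() __attribute__((release_capability()));
};

Mutex M;
void twice() {
  M.lock();    // LocLocked: surfaced through makeLockedHereNote (note_locked_here)
  M.lock();    // LocDoubleLock: warn_double_lock on the second acquisition
  M.unlock();
}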
void handleExclusiveAndShared(StringRef Kind, Name LockName,
@@ -2003,7 +2011,7 @@ static void flushDiagnostics(Sema &S, const sema::FunctionScopeInfo *fscope) {
void clang::sema::
AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
sema::FunctionScopeInfo *fscope,
- const Decl *D, const BlockExpr *blkExpr) {
+ const Decl *D, QualType BlockType) {
// We avoid doing analysis-based warnings when there are errors for
// two reasons:
@@ -2082,16 +2090,16 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
// Register the expressions with the CFGBuilder.
for (const auto &D : fscope->PossiblyUnreachableDiags) {
- if (D.stmt)
- AC.registerForcedBlockExpression(D.stmt);
+ for (const Stmt *S : D.Stmts)
+ AC.registerForcedBlockExpression(S);
}
if (AC.getCFG()) {
analyzed = true;
for (const auto &D : fscope->PossiblyUnreachableDiags) {
- bool processed = false;
- if (D.stmt) {
- const CFGBlock *block = AC.getBlockForRegisteredExpression(D.stmt);
+ bool AllReachable = true;
+ for (const Stmt *S : D.Stmts) {
+ const CFGBlock *block = AC.getBlockForRegisteredExpression(S);
CFGReverseBlockReachabilityAnalysis *cra =
AC.getCFGReachablityAnalysis();
// FIXME: We should be able to assert that block is non-null, but
@@ -2099,15 +2107,17 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
// edge cases; see test/Sema/vla-2.c.
if (block && cra) {
// Can this block be reached from the entrance?
- if (cra->isReachable(&AC.getCFG()->getEntry(), block))
- S.Diag(D.Loc, D.PD);
- processed = true;
+ if (!cra->isReachable(&AC.getCFG()->getEntry(), block)) {
+ AllReachable = false;
+ break;
+ }
}
+ // If we cannot map to a basic block, assume the statement is
+ // reachable.
}
- if (!processed) {
- // Emit the warning anyway if we cannot map to a basic block.
+
+ if (AllReachable)
S.Diag(D.Loc, D.PD);
- }
}
}
@@ -2127,7 +2137,7 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
: (fscope->isCoroutine()
? CheckFallThroughDiagnostics::MakeForCoroutine(D)
: CheckFallThroughDiagnostics::MakeForFunction(D)));
- CheckFallThroughForBody(S, D, Body, blkExpr, CD, AC, fscope);
+ CheckFallThroughForBody(S, D, Body, BlockType, CD, AC, fscope);
}
// Warning: check for unreachable code
diff --git a/lib/Sema/CodeCompleteConsumer.cpp b/lib/Sema/CodeCompleteConsumer.cpp
index 92e65c4b819b..b88ff9dd64cd 100644
--- a/lib/Sema/CodeCompleteConsumer.cpp
+++ b/lib/Sema/CodeCompleteConsumer.cpp
@@ -1,9 +1,8 @@
//===- CodeCompleteConsumer.cpp - Code Completion Interface ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Sema/CoroutineStmtBuilder.h b/lib/Sema/CoroutineStmtBuilder.h
index d15cf0b756e7..42499a32fc16 100644
--- a/lib/Sema/CoroutineStmtBuilder.h
+++ b/lib/Sema/CoroutineStmtBuilder.h
@@ -1,9 +1,8 @@
//===- CoroutineStmtBuilder.h - Implicit coroutine stmt builder -*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//===----------------------------------------------------------------------===//
//
// This file defines CoroutineStmtBuilder, a class for building the implicit
diff --git a/lib/Sema/DeclSpec.cpp b/lib/Sema/DeclSpec.cpp
index 8b002dac1343..77e5eb095693 100644
--- a/lib/Sema/DeclSpec.cpp
+++ b/lib/Sema/DeclSpec.cpp
@@ -1,9 +1,8 @@
//===--- DeclSpec.cpp - Declaration Specifier Semantic Analysis -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -446,7 +445,7 @@ unsigned DeclSpec::getParsedSpecifiers() const {
if (hasTypeSpecifier())
Res |= PQ_TypeSpecifier;
- if (FS_inline_specified || FS_virtual_specified || FS_explicit_specified ||
+ if (FS_inline_specified || FS_virtual_specified || hasExplicitSpecifier() ||
FS_noreturn_specified || FS_forceinline_specified)
Res |= PQ_FunctionSpecifier;
return Res;
@@ -565,6 +564,15 @@ const char *DeclSpec::getSpecifierName(DeclSpec::TST T,
llvm_unreachable("Unknown typespec!");
}
+const char *DeclSpec::getSpecifierName(ConstexprSpecKind C) {
+ switch (C) {
+ case CSK_unspecified: return "unspecified";
+ case CSK_constexpr: return "constexpr";
+ case CSK_consteval: return "consteval";
+ }
+ llvm_unreachable("Unknown ConstexprSpecKind");
+}
+
const char *DeclSpec::getSpecifierName(TQ T) {
switch (T) {
case DeclSpec::TQ_unspecified: return "unspecified";
@@ -588,7 +596,6 @@ bool DeclSpec::SetStorageClassSpec(Sema &S, SCS SC, SourceLocation Loc,
// these storage-class specifiers.
// OpenCL v1.2 s6.8 changes this to "The auto and register storage-class
// specifiers are not supported."
- // OpenCL C++ v1.0 s2.9 restricts register.
if (S.getLangOpts().OpenCL &&
!S.getOpenCLOptions().isEnabled("cl_clang_storage_class_specifiers")) {
switch (SC) {
@@ -707,6 +714,8 @@ bool DeclSpec::SetTypeSpecType(TST T, SourceLocation TagKwLoc,
const PrintingPolicy &Policy) {
assert(isTypeRep(T) && "T does not store a type");
assert(Rep && "no type provided!");
+ if (TypeSpecType == TST_error)
+ return false;
if (TypeSpecType != TST_unspecified) {
PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
DiagID = diag::err_invalid_decl_spec_combination;
@@ -727,6 +736,8 @@ bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc,
const PrintingPolicy &Policy) {
assert(isExprRep(T) && "T does not store an expr");
assert(Rep && "no expression provided!");
+ if (TypeSpecType == TST_error)
+ return false;
if (TypeSpecType != TST_unspecified) {
PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
DiagID = diag::err_invalid_decl_spec_combination;
@@ -757,6 +768,8 @@ bool DeclSpec::SetTypeSpecType(TST T, SourceLocation TagKwLoc,
assert(isDeclRep(T) && "T does not store a decl");
// Unlike the other cases, we don't assert that we actually get a decl.
+ if (TypeSpecType == TST_error)
+ return false;
if (TypeSpecType != TST_unspecified) {
PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
DiagID = diag::err_invalid_decl_spec_combination;
@@ -776,6 +789,8 @@ bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc,
const PrintingPolicy &Policy) {
assert(!isDeclRep(T) && !isTypeRep(T) && !isExprRep(T) &&
"rep required for these type-spec kinds!");
+ if (TypeSpecType == TST_error)
+ return false;
if (TypeSpecType != TST_unspecified) {
PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
DiagID = diag::err_invalid_decl_spec_combination;
@@ -808,6 +823,8 @@ bool DeclSpec::SetTypeSpecSat(SourceLocation Loc, const char *&PrevSpec,
bool DeclSpec::SetTypeAltiVecVector(bool isAltiVecVector, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
const PrintingPolicy &Policy) {
+ if (TypeSpecType == TST_error)
+ return false;
if (TypeSpecType != TST_unspecified) {
PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
DiagID = diag::err_invalid_vector_decl_spec_combination;
@@ -821,7 +838,8 @@ bool DeclSpec::SetTypeAltiVecVector(bool isAltiVecVector, SourceLocation Loc,
bool DeclSpec::SetTypePipe(bool isPipe, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
const PrintingPolicy &Policy) {
-
+ if (TypeSpecType == TST_error)
+ return false;
if (TypeSpecType != TST_unspecified) {
PrevSpec = DeclSpec::getSpecifierName((TST)TypeSpecType, Policy);
DiagID = diag::err_invalid_decl_spec_combination;
@@ -837,6 +855,8 @@ bool DeclSpec::SetTypePipe(bool isPipe, SourceLocation Loc,
bool DeclSpec::SetTypeAltiVecPixel(bool isAltiVecPixel, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
const PrintingPolicy &Policy) {
+ if (TypeSpecType == TST_error)
+ return false;
if (!TypeAltiVecVector || TypeAltiVecPixel ||
(TypeSpecType != TST_unspecified)) {
PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
@@ -852,6 +872,8 @@ bool DeclSpec::SetTypeAltiVecPixel(bool isAltiVecPixel, SourceLocation Loc,
bool DeclSpec::SetTypeAltiVecBool(bool isAltiVecBool, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
const PrintingPolicy &Policy) {
+ if (TypeSpecType == TST_error)
+ return false;
if (!TypeAltiVecVector || TypeAltiVecBool ||
(TypeSpecType != TST_unspecified)) {
PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
@@ -945,17 +967,24 @@ bool DeclSpec::setFunctionSpecVirtual(SourceLocation Loc,
}
bool DeclSpec::setFunctionSpecExplicit(SourceLocation Loc,
- const char *&PrevSpec,
- unsigned &DiagID) {
+ const char *&PrevSpec, unsigned &DiagID,
+ ExplicitSpecifier ExplicitSpec,
+ SourceLocation CloseParenLoc) {
+ assert((ExplicitSpec.getKind() == ExplicitSpecKind::ResolvedTrue ||
+ ExplicitSpec.getExpr()) &&
+ "invalid ExplicitSpecifier");
// 'explicit explicit' is ok, but warn as this is likely not what the user
// intended.
- if (FS_explicit_specified) {
- DiagID = diag::warn_duplicate_declspec;
+ if (hasExplicitSpecifier()) {
+ DiagID = (ExplicitSpec.getExpr() || FS_explicit_specifier.getExpr())
+ ? diag::err_duplicate_declspec
+ : diag::ext_warn_duplicate_declspec;
PrevSpec = "explicit";
return true;
}
- FS_explicit_specified = true;
+ FS_explicit_specifier = ExplicitSpec;
FS_explicitLoc = Loc;
+ FS_explicitCloseParenLoc = CloseParenLoc;
return false;
}
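Sketch of declarations that exercise the new ExplicitSpecifier plumbing (illustrative names, not from the patch):

#include <type_traits>

template <typename T> struct Box {
  // Conditionally explicit constructor: stored as an ExplicitSpecifier whose
  // expression is resolved later; CloseParenLoc is the ')' of explicit(...).
  explicit(!std::is_convertible_v<T, int>) Box(T);
};

struct S {
  explicit explicit S(int);            // ext_warn_duplicate_declspec
  // explicit(true) explicit S(long);  // err_duplicate_declspec: an expression
                                       // is involved on either specifier
};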
@@ -1004,16 +1033,17 @@ bool DeclSpec::setModulePrivateSpec(SourceLocation Loc, const char *&PrevSpec,
return false;
}
-bool DeclSpec::SetConstexprSpec(SourceLocation Loc, const char *&PrevSpec,
+bool DeclSpec::SetConstexprSpec(ConstexprSpecKind ConstexprKind,
+ SourceLocation Loc, const char *&PrevSpec,
unsigned &DiagID) {
- // 'constexpr constexpr' is ok, but warn as this is likely not what the user
- // intended.
- if (Constexpr_specified) {
+ if (getConstexprSpecifier() != CSK_unspecified) {
+ if (getConstexprSpecifier() == CSK_consteval || ConstexprKind == CSK_consteval)
+ return BadSpecifier(ConstexprKind, getConstexprSpecifier(), PrevSpec, DiagID);
DiagID = diag::warn_duplicate_declspec;
PrevSpec = "constexpr";
return true;
}
- Constexpr_specified = true;
+ ConstexprSpecifier = ConstexprKind;
ConstexprLoc = Loc;
return false;
}
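Illustrative specifier combinations for the new ConstexprSpecKind handling (sketch, not from the patch):

consteval int sq(int n) { return n * n; }    // CSK_consteval (C++2a)
constexpr constexpr int one() { return 1; }  // warn_duplicate_declspec
// consteval constexpr int bad();            // BadSpecifier: consteval cannot
//                                           // be combined with constexpr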
@@ -1034,7 +1064,10 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
// Before possibly changing their values, save specs as written.
SaveWrittenBuiltinSpecs();
- // Check the type specifier components first.
+ // Check the type specifier components first. No checking for an invalid
+ // type.
+ if (TypeSpecType == TST_error)
+ return;
// If decltype(auto) is used, no other type specifiers are permitted.
if (TypeSpecType == TST_decltype_auto &&
@@ -1256,9 +1289,10 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
else if (TypeSpecType == TST_char16 || TypeSpecType == TST_char32)
S.Diag(TSTLoc, diag::warn_cxx98_compat_unicode_type)
<< (TypeSpecType == TST_char16 ? "char16_t" : "char32_t");
- if (Constexpr_specified)
+ if (getConstexprSpecifier() == CSK_constexpr)
S.Diag(ConstexprLoc, diag::warn_cxx98_compat_constexpr);
-
+ if (getConstexprSpecifier() == CSK_consteval)
+ S.Diag(ConstexprLoc, diag::warn_cxx20_compat_consteval);
// C++ [class.friend]p6:
// No storage-class-specifier shall appear in the decl-specifier-seq
// of a friend declaration.
@@ -1294,23 +1328,26 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
// The explicit specifier shall be used only in the declaration of
// a constructor or conversion function within its class
// definition;
- if (isFriendSpecified() && (isVirtualSpecified() || isExplicitSpecified())) {
+ if (isFriendSpecified() && (isVirtualSpecified() || hasExplicitSpecifier())) {
StringRef Keyword;
+ FixItHint Hint;
SourceLocation SCLoc;
if (isVirtualSpecified()) {
Keyword = "virtual";
SCLoc = getVirtualSpecLoc();
+ Hint = FixItHint::CreateRemoval(SCLoc);
} else {
Keyword = "explicit";
SCLoc = getExplicitSpecLoc();
+ Hint = FixItHint::CreateRemoval(getExplicitSpecRange());
}
- FixItHint Hint = FixItHint::CreateRemoval(SCLoc);
S.Diag(SCLoc, diag::err_friend_decl_spec)
<< Keyword << Hint;
- FS_virtual_specified = FS_explicit_specified = false;
+ FS_virtual_specified = false;
+ FS_explicit_specifier = ExplicitSpecifier();
FS_virtualLoc = FS_explicitLoc = SourceLocation();
}
diff --git a/lib/Sema/DelayedDiagnostic.cpp b/lib/Sema/DelayedDiagnostic.cpp
index a064e492c098..cb2721b92090 100644
--- a/lib/Sema/DelayedDiagnostic.cpp
+++ b/lib/Sema/DelayedDiagnostic.cpp
@@ -1,9 +1,8 @@
//===- DelayedDiagnostic.cpp - Delayed declarator diagnostics -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Sema/IdentifierResolver.cpp b/lib/Sema/IdentifierResolver.cpp
index b439f7255728..333f4d70986a 100644
--- a/lib/Sema/IdentifierResolver.cpp
+++ b/lib/Sema/IdentifierResolver.cpp
@@ -1,9 +1,8 @@
//===- IdentifierResolver.cpp - Lexical Scope Name lookup -----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Sema/JumpDiagnostics.cpp b/lib/Sema/JumpDiagnostics.cpp
index a7495e8e0482..c8743df90e34 100644
--- a/lib/Sema/JumpDiagnostics.cpp
+++ b/lib/Sema/JumpDiagnostics.cpp
@@ -1,9 +1,8 @@
//===--- JumpDiagnostics.cpp - Protected scope jump analysis ------*- C++ -*-=//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -66,8 +65,10 @@ class JumpScopeChecker {
llvm::DenseMap<Stmt*, unsigned> LabelAndGotoScopes;
SmallVector<Stmt*, 16> Jumps;
- SmallVector<IndirectGotoStmt*, 4> IndirectJumps;
+ SmallVector<Stmt*, 4> IndirectJumps;
+ SmallVector<Stmt*, 4> AsmJumps;
SmallVector<LabelDecl*, 4> IndirectJumpTargets;
+ SmallVector<LabelDecl*, 4> AsmJumpTargets;
public:
JumpScopeChecker(Stmt *Body, Sema &S);
private:
@@ -77,10 +78,10 @@ private:
void BuildScopeInformation(Stmt *S, unsigned &origParentScope);
void VerifyJumps();
- void VerifyIndirectJumps();
+ void VerifyIndirectOrAsmJumps(bool IsAsmGoto);
void NoteJumpIntoScopes(ArrayRef<unsigned> ToScopes);
- void DiagnoseIndirectJump(IndirectGotoStmt *IG, unsigned IGScope,
- LabelDecl *Target, unsigned TargetScope);
+ void DiagnoseIndirectOrAsmJump(Stmt *IG, unsigned IGScope, LabelDecl *Target,
+ unsigned TargetScope);
void CheckJump(Stmt *From, Stmt *To, SourceLocation DiagLoc,
unsigned JumpDiag, unsigned JumpDiagWarning,
unsigned JumpDiagCXX98Compat);
@@ -104,7 +105,8 @@ JumpScopeChecker::JumpScopeChecker(Stmt *Body, Sema &s)
// Check that all jumps we saw are kosher.
VerifyJumps();
- VerifyIndirectJumps();
+ VerifyIndirectOrAsmJumps(false);
+ VerifyIndirectOrAsmJumps(true);
}
/// GetDeepestCommonScope - Finds the innermost scope enclosing the
@@ -317,7 +319,7 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
}
LabelAndGotoScopes[S] = ParentScope;
- IndirectJumps.push_back(cast<IndirectGotoStmt>(S));
+ IndirectJumps.push_back(S);
break;
case Stmt::SwitchStmtClass:
@@ -340,6 +342,18 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
Jumps.push_back(S);
break;
+ case Stmt::GCCAsmStmtClass:
+ if (auto *GS = dyn_cast<GCCAsmStmt>(S))
+ if (GS->isAsmGoto()) {
+ // Remember both what scope a goto is in as well as the fact that we
+ // have it. This makes the second scan not have to walk the AST again.
+ LabelAndGotoScopes[S] = ParentScope;
+ AsmJumps.push_back(GS);
+ for (auto *E : GS->labels())
+ AsmJumpTargets.push_back(E->getLabel());
+ }
+ break;
+
case Stmt::IfStmtClass: {
IfStmt *IS = cast<IfStmt>(S);
if (!(IS->isConstexpr() || IS->isObjCAvailabilityCheck()))
@@ -630,14 +644,13 @@ void JumpScopeChecker::VerifyJumps() {
}
}
-/// VerifyIndirectJumps - Verify whether any possible indirect jump
-/// might cross a protection boundary. Unlike direct jumps, indirect
-/// jumps count cleanups as protection boundaries: since there's no
-/// way to know where the jump is going, we can't implicitly run the
-/// right cleanups the way we can with direct jumps.
-///
-/// Thus, an indirect jump is "trivial" if it bypasses no
-/// initializations and no teardowns. More formally, an indirect jump
+/// VerifyIndirectOrAsmJumps - Verify whether any possible indirect goto or
+/// asm goto jump might cross a protection boundary. Unlike direct jumps,
+/// indirect or asm goto jumps count cleanups as protection boundaries:
+/// since there's no way to know where the jump is going, we can't implicitly
+/// run the right cleanups the way we can with direct jumps.
+/// Thus, an indirect/asm jump is "trivial" if it bypasses no
+/// initializations and no teardowns. More formally, an indirect/asm jump
/// from A to B is trivial if the path out from A to DCA(A,B) is
/// trivial and the path in from DCA(A,B) to B is trivial, where
/// DCA(A,B) is the deepest common ancestor of A and B.
@@ -649,36 +662,41 @@ void JumpScopeChecker::VerifyJumps() {
/// Under these definitions, this function checks that the indirect
/// jump between A and B is trivial for every indirect goto statement A
/// and every label B whose address was taken in the function.
-void JumpScopeChecker::VerifyIndirectJumps() {
- if (IndirectJumps.empty()) return;
-
+void JumpScopeChecker::VerifyIndirectOrAsmJumps(bool IsAsmGoto) {
+ SmallVector<Stmt*, 4> GotoJumps = IsAsmGoto ? AsmJumps : IndirectJumps;
+ if (GotoJumps.empty())
+ return;
+ SmallVector<LabelDecl *, 4> JumpTargets =
+ IsAsmGoto ? AsmJumpTargets : IndirectJumpTargets;
// If there aren't any address-of-label expressions in this function,
// complain about the first indirect goto.
- if (IndirectJumpTargets.empty()) {
- S.Diag(IndirectJumps[0]->getGotoLoc(),
+ if (JumpTargets.empty()) {
+      assert(!IsAsmGoto && "only indirect goto can get here");
+ S.Diag(GotoJumps[0]->getBeginLoc(),
diag::err_indirect_goto_without_addrlabel);
return;
}
-
// Collect a single representative of every scope containing an
- // indirect goto. For most code bases, this substantially cuts
+ // indirect or asm goto. For most code bases, this substantially cuts
// down on the number of jump sites we'll have to consider later.
- typedef std::pair<unsigned, IndirectGotoStmt*> JumpScope;
+ typedef std::pair<unsigned, Stmt*> JumpScope;
SmallVector<JumpScope, 32> JumpScopes;
{
- llvm::DenseMap<unsigned, IndirectGotoStmt*> JumpScopesMap;
- for (SmallVectorImpl<IndirectGotoStmt*>::iterator
- I = IndirectJumps.begin(), E = IndirectJumps.end(); I != E; ++I) {
- IndirectGotoStmt *IG = *I;
+ llvm::DenseMap<unsigned, Stmt*> JumpScopesMap;
+ for (SmallVectorImpl<Stmt *>::iterator I = GotoJumps.begin(),
+ E = GotoJumps.end();
+ I != E; ++I) {
+ Stmt *IG = *I;
if (CHECK_PERMISSIVE(!LabelAndGotoScopes.count(IG)))
continue;
unsigned IGScope = LabelAndGotoScopes[IG];
- IndirectGotoStmt *&Entry = JumpScopesMap[IGScope];
+ Stmt *&Entry = JumpScopesMap[IGScope];
if (!Entry) Entry = IG;
}
JumpScopes.reserve(JumpScopesMap.size());
- for (llvm::DenseMap<unsigned, IndirectGotoStmt*>::iterator
- I = JumpScopesMap.begin(), E = JumpScopesMap.end(); I != E; ++I)
+ for (llvm::DenseMap<unsigned, Stmt *>::iterator I = JumpScopesMap.begin(),
+ E = JumpScopesMap.end();
+ I != E; ++I)
JumpScopes.push_back(*I);
}
@@ -686,8 +704,8 @@ void JumpScopeChecker::VerifyIndirectJumps() {
// label whose address was taken somewhere in the function.
// For most code bases, there will be only one such scope.
llvm::DenseMap<unsigned, LabelDecl*> TargetScopes;
- for (SmallVectorImpl<LabelDecl*>::iterator
- I = IndirectJumpTargets.begin(), E = IndirectJumpTargets.end();
+ for (SmallVectorImpl<LabelDecl *>::iterator I = JumpTargets.begin(),
+ E = JumpTargets.end();
I != E; ++I) {
LabelDecl *TheLabel = *I;
if (CHECK_PERMISSIVE(!LabelAndGotoScopes.count(TheLabel->getStmt())))
@@ -764,7 +782,7 @@ void JumpScopeChecker::VerifyIndirectJumps() {
// Only diagnose if we didn't find something.
if (IsReachable) continue;
- DiagnoseIndirectJump(I->second, I->first, TargetLabel, TargetScope);
+ DiagnoseIndirectOrAsmJump(I->second, I->first, TargetLabel, TargetScope);
}
}
}
@@ -785,12 +803,15 @@ static bool IsCXX98CompatWarning(Sema &S, unsigned InDiagNote) {
}
/// Produce primary diagnostic for an indirect jump statement.
-static void DiagnoseIndirectJumpStmt(Sema &S, IndirectGotoStmt *Jump,
- LabelDecl *Target, bool &Diagnosed) {
+static void DiagnoseIndirectOrAsmJumpStmt(Sema &S, Stmt *Jump,
+ LabelDecl *Target, bool &Diagnosed) {
if (Diagnosed)
return;
- S.Diag(Jump->getGotoLoc(), diag::err_indirect_goto_in_protected_scope);
- S.Diag(Target->getStmt()->getIdentLoc(), diag::note_indirect_goto_target);
+ bool IsAsmGoto = isa<GCCAsmStmt>(Jump);
+ S.Diag(Jump->getBeginLoc(), diag::err_indirect_goto_in_protected_scope)
+ << IsAsmGoto;
+ S.Diag(Target->getStmt()->getIdentLoc(), diag::note_indirect_goto_target)
+ << IsAsmGoto;
Diagnosed = true;
}
@@ -804,10 +825,9 @@ void JumpScopeChecker::NoteJumpIntoScopes(ArrayRef<unsigned> ToScopes) {
}
/// Diagnose an indirect jump which is known to cross scopes.
-void JumpScopeChecker::DiagnoseIndirectJump(IndirectGotoStmt *Jump,
- unsigned JumpScope,
- LabelDecl *Target,
- unsigned TargetScope) {
+void JumpScopeChecker::DiagnoseIndirectOrAsmJump(Stmt *Jump, unsigned JumpScope,
+ LabelDecl *Target,
+ unsigned TargetScope) {
if (CHECK_PERMISSIVE(JumpScope == TargetScope))
return;
@@ -817,7 +837,7 @@ void JumpScopeChecker::DiagnoseIndirectJump(IndirectGotoStmt *Jump,
// Walk out the scope chain until we reach the common ancestor.
for (unsigned I = JumpScope; I != Common; I = Scopes[I].ParentScope)
if (Scopes[I].OutDiag) {
- DiagnoseIndirectJumpStmt(S, Jump, Target, Diagnosed);
+ DiagnoseIndirectOrAsmJumpStmt(S, Jump, Target, Diagnosed);
S.Diag(Scopes[I].Loc, Scopes[I].OutDiag);
}
@@ -828,15 +848,18 @@ void JumpScopeChecker::DiagnoseIndirectJump(IndirectGotoStmt *Jump,
if (IsCXX98CompatWarning(S, Scopes[I].InDiag))
ToScopesCXX98Compat.push_back(I);
else if (Scopes[I].InDiag) {
- DiagnoseIndirectJumpStmt(S, Jump, Target, Diagnosed);
+ DiagnoseIndirectOrAsmJumpStmt(S, Jump, Target, Diagnosed);
S.Diag(Scopes[I].Loc, Scopes[I].InDiag);
}
// Diagnose this jump if it would be ill-formed in C++98.
if (!Diagnosed && !ToScopesCXX98Compat.empty()) {
- S.Diag(Jump->getGotoLoc(),
- diag::warn_cxx98_compat_indirect_goto_in_protected_scope);
- S.Diag(Target->getStmt()->getIdentLoc(), diag::note_indirect_goto_target);
+ bool IsAsmGoto = isa<GCCAsmStmt>(Jump);
+ S.Diag(Jump->getBeginLoc(),
+ diag::warn_cxx98_compat_indirect_goto_in_protected_scope)
+ << IsAsmGoto;
+ S.Diag(Target->getStmt()->getIdentLoc(), diag::note_indirect_goto_target)
+ << IsAsmGoto;
NoteJumpIntoScopes(ToScopesCXX98Compat);
}
}
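A rough sketch of the asm-goto case the renamed checks now cover (GNU extension syntax; diagnostics paraphrased, names illustrative):

void f(int x) {
  if (x)
    asm goto("jmp %l0" : : : : out);  // err_indirect_goto_in_protected_scope,
                                      // now selecting the 'asm goto' wording
  int vla[x];                         // protected scope: jump would bypass the
                                      // VLA's initialization
out:                                  // note_indirect_goto_target at 'out'
  return;
}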
diff --git a/lib/Sema/MultiplexExternalSemaSource.cpp b/lib/Sema/MultiplexExternalSemaSource.cpp
index 50808effe007..b0aa67454a7b 100644
--- a/lib/Sema/MultiplexExternalSemaSource.cpp
+++ b/lib/Sema/MultiplexExternalSemaSource.cpp
@@ -1,9 +1,8 @@
//===--- MultiplexExternalSemaSource.cpp ---------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Sema/OpenCLBuiltins.td b/lib/Sema/OpenCLBuiltins.td
new file mode 100644
index 000000000000..7e37e55dbafa
--- /dev/null
+++ b/lib/Sema/OpenCLBuiltins.td
@@ -0,0 +1,296 @@
+//==--- OpenCLBuiltins.td - OpenCL builtin declarations -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains TableGen definitions for OpenCL builtin function
+// declarations. In case of an unresolved function name in OpenCL, Clang will
+// check for a function described in this file when -fdeclare-opencl-builtins
+// is specified.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Definitions of miscellaneous basic entities.
+//===----------------------------------------------------------------------===//
+// Versions of OpenCL
+class Version<int _Version> {
+ int Version = _Version;
+}
+def CL10: Version<100>;
+def CL11: Version<110>;
+def CL12: Version<120>;
+def CL20: Version<200>;
+
+// Address spaces
+// Pointer types need to be assigned an address space.
+class AddressSpace<string _AS> {
+ string AddrSpace = _AS;
+}
+def default_as : AddressSpace<"clang::LangAS::Default">;
+def private_as : AddressSpace<"clang::LangAS::opencl_private">;
+def global_as : AddressSpace<"clang::LangAS::opencl_global">;
+def constant_as : AddressSpace<"clang::LangAS::opencl_constant">;
+def local_as : AddressSpace<"clang::LangAS::opencl_local">;
+def generic_as : AddressSpace<"clang::LangAS::opencl_generic">;
+
+
+// Qualified Type. Allow to retrieve one ASTContext QualType.
+class QualType<string _Name> {
+ // Name of the field or function in a clang::ASTContext
+ // E.g. Name="IntTy" for the int type, and "getIntPtrType()" for an intptr_t
+ string Name = _Name;
+}
+
+// Helper class to store type access qualifiers (volatile, const, ...).
+class Qualifier<string _QualName> {
+ string QualName = _QualName;
+}
+
+//===----------------------------------------------------------------------===//
+// OpenCL C classes for types
+//===----------------------------------------------------------------------===//
+// OpenCL types (int, float, ...)
+class Type<string _Name, QualType _QTName> {
+ // Name of the Type
+ string Name = _Name;
+ // QualType associated with this type
+ QualType QTName = _QTName;
+ // Size of the vector (if applicable)
+ int VecWidth = 0;
+ // Is pointer
+ bit IsPointer = 0;
+ // List of qualifiers associated with the type (volatile, ...)
+ list<Qualifier> QualList = [];
+ // Address space
+ string AddrSpace = "clang::LangAS::Default";
+ // Access qualifier. Must be one of ("RO", "WO", "RW").
+ string AccessQualifier = "";
+}
+
+// OpenCL vector types (e.g. int2, int3, int16, float8, ...)
+class VectorType<Type _Ty, int _VecWidth> : Type<_Ty.Name, _Ty.QTName> {
+ int VecWidth = _VecWidth;
+}
+
+// OpenCL pointer types (e.g. int*, float*, ...)
+class PointerType<Type _Ty, AddressSpace _AS = global_as> :
+ Type<_Ty.Name, _Ty.QTName> {
+ bit IsPointer = 1;
+ string AddrSpace = _AS.AddrSpace;
+}
+
+// OpenCL image types (e.g. image2d_t, ...)
+class ImageType<Type _Ty, QualType _QTName, string _AccessQualifier> :
+ Type<_Ty.Name, _QTName> {
+ let AccessQualifier = _AccessQualifier;
+}
+
+//===----------------------------------------------------------------------===//
+// OpenCL C class for builtin functions
+//===----------------------------------------------------------------------===//
+class Builtin<string _Name, list<Type> _Signature> {
+ // Name of the builtin function
+ string Name = _Name;
+ // List of types used by the function. The first one is the return type and
+ // the following are the arguments. The list must have at least one element
+ // (the return type).
+ list<Type> Signature = _Signature;
+ // OpenCL Extension to which the function belongs (cl_khr_subgroups, ...)
+ string Extension = "";
+ // OpenCL Version to which the function belongs (CL10, ...)
+ Version Version = CL10;
+}
+
+//===----------------------------------------------------------------------===//
+// Multiclass definitions
+//===----------------------------------------------------------------------===//
+// multiclass BifN: Creates Builtin class instances for OpenCL builtin
+// functions with N arguments.
+// _Name : Name of the function
+// _Signature : Signature of the function (list of the Type used by the
+// function, the first one being the return type).
+// _IsVector : List of bit indicating if the type in the _Signature at the
+// same index is to be a vector in the multiple overloads. The
+// list must have at least one non-zero value.
+multiclass Bif0<string _Name, list<Type> _Signature, list<bit> _IsVector> {
+ def : Builtin<_Name, _Signature>;
+ foreach v = [2, 3, 4, 8, 16] in {
+ def : Builtin<_Name,
+ [!if(_IsVector[0], VectorType<_Signature[0], v>, _Signature[0])]>;
+ }
+}
+multiclass Bif1<string _Name, list<Type> _Signature, list<bit> _IsVector> {
+ def : Builtin<_Name, _Signature>;
+ foreach v = [2, 3, 4, 8, 16] in {
+ def : Builtin<_Name,
+ [!if(_IsVector[0], VectorType<_Signature[0], v>, _Signature[0]),
+ !if(_IsVector[1], VectorType<_Signature[1], v>, _Signature[1])]>;
+ }
+}
+multiclass Bif2<string _Name, list<Type> _Signature, list<bit> _IsVector> {
+ def : Builtin<_Name, _Signature>;
+ foreach v = [2, 3, 4, 8, 16] in {
+ def : Builtin<_Name,
+ [!if(_IsVector[0], VectorType<_Signature[0], v>, _Signature[0]),
+ !if(_IsVector[1], VectorType<_Signature[1], v>, _Signature[1]),
+ !if(_IsVector[2], VectorType<_Signature[2], v>, _Signature[2])]>;
+ }
+}
+multiclass Bif3<string _Name, list<Type> _Signature, list<bit> _IsVector> {
+ def : Builtin<_Name, _Signature>;
+ foreach v = [2, 3, 4, 8, 16] in {
+ def : Builtin<_Name,
+ [!if(_IsVector[0], VectorType<_Signature[0], v>, _Signature[0]),
+ !if(_IsVector[1], VectorType<_Signature[1], v>, _Signature[1]),
+ !if(_IsVector[2], VectorType<_Signature[2], v>, _Signature[2]),
+ !if(_IsVector[3], VectorType<_Signature[3], v>, _Signature[3])]>;
+ }
+}
+//===----------------------------------------------------------------------===//
+// Definitions of OpenCL C types
+//===----------------------------------------------------------------------===//
+// OpenCL v1.2 s6.1.1: Built-in Scalar Data Types
+def bool_t : Type<"bool", QualType<"BoolTy">>;
+def char_t : Type<"char", QualType<"CharTy">>;
+def uchar_t : Type<"uchar", QualType<"UnsignedCharTy">>;
+def short_t : Type<"short", QualType<"ShortTy">>;
+def ushort_t : Type<"ushort", QualType<"UnsignedShortTy">>;
+def int_t : Type<"int", QualType<"IntTy">>;
+def uint_t : Type<"uint", QualType<"UnsignedIntTy">>;
+def long_t : Type<"long", QualType<"LongTy">>;
+def ulong_t : Type<"ulong", QualType<"UnsignedLongTy">>;
+def float_t : Type<"float", QualType<"FloatTy">>;
+def double_t : Type<"double", QualType<"DoubleTy">>;
+def half_t : Type<"half", QualType<"HalfTy">>;
+def size_t : Type<"size_t", QualType<"getSizeType()">>;
+def ptrdiff_t : Type<"ptrdiff_t", QualType<"getPointerDiffType()">>;
+def intptr_t : Type<"intptr_t", QualType<"getIntPtrType()">>;
+def uintptr_t : Type<"uintptr_t", QualType<"getUIntPtrType()">>;
+def void_t : Type<"void", QualType<"VoidTy">>;
+
+// OpenCL v1.2 s6.1.2: Built-in Vector Data Types
+foreach v = [2, 3, 4, 8, 16] in {
+ def char#v#_t : VectorType<char_t, v>;
+ def uchar#v#_t : VectorType<uchar_t, v>;
+ def short#v#_t : VectorType<short_t, v>;
+ def ushort#v#_t : VectorType<ushort_t, v>;
+ def "int"#v#_t : VectorType<int_t, v>;
+ def uint#v#_t : VectorType<uint_t, v>;
+ def long#v#_t : VectorType<long_t, v>;
+ def ulong#v#_t : VectorType<ulong_t, v>;
+ def float#v#_t : VectorType<float_t, v>;
+ def double#v#_t : VectorType<double_t, v>;
+ def half#v#_t : VectorType<half_t, v>;
+}
+
+// OpenCL v1.2 s6.1.3: Other Built-in Data Types
+// These definitions with a "null" name are "abstract". They should not
+// be used in definitions of Builtin functions.
+def image2d_t : Type<"image2d_t", QualType<"null">>;
+def image3d_t : Type<"image3d_t", QualType<"null">>;
+def image2d_array_t : Type<"image2d_array_t", QualType<"null">>;
+def image1d_t : Type<"image1d_t", QualType<"null">>;
+def image1d_buffer_t : Type<"image1d_buffer_t", QualType<"null">>;
+def image1d_array_t : Type<"image1d_array_t", QualType<"null">>;
+// Unlike the abstract definitions above, the following image types can be
+// used in definitions of Builtin functions (they have a QualType with a name).
+foreach v = ["RO", "WO", "RW"] in {
+ def image2d_#v#_t : ImageType<image2d_t,
+ QualType<"OCLImage2d"#v#"Ty">,
+ v>;
+ def image3d_#v#_t : ImageType<image3d_t,
+ QualType<"OCLImage3d"#v#"Ty">,
+ v>;
+ def image2d_array#v#_t : ImageType<image2d_array_t,
+ QualType<"OCLImage2dArray"#v#"Ty">,
+ v>;
+ def image1d_#v#_t : ImageType<image1d_t,
+ QualType<"OCLImage1d"#v#"Ty">,
+ v>;
+ def image1d_buffer#v#_t : ImageType<image1d_buffer_t,
+ QualType<"OCLImage1dBuffer"#v#"Ty">,
+ v>;
+ def image1d_array#v#_t : ImageType<image1d_array_t,
+ QualType<"OCLImage1dArray"#v#"Ty">,
+ v>;
+}
+
+def sampler_t : Type<"sampler_t", QualType<"OCLSamplerTy">>;
+def event_t : Type<"event_t", QualType<"OCLEventTy">>;
+
+//===----------------------------------------------------------------------===//
+// Definitions of OpenCL builtin functions
+//===----------------------------------------------------------------------===//
+// OpenCL v1.2 s6.2.3: Explicit Conversions
+// Generate the convert_ builtins.
+foreach RType = [float_t, double_t, char_t, uchar_t, short_t, ushort_t,
+ int_t, uint_t, long_t, ulong_t] in {
+ foreach IType = [float_t, double_t, char_t, uchar_t, short_t, ushort_t,
+ int_t, uint_t, long_t, ulong_t] in {
+ foreach sat = ["", "_sat"] in {
+ foreach rte = ["", "_rte", "_rtz", "_rtp", "_rtn"] in {
+ def : Builtin<"convert_" # RType.Name # sat # rte, [RType, IType]>;
+ foreach v = [2, 3, 4, 8, 16] in {
+ def : Builtin<"convert_" # RType.Name # v # sat # rte,
+ [VectorType<RType, v>,
+ VectorType<IType, v>]>;
+ }
+ }
+ }
+ }
+}
+
+// OpenCL v1.2 s6.12.1: Work-Item Functions
+def get_work_dim : Builtin<"get_work_dim", [uint_t]>;
+foreach name = ["get_global_size", "get_global_id", "get_local_size",
+ "get_local_id", "get_num_groups", "get_group_id",
+ "get_global_offset"] in {
+ def : Builtin<name, [size_t, uint_t]>;
+}
+
+// OpenCL v1.2 s6.12.2: Math Functions
+foreach name = ["acos", "acosh", "acospi",
+ "asin", "asinh", "asinpi",
+ "atan", "atanh", "atanpi"] in {
+ foreach type = [float_t, double_t, half_t] in {
+ defm : Bif1<name, [type, type], [1, 1]>;
+ }
+}
+
+foreach name = ["atan2", "atan2pi"] in {
+ foreach type = [float_t, double_t, half_t] in {
+ defm : Bif2<name, [type, type, type], [1, 1, 1]>;
+ }
+}
+
+foreach name = ["fmax", "fmin"] in {
+ foreach type = [float_t, double_t, half_t] in {
+ defm : Bif2<name, [type, type, type], [1, 1, 1]>;
+ defm : Bif2<name, [type, type, type], [1, 1, 0]>;
+ }
+}
+
+// OpenCL v1.2 s6.12.14: Built-in Image Read Functions
+def read_imagef : Builtin<"read_imagef",
+ [float4_t, image2d_RO_t, VectorType<int_t, 2>]>;
+def write_imagef : Builtin<"write_imagef",
+ [void_t,
+ image2d_WO_t,
+ VectorType<int_t, 2>,
+ VectorType<float_t, 4>]>;
+
+
+// OpenCL v2.0 s9.17.3: Additions to section 6.13.1: Work-Item Functions
+let Version = CL20 in {
+ let Extension = "cl_khr_subgroups" in {
+ def get_sub_group_size : Builtin<"get_sub_group_size", [uint_t]>;
+ def get_max_sub_group_size : Builtin<"get_max_sub_group_size", [uint_t]>;
+ def get_num_sub_groups : Builtin<"get_num_sub_groups", [uint_t]>;
+ }
+}
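For orientation, a single `defm : Bif1<"acos", [float_t, float_t], [1, 1]>;` from the math-function loop above expands, per the Bif1 multiclass, into the scalar builtin plus one overload per vector width; under -fdeclare-opencl-builtins that corresponds roughly to these prototypes (illustrative rendering):

float    acos(float);
float2   acos(float2);
float3   acos(float3);
float4   acos(float4);
float8   acos(float8);
float16  acos(float16);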
diff --git a/lib/Sema/ParsedAttr.cpp b/lib/Sema/ParsedAttr.cpp
index 59e5aab677a9..5c04443460bc 100644
--- a/lib/Sema/ParsedAttr.cpp
+++ b/lib/Sema/ParsedAttr.cpp
@@ -1,9 +1,8 @@
//======- ParsedAttr.cpp --------------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Sema/Scope.cpp b/lib/Sema/Scope.cpp
index eae5a328bfa2..51b0b24e57b7 100644
--- a/lib/Sema/Scope.cpp
+++ b/lib/Sema/Scope.cpp
@@ -1,9 +1,8 @@
//===- Scope.cpp - Lexical scope information --------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -167,7 +166,9 @@ void Scope::dumpImpl(raw_ostream &OS) const {
{SEHExceptScope, "SEHExceptScope"},
{SEHFilterScope, "SEHFilterScope"},
{CompoundStmtScope, "CompoundStmtScope"},
- {ClassInheritanceScope, "ClassInheritanceScope"}};
+ {ClassInheritanceScope, "ClassInheritanceScope"},
+ {CatchScope, "CatchScope"},
+ };
for (auto Info : FlagInfo) {
if (Flags & Info.first) {
diff --git a/lib/Sema/ScopeInfo.cpp b/lib/Sema/ScopeInfo.cpp
index bd8db6f4ed91..b2a26af9b4a5 100644
--- a/lib/Sema/ScopeInfo.cpp
+++ b/lib/Sema/ScopeInfo.cpp
@@ -1,9 +1,8 @@
//===--- ScopeInfo.cpp - Information about a semantic context -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -113,21 +112,6 @@ FunctionScopeInfo::WeakObjectProfileTy::getBaseInfo(const Expr *E) {
return BaseInfoTy(D, IsExact);
}
-bool CapturingScopeInfo::isVLATypeCaptured(const VariableArrayType *VAT) const {
- RecordDecl *RD = nullptr;
- if (auto *LSI = dyn_cast<LambdaScopeInfo>(this))
- RD = LSI->Lambda;
- else if (auto CRSI = dyn_cast<CapturedRegionScopeInfo>(this))
- RD = CRSI->TheRecordDecl;
-
- if (RD)
- for (auto *FD : RD->fields()) {
- if (FD->hasCapturedVLAType() && FD->getCapturedVLAType() == VAT)
- return true;
- }
- return false;
-}
-
FunctionScopeInfo::WeakObjectProfileTy::WeakObjectProfileTy(
const ObjCPropertyRefExpr *PropE)
: Base(nullptr, true), Property(getBestPropertyDecl(PropE)) {
@@ -232,20 +216,33 @@ void FunctionScopeInfo::markSafeWeakUse(const Expr *E) {
ThisUse->markSafe();
}
-void LambdaScopeInfo::getPotentialVariableCapture(unsigned Idx, VarDecl *&VD,
- Expr *&E) const {
- assert(Idx < getNumPotentialVariableCaptures() &&
- "Index of potential capture must be within 0 to less than the "
- "number of captures!");
- E = PotentiallyCapturingExprs[Idx];
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
- VD = dyn_cast<VarDecl>(DRE->getFoundDecl());
- else if (MemberExpr *ME = dyn_cast<MemberExpr>(E))
- VD = dyn_cast<VarDecl>(ME->getMemberDecl());
- else
- llvm_unreachable("Only DeclRefExprs or MemberExprs should be added for "
- "potential captures");
- assert(VD);
+bool Capture::isInitCapture() const {
+ // Note that a nested capture of an init-capture is not itself an
+ // init-capture.
+ return !isNested() && isVariableCapture() && getVariable()->isInitCapture();
+}
+
+bool CapturingScopeInfo::isVLATypeCaptured(const VariableArrayType *VAT) const {
+ for (auto &Cap : Captures)
+ if (Cap.isVLATypeCapture() && Cap.getCapturedVLAType() == VAT)
+ return true;
+ return false;
+}
+
+void LambdaScopeInfo::visitPotentialCaptures(
+ llvm::function_ref<void(VarDecl *, Expr *)> Callback) const {
+ for (Expr *E : PotentiallyCapturingExprs) {
+ if (auto *DRE = dyn_cast<DeclRefExpr>(E)) {
+ Callback(cast<VarDecl>(DRE->getFoundDecl()), E);
+ } else if (auto *ME = dyn_cast<MemberExpr>(E)) {
+ Callback(cast<VarDecl>(ME->getMemberDecl()), E);
+ } else if (auto *FP = dyn_cast<FunctionParmPackExpr>(E)) {
+ for (VarDecl *VD : *FP)
+ Callback(VD, E);
+ } else {
+ llvm_unreachable("unexpected expression in potential captures list");
+ }
+ }
}
FunctionScopeInfo::~FunctionScopeInfo() { }
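The removed getPotentialVariableCapture accessor forced callers to iterate by index and had no way to represent a FunctionParmPackExpr; the new visitPotentialCaptures hands every (VarDecl *, Expr *) pair to a callback and expands parameter packs into one call per pack element. A hedged sketch of how a caller migrates (LSI, the filtering condition and the helper are placeholders, not the real call sites):

    // Before (removed API):
    //   for (unsigned I = 0, N = LSI->getNumPotentialVariableCaptures(); I != N; ++I) {
    //     VarDecl *VD; Expr *E;
    //     LSI->getPotentialVariableCapture(I, VD, E);
    //     ...
    //   }
    // After:
    LSI->visitPotentialCaptures([&](VarDecl *VD, Expr *E) {
      if (!VD->hasLocalStorage())
        return;                    // illustrative filter only
      noteUncapturedUse(VD, E);    // hypothetical helper standing in for the real checks
    });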
diff --git a/lib/Sema/Sema.cpp b/lib/Sema/Sema.cpp
index 9fa39968625a..11fed28b52db 100644
--- a/lib/Sema/Sema.cpp
+++ b/lib/Sema/Sema.cpp
@@ -1,9 +1,8 @@
//===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -40,6 +39,8 @@
#include "clang/Sema/TemplateInstCallback.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
+#include "llvm/Support/TimeProfiler.h"
+
using namespace clang;
using namespace sema;
@@ -93,6 +94,12 @@ public:
SourceManager &SM = S->getSourceManager();
SourceLocation IncludeLoc = SM.getIncludeLoc(SM.getFileID(Loc));
if (IncludeLoc.isValid()) {
+ if (llvm::timeTraceProfilerEnabled()) {
+ const FileEntry *FE = SM.getFileEntryForID(SM.getFileID(Loc));
+ llvm::timeTraceProfilerBegin(
+ "Source", FE != nullptr ? FE->getName() : StringRef("<unknown>"));
+ }
+
IncludeStack.push_back(IncludeLoc);
S->DiagnoseNonDefaultPragmaPack(
Sema::PragmaPackDiagnoseKind::NonDefaultStateAtInclude, IncludeLoc);
@@ -100,10 +107,14 @@ public:
break;
}
case ExitFile:
- if (!IncludeStack.empty())
+ if (!IncludeStack.empty()) {
+ if (llvm::timeTraceProfilerEnabled())
+ llvm::timeTraceProfilerEnd();
+
S->DiagnoseNonDefaultPragmaPack(
Sema::PragmaPackDiagnoseKind::ChangedStateAtExit,
IncludeStack.pop_back_val());
+ }
break;
default:
break;
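These hooks are only active when the time-trace profiler has been enabled (the -ftime-trace driver flag); because a file's Enter and Exit arrive as two separate preprocessor callbacks, the Begin/End calls have to be paired manually here, whereas a region that opens and closes within one scope can use the RAII wrapper this same patch applies to PerformPendingInstantiations. A minimal sketch of the RAII form (region name and work are placeholders):

    {
      llvm::TimeTraceScope Scope("Frontend", "example-region");  // no-op if profiling is off
      doExpensiveWork();                                          // hypothetical timed work
    }                                                             // event closes when Scope is destroyed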
@@ -147,6 +158,7 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr),
CurScope(nullptr), Ident_super(nullptr), Ident___float128(nullptr) {
TUScope = nullptr;
+ isConstantEvaluatedOverride = false;
LoadedExternalKnownNamespaces = false;
for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
@@ -165,8 +177,6 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{},
nullptr, ExpressionEvaluationContextRecord::EK_Other);
- PreallocatedFunctionScope.reset(new FunctionScopeInfo(Diags));
-
// Initialization of data sharing attributes stack for OpenMP
InitDataSharingAttributesStack();
@@ -256,11 +266,12 @@ void Sema::Initialize() {
// Initialize predefined OpenCL types and supported extensions and (optional)
// core features.
if (getLangOpts().OpenCL) {
- getOpenCLOptions().addSupport(Context.getTargetInfo().getSupportedOpenCLOpts());
- getOpenCLOptions().enableSupportedCore(getLangOpts().OpenCLVersion);
+ getOpenCLOptions().addSupport(
+ Context.getTargetInfo().getSupportedOpenCLOpts());
+ getOpenCLOptions().enableSupportedCore(getLangOpts());
addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
addImplicitTypedef("event_t", Context.OCLEventTy);
- if (getLangOpts().OpenCLVersion >= 200) {
+ if (getLangOpts().OpenCLCPlusPlus || getLangOpts().OpenCLVersion >= 200) {
addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
addImplicitTypedef("queue_t", Context.OCLQueueTy);
addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy);
@@ -342,8 +353,7 @@ Sema::~Sema() {
// Kill all the active scopes.
for (sema::FunctionScopeInfo *FSI : FunctionScopes)
- if (FSI != PreallocatedFunctionScope.get())
- delete FSI;
+ delete FSI;
// Tell the SemaConsumer to forget about us; we're going out of scope.
if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
@@ -481,6 +491,7 @@ ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
default:
llvm_unreachable("can't implicitly cast lvalue to rvalue with this cast "
"kind");
+ case CK_Dependent:
case CK_LValueToRValue:
case CK_ArrayToPointerDecay:
case CK_FunctionToPointerDecay:
@@ -489,7 +500,8 @@ ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
break;
}
}
- assert((VK == VK_RValue || !E->isRValue()) && "can't cast rvalue to lvalue");
+ assert((VK == VK_RValue || Kind == CK_Dependent || !E->isRValue()) &&
+ "can't cast rvalue to lvalue");
#endif
diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc());
@@ -578,7 +590,7 @@ static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
// warn even if the variable isn't odr-used. (isReferenced doesn't
// precisely reflect that, but it's a decent approximation.)
if (VD->isReferenced() &&
- VD->isUsableInConstantExpressions(SemaRef->Context))
+ VD->mightBeUsableInConstantExpressions(SemaRef->Context))
return true;
if (VarTemplateDecl *Template = VD->getDescribedVarTemplate())
@@ -837,41 +849,21 @@ void Sema::ActOnStartOfTranslationUnit() {
if (getLangOpts().ModulesTS &&
(getLangOpts().getCompilingModule() == LangOptions::CMK_ModuleInterface ||
getLangOpts().getCompilingModule() == LangOptions::CMK_None)) {
+ // We start in an implied global module fragment.
SourceLocation StartOfTU =
SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
-
- // We start in the global module; all those declarations are implicitly
- // module-private (though they do not have module linkage).
- auto &Map = PP.getHeaderSearchInfo().getModuleMap();
- auto *GlobalModule = Map.createGlobalModuleForInterfaceUnit(StartOfTU);
- assert(GlobalModule && "module creation should not fail");
-
- // Enter the scope of the global module.
- ModuleScopes.push_back({});
- ModuleScopes.back().Module = GlobalModule;
- VisibleModules.setVisible(GlobalModule, StartOfTU);
-
- // All declarations created from now on are owned by the global module.
- auto *TU = Context.getTranslationUnitDecl();
- TU->setModuleOwnershipKind(Decl::ModuleOwnershipKind::Visible);
- TU->setLocalOwningModule(GlobalModule);
+ ActOnGlobalModuleFragmentDecl(StartOfTU);
+ ModuleScopes.back().ImplicitGlobalModuleFragment = true;
}
}
-/// ActOnEndOfTranslationUnit - This is called at the very end of the
-/// translation unit when EOF is reached and all but the top-level scope is
-/// popped.
-void Sema::ActOnEndOfTranslationUnit() {
- assert(DelayedDiagnostics.getCurrentPool() == nullptr
- && "reached end of translation unit with a pool attached?");
-
- // If code completion is enabled, don't perform any end-of-translation-unit
- // work.
- if (PP.isCodeCompletionEnabled())
+void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) {
+ // No explicit actions are required at the end of the global module fragment.
+ if (Kind == TUFragmentKind::Global)
return;
// Transfer late parsed template instantiations over to the pending template
- // instantiation list. During normal compliation, the late template parser
+ // instantiation list. During normal compilation, the late template parser
// will be installed and instantiating these templates will succeed.
//
// If we are building a TU prefix for serialization, it is also safe to
@@ -884,46 +876,79 @@ void Sema::ActOnEndOfTranslationUnit() {
LateParsedInstantiations.end());
LateParsedInstantiations.clear();
+ // If DefinedUsedVTables ends up marking any virtual member functions it
+ // might lead to more pending template instantiations, which we then need
+ // to instantiate.
+ DefineUsedVTables();
+
+ // C++: Perform implicit template instantiations.
+ //
+ // FIXME: When we perform these implicit instantiations, we do not
+ // carefully keep track of the point of instantiation (C++ [temp.point]).
+ // This means that name lookup that occurs within the template
+ // instantiation will always happen at the end of the translation unit,
+ // so it will find some names that are not required to be found. This is
+ // valid, but we could do better by diagnosing if an instantiation uses a
+ // name that was not visible at its first point of instantiation.
+ if (ExternalSource) {
+ // Load pending instantiations from the external source.
+ SmallVector<PendingImplicitInstantiation, 4> Pending;
+ ExternalSource->ReadPendingInstantiations(Pending);
+ for (auto PII : Pending)
+ if (auto Func = dyn_cast<FunctionDecl>(PII.first))
+ Func->setInstantiationIsPending(true);
+ PendingInstantiations.insert(PendingInstantiations.begin(),
+ Pending.begin(), Pending.end());
+ }
+
+ {
+ llvm::TimeTraceScope TimeScope("PerformPendingInstantiations",
+ StringRef(""));
+ PerformPendingInstantiations();
+ }
+
+ assert(LateParsedInstantiations.empty() &&
+ "end of TU template instantiation should not create more "
+ "late-parsed templates");
+}
+
+/// ActOnEndOfTranslationUnit - This is called at the very end of the
+/// translation unit when EOF is reached and all but the top-level scope is
+/// popped.
+void Sema::ActOnEndOfTranslationUnit() {
+ assert(DelayedDiagnostics.getCurrentPool() == nullptr
+ && "reached end of translation unit with a pool attached?");
+
+ // If code completion is enabled, don't perform any end-of-translation-unit
+ // work.
+ if (PP.isCodeCompletionEnabled())
+ return;
+
// Complete translation units and modules define vtables and perform implicit
// instantiations. PCH files do not.
if (TUKind != TU_Prefix) {
DiagnoseUseOfUnimplementedSelectors();
- // If DefinedUsedVTables ends up marking any virtual member functions it
- // might lead to more pending template instantiations, which we then need
- // to instantiate.
- DefineUsedVTables();
-
- // C++: Perform implicit template instantiations.
- //
- // FIXME: When we perform these implicit instantiations, we do not
- // carefully keep track of the point of instantiation (C++ [temp.point]).
- // This means that name lookup that occurs within the template
- // instantiation will always happen at the end of the translation unit,
- // so it will find some names that are not required to be found. This is
- // valid, but we could do better by diagnosing if an instantiation uses a
- // name that was not visible at its first point of instantiation.
- if (ExternalSource) {
- // Load pending instantiations from the external source.
- SmallVector<PendingImplicitInstantiation, 4> Pending;
- ExternalSource->ReadPendingInstantiations(Pending);
- for (auto PII : Pending)
- if (auto Func = dyn_cast<FunctionDecl>(PII.first))
- Func->setInstantiationIsPending(true);
- PendingInstantiations.insert(PendingInstantiations.begin(),
- Pending.begin(), Pending.end());
- }
-
- PerformPendingInstantiations();
-
- assert(LateParsedInstantiations.empty() &&
- "end of TU template instantiation should not create more "
- "late-parsed templates");
+ ActOnEndOfTranslationUnitFragment(
+ !ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
+ Module::PrivateModuleFragment
+ ? TUFragmentKind::Private
+ : TUFragmentKind::Normal);
if (LateTemplateParserCleanup)
LateTemplateParserCleanup(OpaqueParser);
CheckDelayedMemberExceptionSpecs();
+ } else {
+ // If we are building a TU prefix for serialization, it is safe to transfer
+ // these over, even though they are not parsed. The end of the TU should be
+ // outside of any eager template instantiation scope, so when this AST is
+ // deserialized, these templates will not be parsed until the end of the
+ // combined TU.
+ PendingInstantiations.insert(PendingInstantiations.end(),
+ LateParsedInstantiations.begin(),
+ LateParsedInstantiations.end());
+ LateParsedInstantiations.clear();
}
DiagnoseUnterminatedPragmaPack();
@@ -933,7 +958,6 @@ void Sema::ActOnEndOfTranslationUnit() {
// incompatible declarations.
assert(DelayedOverridingExceptionSpecChecks.empty());
assert(DelayedEquivalentExceptionSpecChecks.empty());
- assert(DelayedDefaultedMemberExceptionSpecs.empty());
// All dllexport classes should have been processed already.
assert(DelayedDllExportClasses.empty());
@@ -981,13 +1005,24 @@ void Sema::ActOnEndOfTranslationUnit() {
checkUndefinedButUsed(*this);
}
+ // A global-module-fragment is only permitted within a module unit.
+ bool DiagnosedMissingModuleDeclaration = false;
+ if (!ModuleScopes.empty() &&
+ ModuleScopes.back().Module->Kind == Module::GlobalModuleFragment &&
+ !ModuleScopes.back().ImplicitGlobalModuleFragment) {
+ Diag(ModuleScopes.back().BeginLoc,
+ diag::err_module_declaration_missing_after_global_module_introducer);
+ DiagnosedMissingModuleDeclaration = true;
+ }
+
if (TUKind == TU_Module) {
// If we are building a module interface unit, we need to have seen the
// module declaration by now.
if (getLangOpts().getCompilingModule() ==
LangOptions::CMK_ModuleInterface &&
(ModuleScopes.empty() ||
- ModuleScopes.back().Module->Kind != Module::ModuleInterfaceUnit)) {
+ !ModuleScopes.back().Module->isModulePurview()) &&
+ !DiagnosedMissingModuleDeclaration) {
// FIXME: Make a better guess as to where to put the module declaration.
Diag(getSourceManager().getLocForStartOfFile(
getSourceManager().getMainFileID()),
@@ -1325,6 +1360,190 @@ Sema::Diag(SourceLocation Loc, const PartialDiagnostic& PD) {
return Builder;
}
+// Print notes showing how we can reach FD starting from an a priori
+// known-callable function.
+static void emitCallStackNotes(Sema &S, FunctionDecl *FD) {
+ auto FnIt = S.DeviceKnownEmittedFns.find(FD);
+ while (FnIt != S.DeviceKnownEmittedFns.end()) {
+ DiagnosticBuilder Builder(
+ S.Diags.Report(FnIt->second.Loc, diag::note_called_by));
+ Builder << FnIt->second.FD;
+ Builder.setForceEmit();
+
+ FnIt = S.DeviceKnownEmittedFns.find(FnIt->second.FD);
+ }
+}
+
+// Emit any deferred diagnostics for FD and erase them from the map in which
+// they're stored.
+static void emitDeferredDiags(Sema &S, FunctionDecl *FD) {
+ auto It = S.DeviceDeferredDiags.find(FD);
+ if (It == S.DeviceDeferredDiags.end())
+ return;
+ bool HasWarningOrError = false;
+ for (PartialDiagnosticAt &PDAt : It->second) {
+ const SourceLocation &Loc = PDAt.first;
+ const PartialDiagnostic &PD = PDAt.second;
+ HasWarningOrError |= S.getDiagnostics().getDiagnosticLevel(
+ PD.getDiagID(), Loc) >= DiagnosticsEngine::Warning;
+ DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID()));
+ Builder.setForceEmit();
+ PD.Emit(Builder);
+ }
+ S.DeviceDeferredDiags.erase(It);
+
+ // FIXME: Should this be called after every warning/error emitted in the loop
+ // above, instead of just once per function? That would be consistent with
+ // how we handle immediate errors, but it also seems like a bit much.
+ if (HasWarningOrError)
+ emitCallStackNotes(S, FD);
+}
+
+// In CUDA, there are some constructs which may appear in semantically-valid
+// code, but trigger errors if we ever generate code for the function in which
+// they appear. Essentially every construct you're not allowed to use on the
+// device falls into this category, because you are allowed to use these
+// constructs in a __host__ __device__ function, but only if that function is
+// never codegen'ed on the device.
+//
+// To handle semantic checking for these constructs, we keep track of the set of
+// functions we know will be emitted, either because we could tell a priori that
+// they would be emitted, or because they were transitively called by a
+// known-emitted function.
+//
+// We also keep a partial call graph of which not-known-emitted functions call
+// which other not-known-emitted functions.
+//
+// When we see something which is illegal if the current function is emitted
+// (usually by way of CUDADiagIfDeviceCode, CUDADiagIfHostCode, or
+// CheckCUDACall), we first check if the current function is known-emitted. If
+// so, we immediately output the diagnostic.
+//
+// Otherwise, we "defer" the diagnostic. It sits in Sema::DeviceDeferredDiags
+// until we discover that the function is known-emitted, at which point we take
+// it out of this map and emit the diagnostic.
+
+Sema::DeviceDiagBuilder::DeviceDiagBuilder(Kind K, SourceLocation Loc,
+ unsigned DiagID, FunctionDecl *Fn,
+ Sema &S)
+ : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
+ ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
+ switch (K) {
+ case K_Nop:
+ break;
+ case K_Immediate:
+ case K_ImmediateWithCallStack:
+ ImmediateDiag.emplace(S.Diag(Loc, DiagID));
+ break;
+ case K_Deferred:
+ assert(Fn && "Must have a function to attach the deferred diag to.");
+ auto &Diags = S.DeviceDeferredDiags[Fn];
+ PartialDiagId.emplace(Diags.size());
+ Diags.emplace_back(Loc, S.PDiag(DiagID));
+ break;
+ }
+}
+
+Sema::DeviceDiagBuilder::DeviceDiagBuilder(DeviceDiagBuilder &&D)
+ : S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn),
+ ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag),
+ PartialDiagId(D.PartialDiagId) {
+ // Reset the moved-from builder so it does not emit its diagnostic again.
+ D.ShowCallStack = false;
+ D.ImmediateDiag.reset();
+ D.PartialDiagId.reset();
+}
+
+Sema::DeviceDiagBuilder::~DeviceDiagBuilder() {
+ if (ImmediateDiag) {
+ // Emit our diagnostic and, if it was a warning or error, output a callstack
+ // if Fn isn't a priori known-emitted.
+ bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel(
+ DiagID, Loc) >= DiagnosticsEngine::Warning;
+ ImmediateDiag.reset(); // Emit the immediate diag.
+ if (IsWarningOrError && ShowCallStack)
+ emitCallStackNotes(S, Fn);
+ } else {
+ assert((!PartialDiagId || ShowCallStack) &&
+ "Must always show call stack for deferred diags.");
+ }
+}
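A short sketch of how the builder is used at a check site, mirroring the call sites later in this patch: construct it with a kind computed from the current context and stream the arguments as with Sema::Diag; the immediate kinds emit on destruction, while K_Deferred parks the partial diagnostic in DeviceDeferredDiags keyed by the function (the diagnostic ID below is a placeholder):

    // DiagKind is one of K_Nop, K_Immediate, K_ImmediateWithCallStack or K_Deferred,
    // typically chosen from CurrentCUDATarget() as in CUDADiagIfDeviceCode.
    Sema::DeviceDiagBuilder(DiagKind, Loc, SomeDeviceDiagID /* placeholder */, CurFD, SemaRef)
        << CurFD;   // arguments are streamed exactly as with an ordinary diagnostic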
+
+// Indicate that this function (and thus everything it transitively calls) will
+// be codegen'ed, and emit any deferred diagnostics on this function and its
+// (transitive) callees.
+void Sema::markKnownEmitted(
+ Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
+ SourceLocation OrigLoc,
+ const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted) {
+ // Nothing to do if we already know that FD is emitted.
+ if (IsKnownEmitted(S, OrigCallee)) {
+ assert(!S.DeviceCallGraph.count(OrigCallee));
+ return;
+ }
+
+ // We've just discovered that OrigCallee is known-emitted. Walk our call
+ // graph to see what else we can now discover also must be emitted.
+
+ struct CallInfo {
+ FunctionDecl *Caller;
+ FunctionDecl *Callee;
+ SourceLocation Loc;
+ };
+ llvm::SmallVector<CallInfo, 4> Worklist = {{OrigCaller, OrigCallee, OrigLoc}};
+ llvm::SmallSet<CanonicalDeclPtr<FunctionDecl>, 4> Seen;
+ Seen.insert(OrigCallee);
+ while (!Worklist.empty()) {
+ CallInfo C = Worklist.pop_back_val();
+ assert(!IsKnownEmitted(S, C.Callee) &&
+ "Worklist should not contain known-emitted functions.");
+ S.DeviceKnownEmittedFns[C.Callee] = {C.Caller, C.Loc};
+ emitDeferredDiags(S, C.Callee);
+
+ // If this is a template instantiation, explore its callgraph as well:
+ // Non-dependent calls are part of the template's callgraph, while dependent
+ // calls are part of the instantiation's call graph.
+ if (auto *Templ = C.Callee->getPrimaryTemplate()) {
+ FunctionDecl *TemplFD = Templ->getAsFunction();
+ if (!Seen.count(TemplFD) && !S.DeviceKnownEmittedFns.count(TemplFD)) {
+ Seen.insert(TemplFD);
+ Worklist.push_back(
+ {/* Caller = */ C.Caller, /* Callee = */ TemplFD, C.Loc});
+ }
+ }
+
+ // Add all functions called by Callee to our worklist.
+ auto CGIt = S.DeviceCallGraph.find(C.Callee);
+ if (CGIt == S.DeviceCallGraph.end())
+ continue;
+
+ for (std::pair<CanonicalDeclPtr<FunctionDecl>, SourceLocation> FDLoc :
+ CGIt->second) {
+ FunctionDecl *NewCallee = FDLoc.first;
+ SourceLocation CallLoc = FDLoc.second;
+ if (Seen.count(NewCallee) || IsKnownEmitted(S, NewCallee))
+ continue;
+ Seen.insert(NewCallee);
+ Worklist.push_back(
+ {/* Caller = */ C.Callee, /* Callee = */ NewCallee, CallLoc});
+ }
+
+ // C.Callee is now known-emitted, so we no longer need to maintain its list
+ // of callees in DeviceCallGraph.
+ S.DeviceCallGraph.erase(CGIt);
+ }
+}
+
+Sema::DeviceDiagBuilder Sema::targetDiag(SourceLocation Loc, unsigned DiagID) {
+ if (LangOpts.OpenMP && LangOpts.OpenMPIsDevice)
+ return diagIfOpenMPDeviceCode(Loc, DiagID);
+ if (getLangOpts().CUDA)
+ return getLangOpts().CUDAIsDevice ? CUDADiagIfDeviceCode(Loc, DiagID)
+ : CUDADiagIfHostCode(Loc, DiagID);
+ return DeviceDiagBuilder(DeviceDiagBuilder::K_Immediate, Loc, DiagID,
+ getCurFunctionDecl(), *this);
+}
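targetDiag gives checks shared between CUDA, OpenMP offloading and plain host compilation a single entry point: it picks the deferral policy appropriate to the active offloading model and otherwise degenerates to an immediate diagnostic. A hedged usage sketch (the diagnostic ID is a placeholder):

    // Inside a Sema check for a construct that is illegal in device code:
    targetDiag(E->getBeginLoc(), SomeDeviceDiagID /* placeholder */) << E->getType();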
+
/// Looks through the macro-expansion chain for the given
/// location, looking for a macro expansion with the given name.
/// If one is found, returns true and sets the location to that
@@ -1377,10 +1596,10 @@ Scope *Sema::getScopeForContext(DeclContext *Ctx) {
/// Enter a new function scope
void Sema::PushFunctionScope() {
- if (FunctionScopes.empty()) {
- // Use PreallocatedFunctionScope to avoid allocating memory when possible.
- PreallocatedFunctionScope->Clear();
- FunctionScopes.push_back(PreallocatedFunctionScope.get());
+ if (FunctionScopes.empty() && CachedFunctionScope) {
+ // Use CachedFunctionScope to avoid allocating memory when possible.
+ CachedFunctionScope->Clear();
+ FunctionScopes.push_back(CachedFunctionScope.release());
} else {
FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics()));
}
@@ -1409,7 +1628,7 @@ void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
}
// Check that the type of the VarDecl has an accessible copy constructor and
-// resolve its destructor's exception spefication.
+// resolve its destructor's exception specification.
static void checkEscapingByref(VarDecl *VD, Sema &S) {
QualType T = VD->getType();
EnterExpressionEvaluationContext scope(
@@ -1426,7 +1645,7 @@ static void checkEscapingByref(VarDecl *VD, Sema &S) {
S.Context.setBlockVarCopyInit(VD, Init, S.canThrow(Init));
}
- // The destructor's exception spefication is needed when IRGen generates
+ // The destructor's exception specification is needed when IRGen generates
// block copy/destroy functions. Resolve it here.
if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
if (CXXDestructorDecl *DD = RD->getDestructor()) {
@@ -1439,12 +1658,24 @@ static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
// Set the EscapingByref flag of __block variables captured by
// escaping blocks.
for (const BlockDecl *BD : FSI.Blocks) {
- if (BD->doesNotEscape())
- continue;
for (const BlockDecl::Capture &BC : BD->captures()) {
VarDecl *VD = BC.getVariable();
- if (VD->hasAttr<BlocksAttr>())
+ if (VD->hasAttr<BlocksAttr>()) {
+ // Nothing to do if this is a __block variable captured by a
+ // non-escaping block.
+ if (BD->doesNotEscape())
+ continue;
VD->setEscapingByref();
+ }
+ // Check whether the captured variable is or contains an object of
+ // non-trivial C union type.
+ QualType CapType = BC.getVariable()->getType();
+ if (CapType.hasNonTrivialToPrimitiveDestructCUnion() ||
+ CapType.hasNonTrivialToPrimitiveCopyCUnion())
+ S.checkNonTrivialCUnion(BC.getVariable()->getType(),
+ BD->getCaretLocation(),
+ Sema::NTCUC_BlockCapture,
+ Sema::NTCUK_Destruct|Sema::NTCUK_Copy);
}
}
@@ -1461,30 +1692,42 @@ static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
}
}
-void Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
- const Decl *D, const BlockExpr *blkExpr) {
+/// Pop a function (or block or lambda or captured region) scope from the stack.
+///
+/// \param WP The warning policy to use for CFG-based warnings, or null if such
+/// warnings should not be produced.
+/// \param D The declaration corresponding to this function scope, if producing
+/// CFG-based warnings.
+/// \param BlockType The type of the block expression, if D is a BlockDecl.
+Sema::PoppedFunctionScopePtr
+Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
+ const Decl *D, QualType BlockType) {
assert(!FunctionScopes.empty() && "mismatched push/pop!");
- // This function shouldn't be called after popping the current function scope.
- // markEscapingByrefs calls PerformMoveOrCopyInitialization, which can call
- // PushFunctionScope, which can cause clearing out PreallocatedFunctionScope
- // when FunctionScopes is empty.
markEscapingByrefs(*FunctionScopes.back(), *this);
- FunctionScopeInfo *Scope = FunctionScopes.pop_back_val();
+ PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(),
+ PoppedFunctionScopeDeleter(this));
if (LangOpts.OpenMP)
- popOpenMPFunctionRegion(Scope);
+ popOpenMPFunctionRegion(Scope.get());
// Issue any analysis-based warnings.
if (WP && D)
- AnalysisWarnings.IssueWarnings(*WP, Scope, D, blkExpr);
+ AnalysisWarnings.IssueWarnings(*WP, Scope.get(), D, BlockType);
else
for (const auto &PUD : Scope->PossiblyUnreachableDiags)
Diag(PUD.Loc, PUD.PD);
- // Delete the scope unless its our preallocated scope.
- if (Scope != PreallocatedFunctionScope.get())
+ return Scope;
+}
+
+void Sema::PoppedFunctionScopeDeleter::
+operator()(sema::FunctionScopeInfo *Scope) const {
+ // Stash the function scope for later reuse if it's for a normal function.
+ if (Scope->isPlainFunction() && !Self->CachedFunctionScope)
+ Self->CachedFunctionScope.reset(Scope);
+ else
delete Scope;
}
@@ -1573,7 +1816,7 @@ LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) {
// an associated template parameter list.
LambdaScopeInfo *Sema::getCurGenericLambda() {
if (LambdaScopeInfo *LSI = getCurLambda()) {
- return (LSI->AutoTemplateParams.size() ||
+ return (LSI->TemplateParams.size() ||
LSI->GLTemplateParameterList) ? LSI : nullptr;
}
return nullptr;
@@ -1841,7 +2084,7 @@ bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
// FIXME: Try this before emitting the fixit, and suppress diagnostics
// while doing so.
- E = ActOnCallExpr(nullptr, E.get(), Range.getEnd(), None,
+ E = BuildCallExpr(nullptr, E.get(), Range.getEnd(), None,
Range.getEnd().getLocWithOffset(1));
return true;
}
diff --git a/lib/Sema/SemaAccess.cpp b/lib/Sema/SemaAccess.cpp
index 69084589efea..b6fbbbff91f5 100644
--- a/lib/Sema/SemaAccess.cpp
+++ b/lib/Sema/SemaAccess.cpp
@@ -1,9 +1,8 @@
//===---- SemaAccess.cpp - C++ Access Control -------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -127,8 +126,7 @@ struct EffectiveContext {
bool includesClass(const CXXRecordDecl *R) const {
R = R->getCanonicalDecl();
- return std::find(Records.begin(), Records.end(), R)
- != Records.end();
+ return llvm::find(Records, R) != Records.end();
}
/// Retrieves the innermost "useful" context. Can be null if we're
diff --git a/lib/Sema/SemaAttr.cpp b/lib/Sema/SemaAttr.cpp
index 2bc1b769f77a..8e9318847373 100644
--- a/lib/Sema/SemaAttr.cpp
+++ b/lib/Sema/SemaAttr.cpp
@@ -1,9 +1,8 @@
//===--- SemaAttr.cpp - Semantic Analysis for Attributes ------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -404,9 +403,15 @@ void Sema::ActOnPragmaMSSeg(SourceLocation PragmaLocation,
if (Action & PSK_Pop && Stack->Stack.empty())
Diag(PragmaLocation, diag::warn_pragma_pop_failed) << PragmaName
<< "stack empty";
- if (SegmentName &&
- !checkSectionName(SegmentName->getBeginLoc(), SegmentName->getString()))
- return;
+ if (SegmentName) {
+ if (!checkSectionName(SegmentName->getBeginLoc(), SegmentName->getString()))
+ return;
+
+ if (SegmentName->getString() == ".drectve" &&
+ Context.getTargetInfo().getCXXABI().isMicrosoft())
+ Diag(PragmaLocation, diag::warn_attribute_section_drectve) << PragmaName;
+ }
+
Stack->Act(PragmaLocation, Action, StackSlotLabel, SegmentName);
}
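For context, the new check rejects attempts to place user code or data into the linker-directive section through the Microsoft segment pragmas, since that would corrupt the directives the linker reads from .drectve. A minimal translation unit that would now draw the warning on a Microsoft C++ ABI target (the flags are illustrative):

    // clang --target=x86_64-pc-windows-msvc -fms-extensions -fsyntax-only t.cpp
    #pragma code_seg(".drectve")   // now warned: .drectve is reserved for linker directives
    int placed_in_drectve() { return 0; }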
@@ -523,6 +528,7 @@ attrMatcherRuleListToString(ArrayRef<attr::SubjectMatchRule> Rules) {
void Sema::ActOnPragmaAttributeAttribute(
ParsedAttr &Attribute, SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules) {
+ Attribute.setIsPragmaClangAttribute();
SmallVector<attr::SubjectMatchRule, 4> SubjectMatchRules;
// Gather the subject match rules that are supported by the attribute.
SmallVector<std::pair<attr::SubjectMatchRule, bool>, 4>
@@ -680,6 +686,8 @@ void Sema::AddPragmaAttributes(Scope *S, Decl *D) {
for (auto &Entry : Group.Entries) {
ParsedAttr *Attribute = Entry.Attribute;
assert(Attribute && "Expected an attribute");
+ assert(Attribute->isPragmaClangAttribute() &&
+ "expected #pragma clang attribute");
// Ensure that the attribute can be applied to the given declaration.
bool Applies = false;
diff --git a/lib/Sema/SemaCUDA.cpp b/lib/Sema/SemaCUDA.cpp
index ffc728898584..203c09c57112 100644
--- a/lib/Sema/SemaCUDA.cpp
+++ b/lib/Sema/SemaCUDA.cpp
@@ -1,9 +1,8 @@
//===--- SemaCUDA.cpp - Semantic Analysis for CUDA constructs -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
@@ -14,6 +13,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/Basic/Cuda.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Sema.h"
@@ -42,16 +42,15 @@ ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
SourceLocation GGGLoc) {
FunctionDecl *ConfigDecl = Context.getcudaConfigureCallDecl();
if (!ConfigDecl)
- return ExprError(
- Diag(LLLLoc, diag::err_undeclared_var_use)
- << (getLangOpts().HIP ? "hipConfigureCall" : "cudaConfigureCall"));
+ return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
+ << getCudaConfigureFuncName());
QualType ConfigQTy = ConfigDecl->getType();
DeclRefExpr *ConfigDR = new (Context)
DeclRefExpr(Context, ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc);
MarkFunctionReferenced(LLLLoc, ConfigDecl);
- return ActOnCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, nullptr,
+ return BuildCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, nullptr,
/*IsExecConfig=*/true);
}
@@ -587,78 +586,6 @@ void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD,
NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
}
-// In CUDA, there are some constructs which may appear in semantically-valid
-// code, but trigger errors if we ever generate code for the function in which
-// they appear. Essentially every construct you're not allowed to use on the
-// device falls into this category, because you are allowed to use these
-// constructs in a __host__ __device__ function, but only if that function is
-// never codegen'ed on the device.
-//
-// To handle semantic checking for these constructs, we keep track of the set of
-// functions we know will be emitted, either because we could tell a priori that
-// they would be emitted, or because they were transitively called by a
-// known-emitted function.
-//
-// We also keep a partial call graph of which not-known-emitted functions call
-// which other not-known-emitted functions.
-//
-// When we see something which is illegal if the current function is emitted
-// (usually by way of CUDADiagIfDeviceCode, CUDADiagIfHostCode, or
-// CheckCUDACall), we first check if the current function is known-emitted. If
-// so, we immediately output the diagnostic.
-//
-// Otherwise, we "defer" the diagnostic. It sits in Sema::CUDADeferredDiags
-// until we discover that the function is known-emitted, at which point we take
-// it out of this map and emit the diagnostic.
-
-Sema::CUDADiagBuilder::CUDADiagBuilder(Kind K, SourceLocation Loc,
- unsigned DiagID, FunctionDecl *Fn,
- Sema &S)
- : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
- ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
- switch (K) {
- case K_Nop:
- break;
- case K_Immediate:
- case K_ImmediateWithCallStack:
- ImmediateDiag.emplace(S.Diag(Loc, DiagID));
- break;
- case K_Deferred:
- assert(Fn && "Must have a function to attach the deferred diag to.");
- PartialDiag.emplace(S.PDiag(DiagID));
- break;
- }
-}
-
-// Print notes showing how we can reach FD starting from an a priori
-// known-callable function.
-static void EmitCallStackNotes(Sema &S, FunctionDecl *FD) {
- auto FnIt = S.CUDAKnownEmittedFns.find(FD);
- while (FnIt != S.CUDAKnownEmittedFns.end()) {
- DiagnosticBuilder Builder(
- S.Diags.Report(FnIt->second.Loc, diag::note_called_by));
- Builder << FnIt->second.FD;
- Builder.setForceEmit();
-
- FnIt = S.CUDAKnownEmittedFns.find(FnIt->second.FD);
- }
-}
-
-Sema::CUDADiagBuilder::~CUDADiagBuilder() {
- if (ImmediateDiag) {
- // Emit our diagnostic and, if it was a warning or error, output a callstack
- // if Fn isn't a priori known-emitted.
- bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel(
- DiagID, Loc) >= DiagnosticsEngine::Warning;
- ImmediateDiag.reset(); // Emit the immediate diag.
- if (IsWarningOrError && ShowCallStack)
- EmitCallStackNotes(S, Fn);
- } else if (PartialDiag) {
- assert(ShowCallStack && "Must always show call stack for deferred diags.");
- S.CUDADeferredDiags[Fn].push_back({Loc, std::move(*PartialDiag)});
- }
-}
-
// Do we know that we will eventually codegen the given function?
static bool IsKnownEmitted(Sema &S, FunctionDecl *FD) {
// Templates are emitted when they're instantiated.
@@ -690,152 +617,69 @@ static bool IsKnownEmitted(Sema &S, FunctionDecl *FD) {
// Otherwise, the function is known-emitted if it's in our set of
// known-emitted functions.
- return S.CUDAKnownEmittedFns.count(FD) > 0;
+ return S.DeviceKnownEmittedFns.count(FD) > 0;
}
-Sema::CUDADiagBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc,
- unsigned DiagID) {
+Sema::DeviceDiagBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc,
+ unsigned DiagID) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
- CUDADiagBuilder::Kind DiagKind = [&] {
+ DeviceDiagBuilder::Kind DiagKind = [this] {
switch (CurrentCUDATarget()) {
case CFT_Global:
case CFT_Device:
- return CUDADiagBuilder::K_Immediate;
+ return DeviceDiagBuilder::K_Immediate;
case CFT_HostDevice:
// An HD function counts as host code if we're compiling for host, and
// device code if we're compiling for device. Defer any errors in device
// mode until the function is known-emitted.
if (getLangOpts().CUDAIsDevice) {
return IsKnownEmitted(*this, dyn_cast<FunctionDecl>(CurContext))
- ? CUDADiagBuilder::K_ImmediateWithCallStack
- : CUDADiagBuilder::K_Deferred;
+ ? DeviceDiagBuilder::K_ImmediateWithCallStack
+ : DeviceDiagBuilder::K_Deferred;
}
- return CUDADiagBuilder::K_Nop;
+ return DeviceDiagBuilder::K_Nop;
default:
- return CUDADiagBuilder::K_Nop;
+ return DeviceDiagBuilder::K_Nop;
}
}();
- return CUDADiagBuilder(DiagKind, Loc, DiagID,
- dyn_cast<FunctionDecl>(CurContext), *this);
+ return DeviceDiagBuilder(DiagKind, Loc, DiagID,
+ dyn_cast<FunctionDecl>(CurContext), *this);
}
-Sema::CUDADiagBuilder Sema::CUDADiagIfHostCode(SourceLocation Loc,
- unsigned DiagID) {
+Sema::DeviceDiagBuilder Sema::CUDADiagIfHostCode(SourceLocation Loc,
+ unsigned DiagID) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
- CUDADiagBuilder::Kind DiagKind = [&] {
+ DeviceDiagBuilder::Kind DiagKind = [this] {
switch (CurrentCUDATarget()) {
case CFT_Host:
- return CUDADiagBuilder::K_Immediate;
+ return DeviceDiagBuilder::K_Immediate;
case CFT_HostDevice:
// An HD function counts as host code if we're compiling for host, and
// device code if we're compiling for device. Defer any errors in device
// mode until the function is known-emitted.
if (getLangOpts().CUDAIsDevice)
- return CUDADiagBuilder::K_Nop;
+ return DeviceDiagBuilder::K_Nop;
return IsKnownEmitted(*this, dyn_cast<FunctionDecl>(CurContext))
- ? CUDADiagBuilder::K_ImmediateWithCallStack
- : CUDADiagBuilder::K_Deferred;
+ ? DeviceDiagBuilder::K_ImmediateWithCallStack
+ : DeviceDiagBuilder::K_Deferred;
default:
- return CUDADiagBuilder::K_Nop;
+ return DeviceDiagBuilder::K_Nop;
}
}();
- return CUDADiagBuilder(DiagKind, Loc, DiagID,
- dyn_cast<FunctionDecl>(CurContext), *this);
-}
-
-// Emit any deferred diagnostics for FD and erase them from the map in which
-// they're stored.
-static void EmitDeferredDiags(Sema &S, FunctionDecl *FD) {
- auto It = S.CUDADeferredDiags.find(FD);
- if (It == S.CUDADeferredDiags.end())
- return;
- bool HasWarningOrError = false;
- for (PartialDiagnosticAt &PDAt : It->second) {
- const SourceLocation &Loc = PDAt.first;
- const PartialDiagnostic &PD = PDAt.second;
- HasWarningOrError |= S.getDiagnostics().getDiagnosticLevel(
- PD.getDiagID(), Loc) >= DiagnosticsEngine::Warning;
- DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID()));
- Builder.setForceEmit();
- PD.Emit(Builder);
- }
- S.CUDADeferredDiags.erase(It);
-
- // FIXME: Should this be called after every warning/error emitted in the loop
- // above, instead of just once per function? That would be consistent with
- // how we handle immediate errors, but it also seems like a bit much.
- if (HasWarningOrError)
- EmitCallStackNotes(S, FD);
-}
-
-// Indicate that this function (and thus everything it transtively calls) will
-// be codegen'ed, and emit any deferred diagnostics on this function and its
-// (transitive) callees.
-static void MarkKnownEmitted(Sema &S, FunctionDecl *OrigCaller,
- FunctionDecl *OrigCallee, SourceLocation OrigLoc) {
- // Nothing to do if we already know that FD is emitted.
- if (IsKnownEmitted(S, OrigCallee)) {
- assert(!S.CUDACallGraph.count(OrigCallee));
- return;
- }
-
- // We've just discovered that OrigCallee is known-emitted. Walk our call
- // graph to see what else we can now discover also must be emitted.
-
- struct CallInfo {
- FunctionDecl *Caller;
- FunctionDecl *Callee;
- SourceLocation Loc;
- };
- llvm::SmallVector<CallInfo, 4> Worklist = {{OrigCaller, OrigCallee, OrigLoc}};
- llvm::SmallSet<CanonicalDeclPtr<FunctionDecl>, 4> Seen;
- Seen.insert(OrigCallee);
- while (!Worklist.empty()) {
- CallInfo C = Worklist.pop_back_val();
- assert(!IsKnownEmitted(S, C.Callee) &&
- "Worklist should not contain known-emitted functions.");
- S.CUDAKnownEmittedFns[C.Callee] = {C.Caller, C.Loc};
- EmitDeferredDiags(S, C.Callee);
-
- // If this is a template instantiation, explore its callgraph as well:
- // Non-dependent calls are part of the template's callgraph, while dependent
- // calls are part of to the instantiation's call graph.
- if (auto *Templ = C.Callee->getPrimaryTemplate()) {
- FunctionDecl *TemplFD = Templ->getAsFunction();
- if (!Seen.count(TemplFD) && !S.CUDAKnownEmittedFns.count(TemplFD)) {
- Seen.insert(TemplFD);
- Worklist.push_back(
- {/* Caller = */ C.Caller, /* Callee = */ TemplFD, C.Loc});
- }
- }
-
- // Add all functions called by Callee to our worklist.
- auto CGIt = S.CUDACallGraph.find(C.Callee);
- if (CGIt == S.CUDACallGraph.end())
- continue;
-
- for (std::pair<CanonicalDeclPtr<FunctionDecl>, SourceLocation> FDLoc :
- CGIt->second) {
- FunctionDecl *NewCallee = FDLoc.first;
- SourceLocation CallLoc = FDLoc.second;
- if (Seen.count(NewCallee) || IsKnownEmitted(S, NewCallee))
- continue;
- Seen.insert(NewCallee);
- Worklist.push_back(
- {/* Caller = */ C.Callee, /* Callee = */ NewCallee, CallLoc});
- }
-
- // C.Callee is now known-emitted, so we no longer need to maintain its list
- // of callees in CUDACallGraph.
- S.CUDACallGraph.erase(CGIt);
- }
+ return DeviceDiagBuilder(DiagKind, Loc, DiagID,
+ dyn_cast<FunctionDecl>(CurContext), *this);
}
bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
assert(Callee && "Callee may not be null.");
+
+ auto &ExprEvalCtx = ExprEvalContexts.back();
+ if (ExprEvalCtx.isUnevaluated() || ExprEvalCtx.isConstantEvaluated())
+ return true;
+
// FIXME: Is bailing out early correct here? Should we instead assume that
// the caller is a global initializer?
FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
@@ -849,7 +693,7 @@ bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
// Host-side references to a __global__ function refer to the stub, so the
// function itself is never emitted and therefore should not be marked.
if (getLangOpts().CUDAIsDevice || IdentifyCUDATarget(Callee) != CFT_Global)
- MarkKnownEmitted(*this, Caller, Callee, Loc);
+ markKnownEmitted(*this, Caller, Callee, Loc, IsKnownEmitted);
} else {
// If we have
// host fn calls kernel fn calls host+device,
@@ -858,26 +702,27 @@ bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
// that, when compiling for host, only HD functions actually called from the
// host get marked as known-emitted.
if (getLangOpts().CUDAIsDevice || IdentifyCUDATarget(Callee) != CFT_Global)
- CUDACallGraph[Caller].insert({Callee, Loc});
+ DeviceCallGraph[Caller].insert({Callee, Loc});
}
- CUDADiagBuilder::Kind DiagKind = [&] {
+ DeviceDiagBuilder::Kind DiagKind = [this, Caller, Callee,
+ CallerKnownEmitted] {
switch (IdentifyCUDAPreference(Caller, Callee)) {
case CFP_Never:
- return CUDADiagBuilder::K_Immediate;
+ return DeviceDiagBuilder::K_Immediate;
case CFP_WrongSide:
assert(Caller && "WrongSide calls require a non-null caller");
// If we know the caller will be emitted, we know this wrong-side call
// will be emitted, so it's an immediate error. Otherwise, defer the
// error until we know the caller is emitted.
- return CallerKnownEmitted ? CUDADiagBuilder::K_ImmediateWithCallStack
- : CUDADiagBuilder::K_Deferred;
+ return CallerKnownEmitted ? DeviceDiagBuilder::K_ImmediateWithCallStack
+ : DeviceDiagBuilder::K_Deferred;
default:
- return CUDADiagBuilder::K_Nop;
+ return DeviceDiagBuilder::K_Nop;
}
}();
- if (DiagKind == CUDADiagBuilder::K_Nop)
+ if (DiagKind == DeviceDiagBuilder::K_Nop)
return true;
// Avoid emitting this error twice for the same location. Using a hashtable
@@ -887,13 +732,13 @@ bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
if (!LocsWithCUDACallDiags.insert({Caller, Loc}).second)
return true;
- CUDADiagBuilder(DiagKind, Loc, diag::err_ref_bad_target, Caller, *this)
+ DeviceDiagBuilder(DiagKind, Loc, diag::err_ref_bad_target, Caller, *this)
<< IdentifyCUDATarget(Callee) << Callee << IdentifyCUDATarget(Caller);
- CUDADiagBuilder(DiagKind, Callee->getLocation(), diag::note_previous_decl,
- Caller, *this)
+ DeviceDiagBuilder(DiagKind, Callee->getLocation(), diag::note_previous_decl,
+ Caller, *this)
<< Callee;
- return DiagKind != CUDADiagBuilder::K_Immediate &&
- DiagKind != CUDADiagBuilder::K_ImmediateWithCallStack;
+ return DiagKind != DeviceDiagBuilder::K_Immediate &&
+ DiagKind != DeviceDiagBuilder::K_ImmediateWithCallStack;
}
void Sema::CUDASetLambdaAttrs(CXXMethodDecl *Method) {
@@ -958,3 +803,16 @@ void Sema::inheritCUDATargetAttrs(FunctionDecl *FD,
copyAttrIfPresent<CUDAHostAttr>(*this, FD, TemplateFD);
copyAttrIfPresent<CUDADeviceAttr>(*this, FD, TemplateFD);
}
+
+std::string Sema::getCudaConfigureFuncName() const {
+ if (getLangOpts().HIP)
+ return "hipConfigureCall";
+
+ // New CUDA kernel launch sequence.
+ if (CudaFeatureEnabled(Context.getTargetInfo().getSDKVersion(),
+ CudaFeature::CUDA_USES_NEW_LAUNCH))
+ return "__cudaPushCallConfiguration";
+
+ // Legacy CUDA kernel configuration call
+ return "cudaConfigureCall";
+}
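To make the three spellings concrete: the returned name is the host-side configuration function that a triple-chevron kernel launch is rewritten to call before invoking the kernel stub; HIP uses hipConfigureCall, CUDA with the new launch ABI uses __cudaPushCallConfiguration, and older CUDA uses cudaConfigureCall. A rough source-level sketch, with the rewrite shown only as a comment since the actual lowering lives in Sema/CodeGen:

    __global__ void kern(int *p);

    void host_launch(int *p) {
      kern<<<dim3(64), dim3(256)>>>(p);
      // conceptually becomes:
      //   <config-fn>(gridDim, blockDim, /*sharedMem=*/0, /*stream=*/nullptr);
      //   kern(p);   // stub call; <config-fn> is the name returned above
    }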
diff --git a/lib/Sema/SemaCXXScopeSpec.cpp b/lib/Sema/SemaCXXScopeSpec.cpp
index 2354ffe7fbcc..c473856f0b07 100644
--- a/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/lib/Sema/SemaCXXScopeSpec.cpp
@@ -1,9 +1,8 @@
//===--- SemaCXXScopeSpec.cpp - Semantic Analysis for C++ scope specifiers-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -430,8 +429,9 @@ namespace {
// Callback to only accept typo corrections that can be a valid C++ member
// initializer: either a non-static field member or a base class.
-class NestedNameSpecifierValidatorCCC : public CorrectionCandidateCallback {
- public:
+class NestedNameSpecifierValidatorCCC final
+ : public CorrectionCandidateCallback {
+public:
explicit NestedNameSpecifierValidatorCCC(Sema &SRef)
: SRef(SRef) {}
@@ -439,6 +439,10 @@ class NestedNameSpecifierValidatorCCC : public CorrectionCandidateCallback {
return SRef.isAcceptableNestedNameSpecifier(candidate.getCorrectionDecl());
}
+ std::unique_ptr<CorrectionCandidateCallback> clone() override {
+ return llvm::make_unique<NestedNameSpecifierValidatorCCC>(*this);
+ }
+
private:
Sema &SRef;
};
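The clone() override reflects a broader change in this import: typo-correction callbacks are now stack-allocated and passed by reference, and CorrectTypo clones them when it needs to retain one. Any other callback follows the same pattern; a minimal hedged sketch (the class name and acceptance rule are invented for illustration):

    class AnyDeclValidatorCCC final : public CorrectionCandidateCallback {
    public:
      bool ValidateCandidate(const TypoCorrection &Candidate) override {
        return Candidate.getCorrectionDecl() != nullptr;   // accept corrections bound to a decl
      }
      std::unique_ptr<CorrectionCandidateCallback> clone() override {
        return llvm::make_unique<AnyDeclValidatorCCC>(*this);
      }
    };

    // Call sites construct the callback on the stack and pass it by reference:
    //   AnyDeclValidatorCCC CCC;
    //   CorrectTypo(NameInfo, LookupKind, S, &SS, CCC, CTK_ErrorRecovery);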
@@ -615,9 +619,9 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
// different kind of error, so look for typos.
DeclarationName Name = Found.getLookupName();
Found.clear();
+ NestedNameSpecifierValidatorCCC CCC(*this);
if (TypoCorrection Corrected = CorrectTypo(
- Found.getLookupNameInfo(), Found.getLookupKind(), S, &SS,
- llvm::make_unique<NestedNameSpecifierValidatorCCC>(*this),
+ Found.getLookupNameInfo(), Found.getLookupKind(), S, &SS, CCC,
CTK_ErrorRecovery, LookupCtx, EnteringContext)) {
if (LookupCtx) {
bool DroppedSpecifier =
@@ -884,7 +888,7 @@ bool Sema::IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
- TemplateTy Template,
+ TemplateTy OpaqueTemplate,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
@@ -894,11 +898,13 @@ bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
if (SS.isInvalid())
return true;
+ TemplateName Template = OpaqueTemplate.get();
+
// Translate the parser's template argument list in our AST format.
TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
translateTemplateArguments(TemplateArgsIn, TemplateArgs);
- DependentTemplateName *DTN = Template.get().getAsDependentTemplateName();
+ DependentTemplateName *DTN = Template.getAsDependentTemplateName();
if (DTN && DTN->isIdentifier()) {
// Handle a dependent template specialization for which we cannot resolve
// the template name.
@@ -926,23 +932,28 @@ bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
return false;
}
- TemplateDecl *TD = Template.get().getAsTemplateDecl();
- if (Template.get().getAsOverloadedTemplate() || DTN ||
+ // If we assumed an undeclared identifier was a template name, try to
+ // typo-correct it now.
+ if (Template.getAsAssumedTemplateName() &&
+ resolveAssumedTemplateNameAsType(S, Template, TemplateNameLoc))
+ return true;
+
+ TemplateDecl *TD = Template.getAsTemplateDecl();
+ if (Template.getAsOverloadedTemplate() || DTN ||
isa<FunctionTemplateDecl>(TD) || isa<VarTemplateDecl>(TD)) {
SourceRange R(TemplateNameLoc, RAngleLoc);
if (SS.getRange().isValid())
R.setBegin(SS.getRange().getBegin());
Diag(CCLoc, diag::err_non_type_template_in_nested_name_specifier)
- << (TD && isa<VarTemplateDecl>(TD)) << Template.get() << R;
- NoteAllFoundTemplates(Template.get());
+ << (TD && isa<VarTemplateDecl>(TD)) << Template << R;
+ NoteAllFoundTemplates(Template);
return true;
}
// We were able to resolve the template name to an actual template.
// Build an appropriate nested-name-specifier.
- QualType T =
- CheckTemplateIdType(Template.get(), TemplateNameLoc, TemplateArgs);
+ QualType T = CheckTemplateIdType(Template, TemplateNameLoc, TemplateArgs);
if (T.isNull())
return true;
@@ -950,7 +961,7 @@ bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
// nested name specifiers.
if (!T->isDependentType() && !T->getAs<TagType>()) {
Diag(TemplateNameLoc, diag::err_nested_name_spec_non_tag) << T;
- NoteAllFoundTemplates(Template.get());
+ NoteAllFoundTemplates(Template);
return true;
}
diff --git a/lib/Sema/SemaCast.cpp b/lib/Sema/SemaCast.cpp
index 0b4645e11c34..f184eda2f273 100644
--- a/lib/Sema/SemaCast.cpp
+++ b/lib/Sema/SemaCast.cpp
@@ -1,9 +1,8 @@
//===--- SemaCast.cpp - Semantic Analysis for Casts -----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -88,6 +87,7 @@ namespace {
void CheckDynamicCast();
void CheckCXXCStyleCast(bool FunctionalCast, bool ListInitialization);
void CheckCStyleCast();
+ void CheckBuiltinBitCast();
void updatePartOfExplicitCastFlags(CastExpr *CE) {
// Walk down from the CE to the OrigSrcExpr, and mark all immediate
@@ -285,7 +285,7 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
AngleBrackets));
case tok::kw_dynamic_cast: {
- // OpenCL C++ 1.0 s2.9: dynamic_cast is not supported.
+ // dynamic_cast is not supported in C++ for OpenCL.
if (getLangOpts().OpenCLCPlusPlus) {
return ExprError(Diag(OpLoc, diag::err_openclcxx_not_supported)
<< "dynamic_cast");
@@ -332,6 +332,38 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
}
}
+ExprResult Sema::ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &D,
+ ExprResult Operand,
+ SourceLocation RParenLoc) {
+ assert(!D.isInvalidType());
+
+ TypeSourceInfo *TInfo = GetTypeForDeclaratorCast(D, Operand.get()->getType());
+ if (D.isInvalidType())
+ return ExprError();
+
+ return BuildBuiltinBitCastExpr(KWLoc, TInfo, Operand.get(), RParenLoc);
+}
+
+ExprResult Sema::BuildBuiltinBitCastExpr(SourceLocation KWLoc,
+ TypeSourceInfo *TSI, Expr *Operand,
+ SourceLocation RParenLoc) {
+ CastOperation Op(*this, TSI->getType(), Operand);
+ Op.OpRange = SourceRange(KWLoc, RParenLoc);
+ TypeLoc TL = TSI->getTypeLoc();
+ Op.DestRange = SourceRange(TL.getBeginLoc(), TL.getEndLoc());
+
+ if (!Operand->isTypeDependent() && !TSI->getType()->isDependentType()) {
+ Op.CheckBuiltinBitCast();
+ if (Op.SrcExpr.isInvalid())
+ return ExprError();
+ }
+
+ BuiltinBitCastExpr *BCE =
+ new (Context) BuiltinBitCastExpr(Op.ResultType, Op.ValueKind, Op.Kind,
+ Op.SrcExpr.get(), TSI, KWLoc, RParenLoc);
+ return Op.complete(BCE);
+}
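These entry points wire the new __builtin_bit_cast (the compiler support underlying C++20 std::bit_cast) into the cast machinery; the constraints, such as both types being trivially copyable and of equal size, are enforced in CheckBuiltinBitCast. A minimal source-level use:

    unsigned float_to_bits(float f) {
      static_assert(sizeof(unsigned) == sizeof(float), "bit_cast requires equal sizes");
      return __builtin_bit_cast(unsigned, f);   // reinterprets the object representation
    }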
+
/// Try to diagnose a failed overloaded cast. Returns true if
/// diagnostics were emitted.
static bool tryDiagnoseOverloadedCast(Sema &S, CastType CT,
@@ -400,11 +432,11 @@ static bool tryDiagnoseOverloadedCast(Sema &S, CastType CT,
break;
}
- S.Diag(range.getBegin(), msg)
- << CT << srcType << destType
- << range << src->getSourceRange();
-
- candidates.NoteCandidates(S, howManyCandidates, src);
+ candidates.NoteCandidates(
+ PartialDiagnosticAt(range.getBegin(),
+ S.PDiag(msg) << CT << srcType << destType << range
+ << src->getSourceRange()),
+ S, howManyCandidates, src);
return true;
}
@@ -2012,7 +2044,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
if (!CStyle) {
Self.CheckCompatibleReinterpretCast(SrcType, DestType,
- /*isDereference=*/false, OpRange);
+ /*IsDereference=*/false, OpRange);
}
// C++ 5.2.10p10: [...] a reference cast reinterpret_cast<T&>(x) has the
@@ -2213,7 +2245,15 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
/*CheckObjCLifetime=*/CStyle))
SuccessResult = getCastAwayConstnessCastKind(CACK, msg);
- if (IsLValueCast) {
+ if (IsAddressSpaceConversion(SrcType, DestType)) {
+ Kind = CK_AddressSpaceConversion;
+ assert(SrcType->isPointerType() && DestType->isPointerType());
+ if (!CStyle &&
+ !DestType->getPointeeType().getQualifiers().isAddressSpaceSupersetOf(
+ SrcType->getPointeeType().getQualifiers())) {
+ SuccessResult = TC_Failed;
+ }
+ } else if (IsLValueCast) {
Kind = CK_LValueBitCast;
} else if (DestType->isObjCObjectPointerType()) {
Kind = Self.PrepareCastToObjCObjectPointer(SrcExpr);
@@ -2223,8 +2263,6 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
} else {
Kind = CK_BitCast;
}
- } else if (IsAddressSpaceConversion(SrcType, DestType)) {
- Kind = CK_AddressSpaceConversion;
} else {
Kind = CK_BitCast;
}
@@ -2279,23 +2317,80 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
return SuccessResult;
}
+static TryCastResult TryAddressSpaceCast(Sema &Self, ExprResult &SrcExpr,
+ QualType DestType, bool CStyle,
+ unsigned &msg) {
+ if (!Self.getLangOpts().OpenCL)
+ // FIXME: As the compiler doesn't have any information about overlapping
+ // address spaces at the moment, we have to be permissive here.
+ return TC_NotApplicable;
+ // Even though the logic below is general enough and can be applied to
+ // non-OpenCL mode too, we fast-path above because no other languages
+ // define overlapping address spaces currently.
+ auto SrcType = SrcExpr.get()->getType();
+ auto SrcPtrType = SrcType->getAs<PointerType>();
+ if (!SrcPtrType)
+ return TC_NotApplicable;
+ auto DestPtrType = DestType->getAs<PointerType>();
+ if (!DestPtrType)
+ return TC_NotApplicable;
+ auto SrcPointeeType = SrcPtrType->getPointeeType();
+ auto DestPointeeType = DestPtrType->getPointeeType();
+ if (SrcPointeeType.getAddressSpace() == DestPointeeType.getAddressSpace())
+ return TC_NotApplicable;
+ if (!DestPtrType->isAddressSpaceOverlapping(*SrcPtrType)) {
+ msg = diag::err_bad_cxx_cast_addr_space_mismatch;
+ return TC_Failed;
+ }
+ auto SrcPointeeTypeWithoutAS =
+ Self.Context.removeAddrSpaceQualType(SrcPointeeType.getCanonicalType());
+ auto DestPointeeTypeWithoutAS =
+ Self.Context.removeAddrSpaceQualType(DestPointeeType.getCanonicalType());
+ return Self.Context.hasSameType(SrcPointeeTypeWithoutAS,
+ DestPointeeTypeWithoutAS)
+ ? TC_Success
+ : TC_NotApplicable;
+}
+
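For illustration only (not part of the patch), a sketch of the address-space rules TryAddressSpaceCast encodes, assuming an OpenCL C++-style cast path; variable names are hypothetical:

  __local int *lp;
  __constant int *cp;
  __generic int *gp = (__generic int *)lp;  // generic overlaps __local and the pointee types match: TC_Success
  __local int *lq = (__local int *)cp;      // __constant does not overlap __local: TC_Failed,
                                            // err_bad_cxx_cast_addr_space_mismatch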
void CastOperation::checkAddressSpaceCast(QualType SrcType, QualType DestType) {
// In OpenCL only conversions between pointers to objects in overlapping
// addr spaces are allowed. v2.0 s6.5.5 - Generic addr space overlaps
// with any named one, except for constant.
+
+ // Converting the top level pointee addrspace is permitted for compatible
+ // addrspaces (such as 'generic int *' to 'local int *' or vice versa), but
+ // if any of the nested pointee addrspaces differ, we emit a warning
+ // regardless of addrspace compatibility. This makes
+ // local int ** p;
+ // return (generic int **) p;
+ // warn even though local -> generic is permitted.
if (Self.getLangOpts().OpenCL) {
- auto SrcPtrType = SrcType->getAs<PointerType>();
- if (!SrcPtrType)
- return;
- auto DestPtrType = DestType->getAs<PointerType>();
- if (!DestPtrType)
- return;
- if (!DestPtrType->isAddressSpaceOverlapping(*SrcPtrType)) {
- Self.Diag(OpRange.getBegin(),
- diag::err_typecheck_incompatible_address_space)
- << SrcType << DestType << Sema::AA_Casting
- << SrcExpr.get()->getSourceRange();
- SrcExpr = ExprError();
+ const Type *DestPtr, *SrcPtr;
+ bool Nested = false;
+ unsigned DiagID = diag::err_typecheck_incompatible_address_space;
+ DestPtr = Self.getASTContext().getCanonicalType(DestType.getTypePtr()),
+ SrcPtr = Self.getASTContext().getCanonicalType(SrcType.getTypePtr());
+
+ while (isa<PointerType>(DestPtr) && isa<PointerType>(SrcPtr)) {
+ const PointerType *DestPPtr = cast<PointerType>(DestPtr);
+ const PointerType *SrcPPtr = cast<PointerType>(SrcPtr);
+ QualType DestPPointee = DestPPtr->getPointeeType();
+ QualType SrcPPointee = SrcPPtr->getPointeeType();
+ if (Nested ? DestPPointee.getAddressSpace() !=
+ SrcPPointee.getAddressSpace()
+ : !DestPPtr->isAddressSpaceOverlapping(*SrcPPtr)) {
+ Self.Diag(OpRange.getBegin(), DiagID)
+ << SrcType << DestType << Sema::AA_Casting
+ << SrcExpr.get()->getSourceRange();
+ if (!Nested)
+ SrcExpr = ExprError();
+ return;
+ }
+
+ DestPtr = DestPPtr->getPointeeType().getTypePtr();
+ SrcPtr = SrcPPtr->getPointeeType().getTypePtr();
+ Nested = true;
+ DiagID = diag::ext_nested_pointer_qualifier_mismatch;
}
}
}
@@ -2373,30 +2468,39 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
// listed above, the interpretation that appears first in the list is used,
// even if a cast resulting from that interpretation is ill-formed.
// In plain language, this means trying a const_cast ...
+ // Note that for address space we check compatibility after const_cast.
unsigned msg = diag::err_bad_cxx_cast_generic;
TryCastResult tcr = TryConstCast(Self, SrcExpr, DestType,
- /*CStyle*/true, msg);
+ /*CStyle*/ true, msg);
if (SrcExpr.isInvalid())
return;
if (isValidCast(tcr))
Kind = CK_NoOp;
- Sema::CheckedConversionKind CCK
- = FunctionalStyle? Sema::CCK_FunctionalCast
- : Sema::CCK_CStyleCast;
+ Sema::CheckedConversionKind CCK =
+ FunctionalStyle ? Sema::CCK_FunctionalCast : Sema::CCK_CStyleCast;
if (tcr == TC_NotApplicable) {
- // ... or if that is not possible, a static_cast, ignoring const, ...
- tcr = TryStaticCast(Self, SrcExpr, DestType, CCK, OpRange,
- msg, Kind, BasePath, ListInitialization);
+ tcr = TryAddressSpaceCast(Self, SrcExpr, DestType, /*CStyle*/ true, msg);
if (SrcExpr.isInvalid())
return;
+ if (isValidCast(tcr))
+ Kind = CK_AddressSpaceConversion;
+
if (tcr == TC_NotApplicable) {
- // ... and finally a reinterpret_cast, ignoring const.
- tcr = TryReinterpretCast(Self, SrcExpr, DestType, /*CStyle*/true,
- OpRange, msg, Kind);
+ // ... or if that is not possible, a static_cast, ignoring const, ...
+ tcr = TryStaticCast(Self, SrcExpr, DestType, CCK, OpRange, msg, Kind,
+ BasePath, ListInitialization);
if (SrcExpr.isInvalid())
return;
+
+ if (tcr == TC_NotApplicable) {
+ // ... and finally a reinterpret_cast, ignoring const.
+ tcr = TryReinterpretCast(Self, SrcExpr, DestType, /*CStyle*/ true,
+ OpRange, msg, Kind);
+ if (SrcExpr.isInvalid())
+ return;
+ }
}
}
@@ -2427,8 +2531,6 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
}
}
- checkAddressSpaceCast(SrcExpr.get()->getType(), DestType);
-
if (isValidCast(tcr)) {
if (Kind == CK_BitCast)
checkCastAlign();
@@ -2695,6 +2797,43 @@ void CastOperation::CheckCStyleCast() {
checkCastAlign();
}
+void CastOperation::CheckBuiltinBitCast() {
+ QualType SrcType = SrcExpr.get()->getType();
+ if (SrcExpr.get()->isRValue())
+ SrcExpr = Self.CreateMaterializeTemporaryExpr(SrcType, SrcExpr.get(),
+ /*IsLValueReference=*/false);
+
+ CharUnits DestSize = Self.Context.getTypeSizeInChars(DestType);
+ CharUnits SourceSize = Self.Context.getTypeSizeInChars(SrcType);
+ if (DestSize != SourceSize) {
+ Self.Diag(OpRange.getBegin(), diag::err_bit_cast_type_size_mismatch)
+ << (int)SourceSize.getQuantity() << (int)DestSize.getQuantity();
+ SrcExpr = ExprError();
+ return;
+ }
+
+ if (!DestType.isTriviallyCopyableType(Self.Context)) {
+ Self.Diag(OpRange.getBegin(), diag::err_bit_cast_non_trivially_copyable)
+ << 1;
+ SrcExpr = ExprError();
+ return;
+ }
+
+ if (!SrcType.isTriviallyCopyableType(Self.Context)) {
+ Self.Diag(OpRange.getBegin(), diag::err_bit_cast_non_trivially_copyable)
+ << 0;
+ SrcExpr = ExprError();
+ return;
+ }
+
+ if (Self.Context.hasSameUnqualifiedType(DestType, SrcType)) {
+ Kind = CK_NoOp;
+ return;
+ }
+
+ Kind = CK_LValueToRValueBitCast;
+}
+
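As a minimal sketch (not part of the patch) of the rules CheckBuiltinBitCast enforces above, namely equal object sizes and trivially copyable source and destination types:

  float f = 1.0f;
  unsigned u = __builtin_bit_cast(unsigned, f);       // OK: 4 bytes to 4 bytes, both trivially copyable
  double d = __builtin_bit_cast(double, f);           // error: err_bit_cast_type_size_mismatch (8 vs. 4)
  struct NonTrivial { ~NonTrivial(); char c[4]; };
  NonTrivial n = __builtin_bit_cast(NonTrivial, f);   // error: err_bit_cast_non_trivially_copyable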
/// DiagnoseCastQual - Warn whenever casts discards a qualifiers, be it either
/// const, volatile or both.
static void DiagnoseCastQual(Sema &Self, const ExprResult &SrcExpr,
@@ -2742,7 +2881,7 @@ ExprResult Sema::BuildCStyleCastExpr(SourceLocation LPLoc,
Op.OpRange = SourceRange(LPLoc, CastExpr->getEndLoc());
if (getLangOpts().CPlusPlus) {
- Op.CheckCXXCStyleCast(/*FunctionalStyle=*/ false,
+ Op.CheckCXXCStyleCast(/*FunctionalCast=*/ false,
isa<InitListExpr>(CastExpr));
} else {
Op.CheckCStyleCast();
@@ -2769,7 +2908,7 @@ ExprResult Sema::BuildCXXFunctionalCastExpr(TypeSourceInfo *CastTypeInfo,
Op.DestRange = CastTypeInfo->getTypeLoc().getSourceRange();
Op.OpRange = SourceRange(Op.DestRange.getBegin(), CastExpr->getEndLoc());
- Op.CheckCXXCStyleCast(/*FunctionalStyle=*/true, /*ListInit=*/false);
+ Op.CheckCXXCStyleCast(/*FunctionalCast=*/true, /*ListInit=*/false);
if (Op.SrcExpr.isInvalid())
return ExprError();
diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp
index 8dc1fdb76988..f9f82cdeef43 100644
--- a/lib/Sema/SemaChecking.cpp
+++ b/lib/Sema/SemaChecking.cpp
@@ -1,9 +1,8 @@
//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -85,6 +84,7 @@
#include "llvm/Support/Format.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@@ -191,6 +191,16 @@ static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
return false;
}
+/// Check the number of arguments, and set the result type to
+/// the argument type.
+static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
+ if (checkArgCount(S, TheCall, 1))
+ return true;
+
+ TheCall->setType(TheCall->getArg(0)->getType());
+ return false;
+}
+
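A sketch (not part of the patch) of how the builtin is typed after this check: the call takes exactly one argument and the result simply takes that argument's type, so it can wrap an access transparently (as used for BPF CO-RE relocations); 's' is hypothetical:

  struct S { int a; } *s;
  int *p = __builtin_preserve_access_index(&s->a);   // result type is 'int *', same as the argument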
static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
if (checkArgCount(S, TheCall, 3))
return true;
@@ -236,47 +246,6 @@ static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
return false;
}
-static void SemaBuiltinMemChkCall(Sema &S, FunctionDecl *FDecl,
- CallExpr *TheCall, unsigned SizeIdx,
- unsigned DstSizeIdx,
- StringRef LikelyMacroName) {
- if (TheCall->getNumArgs() <= SizeIdx ||
- TheCall->getNumArgs() <= DstSizeIdx)
- return;
-
- const Expr *SizeArg = TheCall->getArg(SizeIdx);
- const Expr *DstSizeArg = TheCall->getArg(DstSizeIdx);
-
- Expr::EvalResult SizeResult, DstSizeResult;
-
- // find out if both sizes are known at compile time
- if (!SizeArg->EvaluateAsInt(SizeResult, S.Context) ||
- !DstSizeArg->EvaluateAsInt(DstSizeResult, S.Context))
- return;
-
- llvm::APSInt Size = SizeResult.Val.getInt();
- llvm::APSInt DstSize = DstSizeResult.Val.getInt();
-
- if (Size.ule(DstSize))
- return;
-
- // Confirmed overflow, so generate the diagnostic.
- StringRef FunctionName = FDecl->getName();
- SourceLocation SL = TheCall->getBeginLoc();
- SourceManager &SM = S.getSourceManager();
- // If we're in an expansion of a macro whose name corresponds to this builtin,
- // use the simple macro name and location.
- if (SL.isMacroID() && Lexer::getImmediateMacroName(SL, SM, S.getLangOpts()) ==
- LikelyMacroName) {
- FunctionName = LikelyMacroName;
- SL = SM.getImmediateMacroCallerLoc(SL);
- }
-
- S.Diag(SL, diag::warn_memcpy_chk_overflow)
- << FunctionName << DstSize.toString(/*Radix=*/10)
- << Size.toString(/*Radix=*/10);
-}
-
static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
if (checkArgCount(S, BuiltinCall, 2))
return true;
@@ -340,6 +309,149 @@ static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
return false;
}
+/// Check a call to BuiltinID for buffer overflows. If BuiltinID is a
+/// __builtin_*_chk function, then use the object size argument specified in the
+/// source. Otherwise, infer the object size using __builtin_object_size.
+void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
+ CallExpr *TheCall) {
+ // FIXME: There are some more useful checks we could be doing here:
+ // - Analyze the format string of sprintf to see how much of buffer is used.
+ // - Evaluate strlen of strcpy arguments, use as object size.
+
+ if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
+ isConstantEvaluated())
+ return;
+
+ unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
+ if (!BuiltinID)
+ return;
+
+ unsigned DiagID = 0;
+ bool IsChkVariant = false;
+ unsigned SizeIndex, ObjectIndex;
+ switch (BuiltinID) {
+ default:
+ return;
+ case Builtin::BI__builtin___memcpy_chk:
+ case Builtin::BI__builtin___memmove_chk:
+ case Builtin::BI__builtin___memset_chk:
+ case Builtin::BI__builtin___strlcat_chk:
+ case Builtin::BI__builtin___strlcpy_chk:
+ case Builtin::BI__builtin___strncat_chk:
+ case Builtin::BI__builtin___strncpy_chk:
+ case Builtin::BI__builtin___stpncpy_chk:
+ case Builtin::BI__builtin___memccpy_chk: {
+ DiagID = diag::warn_builtin_chk_overflow;
+ IsChkVariant = true;
+ SizeIndex = TheCall->getNumArgs() - 2;
+ ObjectIndex = TheCall->getNumArgs() - 1;
+ break;
+ }
+
+ case Builtin::BI__builtin___snprintf_chk:
+ case Builtin::BI__builtin___vsnprintf_chk: {
+ DiagID = diag::warn_builtin_chk_overflow;
+ IsChkVariant = true;
+ SizeIndex = 1;
+ ObjectIndex = 3;
+ break;
+ }
+
+ case Builtin::BIstrncat:
+ case Builtin::BI__builtin_strncat:
+ case Builtin::BIstrncpy:
+ case Builtin::BI__builtin_strncpy:
+ case Builtin::BIstpncpy:
+ case Builtin::BI__builtin_stpncpy: {
+ // Whether these functions overflow depends on the runtime strlen of the
+ // string, not just the buffer size, so emitting the "always overflow"
+ // diagnostic isn't quite right. We should still diagnose passing a buffer
+ // size larger than the destination buffer though; this is a runtime abort
+ // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
+ DiagID = diag::warn_fortify_source_size_mismatch;
+ SizeIndex = TheCall->getNumArgs() - 1;
+ ObjectIndex = 0;
+ break;
+ }
+
+ case Builtin::BImemcpy:
+ case Builtin::BI__builtin_memcpy:
+ case Builtin::BImemmove:
+ case Builtin::BI__builtin_memmove:
+ case Builtin::BImemset:
+ case Builtin::BI__builtin_memset: {
+ DiagID = diag::warn_fortify_source_overflow;
+ SizeIndex = TheCall->getNumArgs() - 1;
+ ObjectIndex = 0;
+ break;
+ }
+ case Builtin::BIsnprintf:
+ case Builtin::BI__builtin_snprintf:
+ case Builtin::BIvsnprintf:
+ case Builtin::BI__builtin_vsnprintf: {
+ DiagID = diag::warn_fortify_source_size_mismatch;
+ SizeIndex = 1;
+ ObjectIndex = 0;
+ break;
+ }
+ }
+
+ llvm::APSInt ObjectSize;
+ // For __builtin___*_chk, the object size is explicitly provided by the caller
+ // (usually using __builtin_object_size). Use that value to check this call.
+ if (IsChkVariant) {
+ Expr::EvalResult Result;
+ Expr *SizeArg = TheCall->getArg(ObjectIndex);
+ if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
+ return;
+ ObjectSize = Result.Val.getInt();
+
+ // Otherwise, try to evaluate an imaginary call to __builtin_object_size.
+ } else {
+ // If the parameter has a pass_object_size attribute, then we should use its
+ // (potentially) more strict checking mode. Otherwise, conservatively assume
+ // type 0.
+ int BOSType = 0;
+ if (const auto *POS =
+ FD->getParamDecl(ObjectIndex)->getAttr<PassObjectSizeAttr>())
+ BOSType = POS->getType();
+
+ Expr *ObjArg = TheCall->getArg(ObjectIndex);
+ uint64_t Result;
+ if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
+ return;
+ // Get the object size in the target's size_t width.
+ const TargetInfo &TI = getASTContext().getTargetInfo();
+ unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());
+ ObjectSize = llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
+ }
+
+ // Evaluate the number of bytes of the object that this call will use.
+ Expr::EvalResult Result;
+ Expr *UsedSizeArg = TheCall->getArg(SizeIndex);
+ if (!UsedSizeArg->EvaluateAsInt(Result, getASTContext()))
+ return;
+ llvm::APSInt UsedSize = Result.Val.getInt();
+
+ if (UsedSize.ule(ObjectSize))
+ return;
+
+ StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
+ // Skim off the details of whichever builtin was called to produce a better
+ // diagnostic, as it's unlikely that the user wrote the __builtin explicitly.
+ if (IsChkVariant) {
+ FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
+ FunctionName = FunctionName.drop_back(std::strlen("_chk"));
+ } else if (FunctionName.startswith("__builtin_")) {
+ FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
+ }
+
+ DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
+ PDiag(DiagID)
+ << FunctionName << ObjectSize.toString(/*Radix=*/10)
+ << UsedSize.toString(/*Radix=*/10));
+}
+
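For illustration only (not part of the patch), the kind of call sites this consolidated check is meant to flag; the _chk variants take the object size from their trailing argument, while the plain builtins infer it via __builtin_object_size. 'src' is a hypothetical argument:

  #include <stdio.h>
  #include <string.h>
  char buf[10];
  const char *src = "0123456789abcdefghij";
  void f(void) {
    memcpy(buf, src, 20);                   // warn_fortify_source_overflow: 20 bytes into a 10-byte object
    __builtin___memcpy_chk(buf, src, 20,
        __builtin_object_size(buf, 0));     // warn_builtin_chk_overflow on the _chk variant
    snprintf(buf, 20, "%s", src);           // warn_fortify_source_size_mismatch: size 20 exceeds the buffer
  }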
static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
Scope::ScopeFlags NeededScopeFlags,
unsigned DiagID) {
@@ -1077,6 +1189,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
if (SemaBuiltinAssumeAligned(TheCall))
return ExprError();
break;
+ case Builtin::BI__builtin_dynamic_object_size:
case Builtin::BI__builtin_object_size:
if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
return ExprError();
@@ -1098,10 +1211,14 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
if (checkArgCount(*this, TheCall, 1)) return true;
TheCall->setType(Context.IntTy);
break;
- case Builtin::BI__builtin_constant_p:
+ case Builtin::BI__builtin_constant_p: {
if (checkArgCount(*this, TheCall, 1)) return true;
+ ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
+ if (Arg.isInvalid()) return true;
+ TheCall->setArg(0, Arg.get());
TheCall->setType(Context.IntTy);
break;
+ }
case Builtin::BI__builtin_launder:
return SemaBuiltinLaunder(*this, TheCall);
case Builtin::BI__sync_fetch_and_add:
@@ -1302,41 +1419,9 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
TheCall->setType(Context.IntTy);
break;
}
-
- // check secure string manipulation functions where overflows
- // are detectable at compile time
- case Builtin::BI__builtin___memcpy_chk:
- SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "memcpy");
- break;
- case Builtin::BI__builtin___memmove_chk:
- SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "memmove");
- break;
- case Builtin::BI__builtin___memset_chk:
- SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "memset");
- break;
- case Builtin::BI__builtin___strlcat_chk:
- SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "strlcat");
- break;
- case Builtin::BI__builtin___strlcpy_chk:
- SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "strlcpy");
- break;
- case Builtin::BI__builtin___strncat_chk:
- SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "strncat");
- break;
- case Builtin::BI__builtin___strncpy_chk:
- SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "strncpy");
- break;
- case Builtin::BI__builtin___stpncpy_chk:
- SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "stpncpy");
- break;
- case Builtin::BI__builtin___memccpy_chk:
- SemaBuiltinMemChkCall(*this, FDecl, TheCall, 3, 4, "memccpy");
- break;
- case Builtin::BI__builtin___snprintf_chk:
- SemaBuiltinMemChkCall(*this, FDecl, TheCall, 1, 3, "snprintf");
- break;
- case Builtin::BI__builtin___vsnprintf_chk:
- SemaBuiltinMemChkCall(*this, FDecl, TheCall, 1, 3, "vsnprintf");
+ case Builtin::BI__builtin_preserve_access_index:
+ if (SemaBuiltinPreserveAI(*this, TheCall))
+ return ExprError();
break;
case Builtin::BI__builtin_call_with_static_chain:
if (SemaBuiltinCallWithStaticChain(*this, TheCall))
@@ -1806,6 +1891,16 @@ bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
BuiltinID == AArch64::BI__builtin_arm_wsr64)
return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
+ // Memory Tagging Extensions (MTE) Intrinsics
+ if (BuiltinID == AArch64::BI__builtin_arm_irg ||
+ BuiltinID == AArch64::BI__builtin_arm_addg ||
+ BuiltinID == AArch64::BI__builtin_arm_gmi ||
+ BuiltinID == AArch64::BI__builtin_arm_ldg ||
+ BuiltinID == AArch64::BI__builtin_arm_stg ||
+ BuiltinID == AArch64::BI__builtin_arm_subp) {
+ return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
+ }
+
if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
BuiltinID == AArch64::BI__builtin_arm_rsrp ||
BuiltinID == AArch64::BI__builtin_arm_wsr ||
@@ -2620,8 +2715,7 @@ bool Sema::CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall) {
const TargetInfo &TI = Context.getTargetInfo();
const BuiltinAndString *FC =
- std::lower_bound(std::begin(ValidCPU), std::end(ValidCPU), BuiltinID,
- LowerBoundCmp);
+ llvm::lower_bound(ValidCPU, BuiltinID, LowerBoundCmp);
if (FC != std::end(ValidCPU) && FC->BuiltinID == BuiltinID) {
const TargetOptions &Opts = TI.getTargetOpts();
StringRef CPU = Opts.CPU;
@@ -2637,8 +2731,7 @@ bool Sema::CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall) {
}
const BuiltinAndString *FH =
- std::lower_bound(std::begin(ValidHVX), std::end(ValidHVX), BuiltinID,
- LowerBoundCmp);
+ llvm::lower_bound(ValidHVX, BuiltinID, LowerBoundCmp);
if (FH != std::end(ValidHVX) && FH->BuiltinID == BuiltinID) {
if (!TI.hasFeature("hvx"))
return Diag(TheCall->getBeginLoc(),
@@ -2867,11 +2960,8 @@ bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
true);
(void)SortOnce;
- const BuiltinInfo *F =
- std::lower_bound(std::begin(Infos), std::end(Infos), BuiltinID,
- [](const BuiltinInfo &BI, unsigned BuiltinID) {
- return BI.BuiltinID < BuiltinID;
- });
+ const BuiltinInfo *F = llvm::partition_point(
+ Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; });
if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
return false;
@@ -2955,6 +3045,8 @@ bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
// These intrinsics take an unsigned 5 bit immediate.
// The first block of intrinsics actually have an unsigned 5 bit field,
// not a df/n field.
+ case Mips::BI__builtin_msa_cfcmsa:
+ case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
case Mips::BI__builtin_msa_clei_u_b:
case Mips::BI__builtin_msa_clei_u_h:
case Mips::BI__builtin_msa_clei_u_w:
@@ -3201,6 +3293,8 @@ bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
case SystemZ::BI__builtin_s390_vfmaxsb:
case SystemZ::BI__builtin_s390_vfmindb:
case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break;
+ case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break;
}
return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}
@@ -3299,6 +3393,8 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_cvtss2sd_round_mask:
case X86::BI__builtin_ia32_getexpsd128_round_mask:
case X86::BI__builtin_ia32_getexpss128_round_mask:
+ case X86::BI__builtin_ia32_getmantpd512_mask:
+ case X86::BI__builtin_ia32_getmantps512_mask:
case X86::BI__builtin_ia32_maxsd_round_mask:
case X86::BI__builtin_ia32_maxss_round_mask:
case X86::BI__builtin_ia32_minsd_round_mask:
@@ -3321,6 +3417,8 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_fixupimmsd_maskz:
case X86::BI__builtin_ia32_fixupimmss_mask:
case X86::BI__builtin_ia32_fixupimmss_maskz:
+ case X86::BI__builtin_ia32_getmantsd_round_mask:
+ case X86::BI__builtin_ia32_getmantss_round_mask:
case X86::BI__builtin_ia32_rangepd512_mask:
case X86::BI__builtin_ia32_rangeps512_mask:
case X86::BI__builtin_ia32_rangesd128_round_mask:
@@ -3364,9 +3462,13 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_cvtdq2ps512_mask:
case X86::BI__builtin_ia32_cvtudq2ps512_mask:
case X86::BI__builtin_ia32_cvtpd2ps512_mask:
+ case X86::BI__builtin_ia32_cvtpd2dq512_mask:
case X86::BI__builtin_ia32_cvtpd2qq512_mask:
+ case X86::BI__builtin_ia32_cvtpd2udq512_mask:
case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
+ case X86::BI__builtin_ia32_cvtps2dq512_mask:
case X86::BI__builtin_ia32_cvtps2qq512_mask:
+ case X86::BI__builtin_ia32_cvtps2udq512_mask:
case X86::BI__builtin_ia32_cvtps2uqq512_mask:
case X86::BI__builtin_ia32_cvtqq2pd512_mask:
case X86::BI__builtin_ia32_cvtqq2ps512_mask:
@@ -3387,8 +3489,6 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_scalefps512_mask:
case X86::BI__builtin_ia32_scalefsd_round_mask:
case X86::BI__builtin_ia32_scalefss_round_mask:
- case X86::BI__builtin_ia32_getmantpd512_mask:
- case X86::BI__builtin_ia32_getmantps512_mask:
case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
case X86::BI__builtin_ia32_sqrtsd_round_mask:
case X86::BI__builtin_ia32_sqrtss_round_mask:
@@ -3417,11 +3517,6 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
ArgNum = 4;
HasRC = true;
break;
- case X86::BI__builtin_ia32_getmantsd_round_mask:
- case X86::BI__builtin_ia32_getmantss_round_mask:
- ArgNum = 5;
- HasRC = true;
- break;
}
llvm::APSInt Result;
@@ -3906,6 +4001,8 @@ bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_scatterpfqps:
i = 4; l = 2; u = 3;
break;
+ case X86::BI__builtin_ia32_reducesd_mask:
+ case X86::BI__builtin_ia32_reducess_mask:
case X86::BI__builtin_ia32_rndscalesd_round_mask:
case X86::BI__builtin_ia32_rndscaless_round_mask:
i = 4; l = 0; u = 255;
@@ -3975,7 +4072,8 @@ static void CheckNonNullArgument(Sema &S,
SourceLocation CallSiteLoc) {
if (CheckNonNullExpr(S, ArgExpr))
S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
- S.PDiag(diag::warn_null_arg) << ArgExpr->getSourceRange());
+ S.PDiag(diag::warn_null_arg)
+ << ArgExpr->getSourceRange());
}
bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
@@ -4045,6 +4143,9 @@ static void CheckNonNullArguments(Sema &S,
SourceLocation CallSiteLoc) {
assert((FDecl || Proto) && "Need a function declaration or prototype");
+ // Already checked by the constant evaluator.
+ if (S.isConstantEvaluated())
+ return;
// Check the attributes attached to the method/function itself.
llvm::SmallBitVector NonNullArgs;
if (FDecl) {
@@ -4802,7 +4903,7 @@ static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
/// We have a call to a function like __sync_fetch_and_add, which is an
/// overloaded function based on the pointer type of its first argument.
-/// The main ActOnCallExpr routines have already promoted the types of
+/// The main BuildCallExpr routines have already promoted the types of
/// arguments because all of these calls are prototyped as void(...).
///
/// This function goes through and does final semantic checking for these
@@ -5149,15 +5250,10 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
}
// Create a new DeclRefExpr to refer to the new decl.
- DeclRefExpr* NewDRE = DeclRefExpr::Create(
- Context,
- DRE->getQualifierLoc(),
- SourceLocation(),
- NewBuiltinDecl,
- /*enclosing*/ false,
- DRE->getLocation(),
- Context.BuiltinFnTy,
- DRE->getValueKind());
+ DeclRefExpr *NewDRE = DeclRefExpr::Create(
+ Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl,
+ /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy,
+ DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse());
// Set the callee in the CallExpr.
// FIXME: This loses syntactic information.
@@ -5980,6 +6076,8 @@ bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
/// TheCall is a constant expression in the range [Low, High].
bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
int Low, int High, bool RangeIsError) {
+ if (isConstantEvaluated())
+ return false;
llvm::APSInt Result;
// We can't check the value of a dependent argument.
@@ -6029,6 +6127,160 @@ bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
return false;
}
+/// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
+bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
+ if (BuiltinID == AArch64::BI__builtin_arm_irg) {
+ if (checkArgCount(*this, TheCall, 2))
+ return true;
+ Expr *Arg0 = TheCall->getArg(0);
+ Expr *Arg1 = TheCall->getArg(1);
+
+ ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
+ if (FirstArg.isInvalid())
+ return true;
+ QualType FirstArgType = FirstArg.get()->getType();
+ if (!FirstArgType->isAnyPointerType())
+ return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
+ << "first" << FirstArgType << Arg0->getSourceRange();
+ TheCall->setArg(0, FirstArg.get());
+
+ ExprResult SecArg = DefaultLvalueConversion(Arg1);
+ if (SecArg.isInvalid())
+ return true;
+ QualType SecArgType = SecArg.get()->getType();
+ if (!SecArgType->isIntegerType())
+ return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
+ << "second" << SecArgType << Arg1->getSourceRange();
+
+ // Derive the return type from the pointer argument.
+ TheCall->setType(FirstArgType);
+ return false;
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_addg) {
+ if (checkArgCount(*this, TheCall, 2))
+ return true;
+
+ Expr *Arg0 = TheCall->getArg(0);
+ ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
+ if (FirstArg.isInvalid())
+ return true;
+ QualType FirstArgType = FirstArg.get()->getType();
+ if (!FirstArgType->isAnyPointerType())
+ return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
+ << "first" << FirstArgType << Arg0->getSourceRange();
+ TheCall->setArg(0, FirstArg.get());
+
+ // Derive the return type from the pointer argument.
+ TheCall->setType(FirstArgType);
+
+ // Second arg must be a constant in the range [0,15].
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
+ if (checkArgCount(*this, TheCall, 2))
+ return true;
+ Expr *Arg0 = TheCall->getArg(0);
+ Expr *Arg1 = TheCall->getArg(1);
+
+ ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
+ if (FirstArg.isInvalid())
+ return true;
+ QualType FirstArgType = FirstArg.get()->getType();
+ if (!FirstArgType->isAnyPointerType())
+ return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
+ << "first" << FirstArgType << Arg0->getSourceRange();
+
+ QualType SecArgType = Arg1->getType();
+ if (!SecArgType->isIntegerType())
+ return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
+ << "second" << SecArgType << Arg1->getSourceRange();
+ TheCall->setType(Context.IntTy);
+ return false;
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
+ BuiltinID == AArch64::BI__builtin_arm_stg) {
+ if (checkArgCount(*this, TheCall, 1))
+ return true;
+ Expr *Arg0 = TheCall->getArg(0);
+ ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
+ if (FirstArg.isInvalid())
+ return true;
+
+ QualType FirstArgType = FirstArg.get()->getType();
+ if (!FirstArgType->isAnyPointerType())
+ return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
+ << "first" << FirstArgType << Arg0->getSourceRange();
+ TheCall->setArg(0, FirstArg.get());
+
+ // Derive the return type from the pointer argument.
+ if (BuiltinID == AArch64::BI__builtin_arm_ldg)
+ TheCall->setType(FirstArgType);
+ return false;
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_subp) {
+ Expr *ArgA = TheCall->getArg(0);
+ Expr *ArgB = TheCall->getArg(1);
+
+ ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA);
+ ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB);
+
+ if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
+ return true;
+
+ QualType ArgTypeA = ArgExprA.get()->getType();
+ QualType ArgTypeB = ArgExprB.get()->getType();
+
+ auto isNull = [&] (Expr *E) -> bool {
+ return E->isNullPointerConstant(
+ Context, Expr::NPC_ValueDependentIsNotNull); };
+
+ // Each argument should be either a pointer or null.
+ if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
+ return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
+ << "first" << ArgTypeA << ArgA->getSourceRange();
+
+ if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
+ return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
+ << "second" << ArgTypeB << ArgB->getSourceRange();
+
+ // Ensure Pointee types are compatible
+ if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
+ ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
+ QualType pointeeA = ArgTypeA->getPointeeType();
+ QualType pointeeB = ArgTypeB->getPointeeType();
+ if (!Context.typesAreCompatible(
+ Context.getCanonicalType(pointeeA).getUnqualifiedType(),
+ Context.getCanonicalType(pointeeB).getUnqualifiedType())) {
+ return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible)
+ << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
+ << ArgB->getSourceRange();
+ }
+ }
+
+ // At least one argument should be a pointer type.
+ if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
+ return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer)
+ << ArgTypeA << ArgTypeB << ArgA->getSourceRange();
+
+ if (isNull(ArgA)) // adopt type of the other pointer
+ ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer);
+
+ if (isNull(ArgB))
+ ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer);
+
+ TheCall->setArg(0, ArgExprA.get());
+ TheCall->setArg(1, ArgExprB.get());
+ TheCall->setType(Context.LongLongTy);
+ return false;
+ }
+ assert(false && "Unhandled ARM MTE intrinsic");
+ return true;
+}
+
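A sketch (not part of the patch) of the typing rules the MTE checks above enforce; 'p', 'excl' and the tag values are hypothetical:

  int *p = 0;
  int excl = __builtin_arm_gmi(p, 0);        // first arg must be a pointer, second an integer; result is int
  int *q = __builtin_arm_irg(p, excl);       // result takes the type of the pointer argument
  int *r = __builtin_arm_addg(q, 15);        // second arg must be a constant in [0,15]
  long long d = __builtin_arm_subp(q, r);    // args must be pointers or null; result is long long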
/// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
/// TheCall is an ARM/AArch64 special register string literal.
bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
@@ -6332,6 +6584,8 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
llvm::SmallBitVector &CheckedVarArgs,
UncoveredArgHandler &UncoveredArg,
llvm::APSInt Offset) {
+ if (S.isConstantEvaluated())
+ return SLCT_NotALiteral;
tryAgain:
assert(Offset.isSigned() && "invalid offset");
@@ -6361,7 +6615,8 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
bool CheckLeft = true, CheckRight = true;
bool Cond;
- if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext())) {
+ if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(),
+ S.isConstantEvaluated())) {
if (Cond)
CheckRight = false;
else
@@ -6562,8 +6817,10 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
if (BinOp->isAdditiveOp()) {
Expr::EvalResult LResult, RResult;
- bool LIsInt = BinOp->getLHS()->EvaluateAsInt(LResult, S.Context);
- bool RIsInt = BinOp->getRHS()->EvaluateAsInt(RResult, S.Context);
+ bool LIsInt = BinOp->getLHS()->EvaluateAsInt(
+ LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated());
+ bool RIsInt = BinOp->getRHS()->EvaluateAsInt(
+ RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated());
if (LIsInt != RIsInt) {
BinaryOperatorKind BinOpKind = BinOp->getOpcode();
@@ -6589,7 +6846,9 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr());
if (UnaOp->getOpcode() == UO_AddrOf && ASE) {
Expr::EvalResult IndexResult;
- if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context)) {
+ if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context,
+ Expr::SE_NoSideEffects,
+ S.isConstantEvaluated())) {
sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add,
/*RHS is int*/ true);
E = ASE->getBase();
@@ -7651,7 +7910,8 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
startSpecifier, specifierLen);
// Check the length modifier is valid with the given conversion specifier.
- if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo()))
+ if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
+ S.getLangOpts()))
HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
diag::warn_format_nonsensical_length);
else if (!FS.hasStandardLengthModifier())
@@ -8155,7 +8415,8 @@ bool CheckScanfHandler::HandleScanfSpecifier(
}
// Check the length modifier is valid with the given conversion specifier.
- if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo()))
+ if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
+ S.getLangOpts()))
HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
diag::warn_format_nonsensical_length);
else if (!FS.hasStandardLengthModifier())
@@ -9172,23 +9433,23 @@ void Sema::CheckMemaccessArguments(const CallExpr *Call,
getContainedDynamicClass(PointeeTy, IsContained)) {
unsigned OperationType = 0;
+ const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp;
// "overwritten" if we're warning about the destination for any call
// but memcmp; otherwise a verb appropriate to the call.
- if (ArgIdx != 0 || BId == Builtin::BImemcmp) {
+ if (ArgIdx != 0 || IsCmp) {
if (BId == Builtin::BImemcpy)
OperationType = 1;
else if(BId == Builtin::BImemmove)
OperationType = 2;
- else if (BId == Builtin::BImemcmp)
+ else if (IsCmp)
OperationType = 3;
}
- DiagRuntimeBehavior(
- Dest->getExprLoc(), Dest,
- PDiag(diag::warn_dyn_class_memaccess)
- << (BId == Builtin::BImemcmp ? ArgIdx + 2 : ArgIdx)
- << FnName << IsContained << ContainedRD << OperationType
- << Call->getCallee()->getSourceRange());
+ DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
+ PDiag(diag::warn_dyn_class_memaccess)
+ << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName
+ << IsContained << ContainedRD << OperationType
+ << Call->getCallee()->getSourceRange());
} else if (PointeeTy.hasNonTrivialObjCLifetime() &&
BId != Builtin::BImemset)
DiagRuntimeBehavior(
@@ -9667,12 +9928,13 @@ static QualType GetExprType(const Expr *E) {
/// range of values it might take.
///
/// \param MaxWidth - the width to which the value will be truncated
-static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth) {
+static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
+ bool InConstantContext) {
E = E->IgnoreParens();
// Try a full evaluation first.
Expr::EvalResult result;
- if (E->EvaluateAsRValue(result, C))
+ if (E->EvaluateAsRValue(result, C, InConstantContext))
return GetValueRange(C, result.Val, GetExprType(E), MaxWidth);
// I think we only want to look through implicit casts here; if the
@@ -9680,7 +9942,7 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth) {
// being of the new, wider type.
if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) {
if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
- return GetExprRange(C, CE->getSubExpr(), MaxWidth);
+ return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext);
IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE));
@@ -9691,9 +9953,9 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth) {
if (!isIntegerCast)
return OutputTypeRange;
- IntRange SubRange
- = GetExprRange(C, CE->getSubExpr(),
- std::min(MaxWidth, OutputTypeRange.Width));
+ IntRange SubRange = GetExprRange(C, CE->getSubExpr(),
+ std::min(MaxWidth, OutputTypeRange.Width),
+ InConstantContext);
// Bail out if the subexpr's range is as wide as the cast type.
if (SubRange.Width >= OutputTypeRange.Width)
@@ -9709,13 +9971,15 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth) {
// If we can fold the condition, just take that operand.
bool CondResult;
if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
- return GetExprRange(C, CondResult ? CO->getTrueExpr()
- : CO->getFalseExpr(),
- MaxWidth);
+ return GetExprRange(C,
+ CondResult ? CO->getTrueExpr() : CO->getFalseExpr(),
+ MaxWidth, InConstantContext);
// Otherwise, conservatively merge.
- IntRange L = GetExprRange(C, CO->getTrueExpr(), MaxWidth);
- IntRange R = GetExprRange(C, CO->getFalseExpr(), MaxWidth);
+ IntRange L =
+ GetExprRange(C, CO->getTrueExpr(), MaxWidth, InConstantContext);
+ IntRange R =
+ GetExprRange(C, CO->getFalseExpr(), MaxWidth, InConstantContext);
return IntRange::join(L, R);
}
@@ -9751,7 +10015,7 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth) {
// been coerced to the LHS type.
case BO_Assign:
// TODO: bitfields?
- return GetExprRange(C, BO->getRHS(), MaxWidth);
+ return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext);
// Operations with opaque sources are black-listed.
case BO_PtrMemD:
@@ -9761,8 +10025,9 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth) {
// Bitwise-and uses the *infinum* of the two source ranges.
case BO_And:
case BO_AndAssign:
- return IntRange::meet(GetExprRange(C, BO->getLHS(), MaxWidth),
- GetExprRange(C, BO->getRHS(), MaxWidth));
+ return IntRange::meet(
+ GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext),
+ GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext));
// Left shift gets black-listed based on a judgement call.
case BO_Shl:
@@ -9783,7 +10048,7 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth) {
// Right shift by a constant can narrow its left argument.
case BO_Shr:
case BO_ShrAssign: {
- IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth);
+ IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext);
// If the shift amount is a positive constant, drop the width by
// that much.
@@ -9802,7 +10067,7 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth) {
// Comma acts as its right operand.
case BO_Comma:
- return GetExprRange(C, BO->getRHS(), MaxWidth);
+ return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext);
// Black-list pointer subtractions.
case BO_Sub:
@@ -9815,7 +10080,7 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth) {
case BO_Div: {
// Don't 'pre-truncate' the operands.
unsigned opWidth = C.getIntWidth(GetExprType(E));
- IntRange L = GetExprRange(C, BO->getLHS(), opWidth);
+ IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext);
// If the divisor is constant, use that.
llvm::APSInt divisor;
@@ -9829,7 +10094,7 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth) {
}
// Otherwise, just use the LHS's width.
- IntRange R = GetExprRange(C, BO->getRHS(), opWidth);
+ IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext);
return IntRange(L.Width, L.NonNegative && R.NonNegative);
}
@@ -9838,8 +10103,8 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth) {
case BO_Rem: {
// Don't 'pre-truncate' the operands.
unsigned opWidth = C.getIntWidth(GetExprType(E));
- IntRange L = GetExprRange(C, BO->getLHS(), opWidth);
- IntRange R = GetExprRange(C, BO->getRHS(), opWidth);
+ IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext);
+ IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext);
IntRange meet = IntRange::meet(L, R);
meet.Width = std::min(meet.Width, MaxWidth);
@@ -9856,8 +10121,8 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth) {
// The default case is to treat the operation as if it were closed
// on the narrowest type that encompasses both operands.
- IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth);
- IntRange R = GetExprRange(C, BO->getRHS(), MaxWidth);
+ IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext);
+ IntRange R = GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext);
return IntRange::join(L, R);
}
@@ -9873,12 +10138,12 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth) {
return IntRange::forValueOfType(C, GetExprType(E));
default:
- return GetExprRange(C, UO->getSubExpr(), MaxWidth);
+ return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext);
}
}
if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
- return GetExprRange(C, OVE->getSourceExpr(), MaxWidth);
+ return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext);
if (const auto *BitField = E->getSourceBitField())
return IntRange(BitField->getBitWidthValue(C),
@@ -9887,8 +10152,9 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth) {
return IntRange::forValueOfType(C, GetExprType(E));
}
-static IntRange GetExprRange(ASTContext &C, const Expr *E) {
- return GetExprRange(C, E, C.getIntWidth(GetExprType(E)));
+static IntRange GetExprRange(ASTContext &C, const Expr *E,
+ bool InConstantContext) {
+ return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext);
}
/// Checks whether the given value, which currently has the given
@@ -9938,9 +10204,16 @@ static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) {
if (isa<EnumConstantDecl>(DR->getDecl()))
return true;
- // Suppress cases where the '0' value is expanded from a macro.
- if (E->getBeginLoc().isMacroID())
- return true;
+ // Suppress cases where the value is expanded from a macro, unless that macro
+ // is how a language represents a boolean literal. This is the case in both C
+ // and Objective-C.
+ SourceLocation BeginLoc = E->getBeginLoc();
+ if (BeginLoc.isMacroID()) {
+ StringRef MacroName = Lexer::getImmediateMacroName(
+ BeginLoc, S.getSourceManager(), S.getLangOpts());
+ return MacroName != "YES" && MacroName != "NO" &&
+ MacroName != "true" && MacroName != "false";
+ }
return false;
}
@@ -10132,11 +10405,17 @@ static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
OtherT = AT->getValueType();
IntRange OtherRange = IntRange::forValueOfType(S.Context, OtherT);
+ // Special case for ObjC BOOL on targets where it's a typedef for a signed char
+ // (namely, macOS).
+ bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
+ S.NSAPIObj->isObjCBOOLType(OtherT) &&
+ OtherT->isSpecificBuiltinType(BuiltinType::SChar);
+
// Whether we're treating Other as being a bool because of the form of
// expression despite it having another type (typically 'int' in C).
bool OtherIsBooleanDespiteType =
!OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
- if (OtherIsBooleanDespiteType)
+ if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
OtherRange = IntRange::forBoolType();
// Determine the promoted range of the other type and see if a comparison of
@@ -10167,22 +10446,34 @@ static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
// Should be enough for uint128 (39 decimal digits)
SmallString<64> PrettySourceValue;
llvm::raw_svector_ostream OS(PrettySourceValue);
- if (ED)
+ if (ED) {
OS << '\'' << *ED << "' (" << Value << ")";
- else
+ } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>(
+ Constant->IgnoreParenImpCasts())) {
+ OS << (BL->getValue() ? "YES" : "NO");
+ } else {
OS << Value;
+ }
+
+ if (IsObjCSignedCharBool) {
+ S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
+ S.PDiag(diag::warn_tautological_compare_objc_bool)
+ << OS.str() << *Result);
+ return true;
+ }
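An Objective-C sketch of this new diagnostic path (not from the patch); it assumes a target such as macOS where BOOL is a signed char, and 'someFlag'/'doSomething' are hypothetical:

  BOOL b = someFlag();
  if (b == 2)          // warn_tautological_compare_objc_bool: the comparison is always false
    doSomething();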
// FIXME: We use a somewhat different formatting for the in-range cases and
// cases involving boolean values for historical reasons. We should pick a
// consistent way of presenting these diagnostics.
if (!InRange || Other->isKnownToHaveBooleanValue()) {
+
S.DiagRuntimeBehavior(
- E->getOperatorLoc(), E,
- S.PDiag(!InRange ? diag::warn_out_of_range_compare
- : diag::warn_tautological_bool_compare)
- << OS.str() << classifyConstantValue(Constant)
- << OtherT << OtherIsBooleanDespiteType << *Result
- << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange());
+ E->getOperatorLoc(), E,
+ S.PDiag(!InRange ? diag::warn_out_of_range_compare
+ : diag::warn_tautological_bool_compare)
+ << OS.str() << classifyConstantValue(Constant) << OtherT
+ << OtherIsBooleanDespiteType << *Result
+ << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange());
} else {
unsigned Diag = (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0)
? (HasEnumType(OriginalOther)
@@ -10286,7 +10577,8 @@ static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
}
// Otherwise, calculate the effective range of the signed operand.
- IntRange signedRange = GetExprRange(S.Context, signedOperand);
+ IntRange signedRange =
+ GetExprRange(S.Context, signedOperand, S.isConstantEvaluated());
// Go ahead and analyze implicit conversions in the operands. Note
// that we skip the implicit conversions on both sides.
@@ -10303,7 +10595,8 @@ static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
// change the result of the comparison.
if (E->isEqualityOp()) {
unsigned comparisonWidth = S.Context.getIntWidth(T);
- IntRange unsignedRange = GetExprRange(S.Context, unsignedOperand);
+ IntRange unsignedRange =
+ GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated());
// We should never be unable to prove that the unsigned operand is
// non-negative.
@@ -10314,9 +10607,9 @@ static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
}
S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
- S.PDiag(diag::warn_mixed_sign_comparison)
- << LHS->getType() << RHS->getType()
- << LHS->getSourceRange() << RHS->getSourceRange());
+ S.PDiag(diag::warn_mixed_sign_comparison)
+ << LHS->getType() << RHS->getType()
+ << LHS->getSourceRange() << RHS->getSourceRange());
}
/// Analyzes an attempt to assign the given value to a bitfield.
@@ -10484,8 +10777,8 @@ static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T,
if (pruneControlFlow) {
S.DiagRuntimeBehavior(E->getExprLoc(), E,
S.PDiag(diag)
- << SourceType << T << E->getSourceRange()
- << SourceRange(CContext));
+ << SourceType << T << E->getSourceRange()
+ << SourceRange(CContext));
return;
}
S.Diag(E->getExprLoc(), diag)
@@ -10622,16 +10915,18 @@ static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) {
// The below checks assume source is floating point.
if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return;
- // If source is floating point but target is not.
+ // If source is floating point but target is an integer.
+ if (ResultBT->isInteger())
+ return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(),
+ E->getExprLoc(), diag::warn_impcast_float_integer);
+
if (!ResultBT->isFloatingPoint())
- return DiagnoseFloatingImpCast(S, E, E->getRHS()->getType(),
- E->getExprLoc());
-
- // If both source and target are floating points.
- // Builtin FP kinds are ordered by increasing FP rank.
- if (ResultBT->getKind() < RBT->getKind() &&
- // We don't want to warn for system macro.
- !S.SourceMgr.isInSystemMacro(E->getOperatorLoc()))
+ return;
+
+ // If both source and target are floating points, warn about losing precision.
+ int Order = S.getASTContext().getFloatingTypeSemanticOrder(
+ QualType(ResultBT, 0), QualType(RBT, 0));
+ if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc()))
// warn about dropping FP rank.
DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(),
diag::warn_impcast_float_result_precision);
@@ -10856,6 +11151,11 @@ static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
return true;
}
+static bool isObjCSignedCharBool(Sema &S, QualType Ty) {
+ return Ty->isSpecificBuiltinType(BuiltinType::SChar) &&
+ S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty);
+}
+
static void
CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC,
bool *ICContext = nullptr) {
@@ -10899,6 +11199,29 @@ CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC,
}
}
+ // If we're converting a constant to an ObjC BOOL on a platform where BOOL
+ // is a typedef for signed char (macOS), then that constant value has to be 1
+ // or 0.
+ if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) {
+ Expr::EvalResult Result;
+ if (E->EvaluateAsInt(Result, S.getASTContext(),
+ Expr::SE_AllowSideEffects) &&
+ Result.Val.getInt() != 1 && Result.Val.getInt() != 0) {
+ auto Builder = S.Diag(CC, diag::warn_impcast_constant_int_to_objc_bool)
+ << Result.Val.getInt().toString(10);
+ Expr *Ignored = E->IgnoreImplicit();
+ bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) ||
+ isa<BinaryOperator>(Ignored) ||
+ isa<CXXOperatorCallExpr>(Ignored);
+ SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc());
+ if (NeedsParens)
+ Builder << FixItHint::CreateInsertion(E->getBeginLoc(), "(")
+ << FixItHint::CreateInsertion(EndLoc, ")");
+ Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO");
+ return;
+ }
+ }
+
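A sketch (not from the patch) of the new warning and fix-it for integral constants implicitly converted to a signed-char BOOL:

  BOOL b1 = 2;        // warn_impcast_constant_int_to_objc_bool
  BOOL b2 = 1 + 1;    // same warning; the fix-it suggests: BOOL b2 = (1 + 1) ? YES : NO;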
// Check implicit casts from Objective-C collection literals to specialized
// collection types, e.g., NSArray<NSString *> *.
if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E))
@@ -10950,8 +11273,9 @@ CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC,
if (TargetBT && TargetBT->isFloatingPoint()) {
// ...then warn if we're dropping FP rank.
- // Builtin FP kinds are ordered by increasing FP rank.
- if (SourceBT->getKind() > TargetBT->getKind()) {
+ int Order = S.getASTContext().getFloatingTypeSemanticOrder(
+ QualType(SourceBT, 0), QualType(TargetBT, 0));
+ if (Order > 0) {
// Don't warn about float constants that are precisely
// representable in the target type.
Expr::EvalResult result;
@@ -10969,7 +11293,7 @@ CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC,
DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision);
}
// ... or possibly if we're increasing rank, too
- else if (TargetBT->getKind() > SourceBT->getKind()) {
+ else if (Order < 0) {
if (S.SourceMgr.isInSystemMacro(CC))
return;
@@ -11013,6 +11337,69 @@ CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC,
return;
}
+ // Valid casts involving fixed point types should be accounted for here.
+ if (Source->isFixedPointType()) {
+ if (Target->isUnsaturatedFixedPointType()) {
+ Expr::EvalResult Result;
+ if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects,
+ S.isConstantEvaluated())) {
+ APFixedPoint Value = Result.Val.getFixedPoint();
+ APFixedPoint MaxVal = S.Context.getFixedPointMax(T);
+ APFixedPoint MinVal = S.Context.getFixedPointMin(T);
+ if (Value > MaxVal || Value < MinVal) {
+ S.DiagRuntimeBehavior(E->getExprLoc(), E,
+ S.PDiag(diag::warn_impcast_fixed_point_range)
+ << Value.toString() << T
+ << E->getSourceRange()
+ << clang::SourceRange(CC));
+ return;
+ }
+ }
+ } else if (Target->isIntegerType()) {
+ Expr::EvalResult Result;
+ if (!S.isConstantEvaluated() &&
+ E->EvaluateAsFixedPoint(Result, S.Context,
+ Expr::SE_AllowSideEffects)) {
+ APFixedPoint FXResult = Result.Val.getFixedPoint();
+
+ bool Overflowed;
+ llvm::APSInt IntResult = FXResult.convertToInt(
+ S.Context.getIntWidth(T),
+ Target->isSignedIntegerOrEnumerationType(), &Overflowed);
+
+ if (Overflowed) {
+ S.DiagRuntimeBehavior(E->getExprLoc(), E,
+ S.PDiag(diag::warn_impcast_fixed_point_range)
+ << FXResult.toString() << T
+ << E->getSourceRange()
+ << clang::SourceRange(CC));
+ return;
+ }
+ }
+ }
+ } else if (Target->isUnsaturatedFixedPointType()) {
+ if (Source->isIntegerType()) {
+ Expr::EvalResult Result;
+ if (!S.isConstantEvaluated() &&
+ E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) {
+ llvm::APSInt Value = Result.Val.getInt();
+
+ bool Overflowed;
+ APFixedPoint IntResult = APFixedPoint::getFromIntValue(
+ Value, S.Context.getFixedPointSemantics(T), &Overflowed);
+
+ if (Overflowed) {
+ S.DiagRuntimeBehavior(E->getExprLoc(), E,
+ S.PDiag(diag::warn_impcast_fixed_point_range)
+ << Value.toString(/*Radix=*/10) << T
+ << E->getSourceRange()
+ << clang::SourceRange(CC));
+ return;
+ }
+ }
+ }
+ }
+
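A sketch of the new fixed-point conversion warnings (requires -ffixed-point; illustrative only, not from the patch):

  _Fract f = 2;              // warn_impcast_fixed_point_range: 2 is outside _Fract's range [-1, 1)
  short _Accum sa = 300.0k;  // may warn as well if 300.0 exceeds short _Accum's range on the target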
DiagnoseNullConversion(S, E, T, CC);
S.DiscardMisalignedMemberAddress(Target, E);
@@ -11025,14 +11412,15 @@ CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC,
if (Target->isSpecificBuiltinType(BuiltinType::Bool))
return;
- IntRange SourceRange = GetExprRange(S.Context, E);
+ IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated());
IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target);
if (SourceRange.Width > TargetRange.Width) {
// If the source is a constant, use a default-on diagnostic.
// TODO: this should happen for bitfield stores, too.
Expr::EvalResult Result;
- if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) {
+ if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects,
+ S.isConstantEvaluated())) {
llvm::APSInt Value(32);
Value = Result.Val.getInt();
@@ -11042,11 +11430,11 @@ CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC,
std::string PrettySourceValue = Value.toString(10);
std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
- S.DiagRuntimeBehavior(E->getExprLoc(), E,
- S.PDiag(diag::warn_impcast_integer_precision_constant)
- << PrettySourceValue << PrettyTargetValue
- << E->getType() << T << E->getSourceRange()
- << clang::SourceRange(CC));
+ S.DiagRuntimeBehavior(
+ E->getExprLoc(), E,
+ S.PDiag(diag::warn_impcast_integer_precision_constant)
+ << PrettySourceValue << PrettyTargetValue << E->getType() << T
+ << E->getSourceRange() << clang::SourceRange(CC));
return;
}
@@ -11464,6 +11852,9 @@ void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
}
if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
+ // Skip function template not specialized yet.
+ if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
+ return;
auto ParamIter = llvm::find(FD->parameters(), PV);
assert(ParamIter != FD->param_end());
unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);
@@ -11641,12 +12032,12 @@ class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
class Seq {
friend class SequenceTree;
- unsigned Index = 0;
+ unsigned Index;
explicit Seq(unsigned N) : Index(N) {}
public:
- Seq() = default;
+ Seq() : Index(0) {}
};
SequenceTree() { Values.push_back(Value(0)); }
@@ -11710,19 +12101,19 @@ class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
};
struct Usage {
- Expr *Use = nullptr;
+ Expr *Use;
SequenceTree::Seq Seq;
- Usage() = default;
+ Usage() : Use(nullptr), Seq() {}
};
struct UsageInfo {
Usage Uses[UK_Count];
/// Have we issued a diagnostic for this variable already?
- bool Diagnosed = false;
+ bool Diagnosed;
- UsageInfo() = default;
+ UsageInfo() : Uses(), Diagnosed(false) {}
};
using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>;
@@ -11791,7 +12182,8 @@ class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
bool evaluate(const Expr *E, bool &Result) {
if (!EvalOK || E->isValueDependent())
return false;
- EvalOK = E->EvaluateAsBooleanCondition(Result, Self.SemaRef.Context);
+ EvalOK = E->EvaluateAsBooleanCondition(
+ Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated());
return EvalOK;
}
@@ -11849,10 +12241,11 @@ class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
if (OtherKind == UK_Use)
std::swap(Mod, ModOrUse);
- SemaRef.Diag(Mod->getExprLoc(),
- IsModMod ? diag::warn_unsequenced_mod_mod
- : diag::warn_unsequenced_mod_use)
- << O << SourceRange(ModOrUse->getExprLoc());
+ SemaRef.DiagRuntimeBehavior(
+ Mod->getExprLoc(), {Mod, ModOrUse},
+ SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
+ : diag::warn_unsequenced_mod_use)
+ << O << SourceRange(ModOrUse->getExprLoc()));
UI.Diagnosed = true;
}
@@ -12143,6 +12536,8 @@ void Sema::CheckUnsequencedOperations(Expr *E) {
void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc,
bool IsConstexpr) {
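+ // The conversion and array-access checks below consult isConstantEvaluated();
+ // for a constexpr expression (or one already wrapped in a ConstantExpr) the
+ // constant evaluator owns the diagnostics, so evaluate in a constant context.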
+ llvm::SaveAndRestore<bool> ConstantContext(
+ isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E));
CheckImplicitConversions(E, CheckLoc);
if (!E->isInstantiationDependent())
CheckUnsequencedOperations(E);
@@ -12381,6 +12776,10 @@ static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size,
void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE,
bool AllowOnePastEnd, bool IndexNegated) {
+ // Already diagnosed by the constant evaluator.
+ if (isConstantEvaluated())
+ return;
+
IndexExpr = IndexExpr->IgnoreParenImpCasts();
if (IndexExpr->isValueDependent())
return;
@@ -12395,6 +12794,8 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
return;
const Type *BaseType = ArrayTy->getElementType().getTypePtr();
+ if (EffectiveType->isDependentType() || BaseType->isDependentType())
+ return;
Expr::EvalResult Result;
if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects))
@@ -13516,8 +13917,13 @@ static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
/// \param VD Declaration of an identifier that appears in a type tag.
///
/// \param MagicValue Type tag magic value.
+///
+/// \param isConstantEvaluated whether the evaluation should be performed in
+/// constant context.
static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
- const ValueDecl **VD, uint64_t *MagicValue) {
+ const ValueDecl **VD, uint64_t *MagicValue,
+ bool isConstantEvaluated) {
while(true) {
if (!TypeExpr)
return false;
@@ -13555,7 +13961,8 @@ static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
const AbstractConditionalOperator *ACO =
cast<AbstractConditionalOperator>(TypeExpr);
bool Result;
- if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx)) {
+ if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx,
+ isConstantEvaluated)) {
if (Result)
TypeExpr = ACO->getTrueExpr();
else
@@ -13591,14 +13998,17 @@ static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
///
/// \param TypeInfo Information about the corresponding C type.
///
+/// \param isConstantEvaluated whether the evaluation should be performed in
+/// constant context.
+///
/// \returns true if the corresponding C type was found.
static bool GetMatchingCType(
- const IdentifierInfo *ArgumentKind,
- const Expr *TypeExpr, const ASTContext &Ctx,
- const llvm::DenseMap<Sema::TypeTagMagicValue,
- Sema::TypeTagData> *MagicValues,
- bool &FoundWrongKind,
- Sema::TypeTagData &TypeInfo) {
+ const IdentifierInfo *ArgumentKind, const Expr *TypeExpr,
+ const ASTContext &Ctx,
+ const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData>
+ *MagicValues,
+ bool &FoundWrongKind, Sema::TypeTagData &TypeInfo,
+ bool isConstantEvaluated) {
FoundWrongKind = false;
// Variable declaration that has type_tag_for_datatype attribute.
@@ -13606,7 +14016,7 @@ static bool GetMatchingCType(
uint64_t MagicValue;
- if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue))
+ if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated))
return false;
if (VD) {
@@ -13684,8 +14094,8 @@ void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
bool FoundWrongKind;
TypeTagData TypeInfo;
if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context,
- TypeTagForDatatypeMagicValues.get(),
- FoundWrongKind, TypeInfo)) {
+ TypeTagForDatatypeMagicValues.get(), FoundWrongKind,
+ TypeInfo, isConstantEvaluated())) {
if (FoundWrongKind)
Diag(TypeTagExpr->getExprLoc(),
diag::warn_type_tag_for_datatype_wrong_kind)
@@ -13787,8 +14197,7 @@ void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) {
auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
if (isa<MemberExpr>(Op)) {
- auto MA = std::find(MisalignedMembers.begin(), MisalignedMembers.end(),
- MisalignedMember(Op));
+ auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op));
if (MA != MisalignedMembers.end() &&
(T->isIntegerType() ||
(T->isPointerType() && (T->getPointeeType()->isIncompleteType() ||
diff --git a/lib/Sema/SemaCodeComplete.cpp b/lib/Sema/SemaCodeComplete.cpp
index d9f007a46da5..e4bbee86e350 100644
--- a/lib/Sema/SemaCodeComplete.cpp
+++ b/lib/Sema/SemaCodeComplete.cpp
@@ -1,9 +1,8 @@
//===---------------- SemaCodeComplete.cpp - Code Completion ----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -17,7 +16,9 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/QualTypeNames.h"
+#include "clang/AST/Type.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/Specifiers.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/Preprocessor.h"
@@ -38,6 +39,7 @@
#include "llvm/Support/Path.h"
#include <list>
#include <map>
+#include <string>
#include <vector>
using namespace clang;
@@ -82,6 +84,15 @@ private:
public:
ShadowMapEntry() : DeclOrVector(), SingleDeclIndex(0) {}
+ ShadowMapEntry(const ShadowMapEntry &) = delete;
+ ShadowMapEntry(ShadowMapEntry &&Move) { *this = std::move(Move); }
+ ShadowMapEntry &operator=(const ShadowMapEntry &) = delete;
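+ // Move transfers ownership of the (possibly heap-allocated) vector and nulls
+ // it in the source so ~ShadowMapEntry does not delete it twice.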
+ ShadowMapEntry &operator=(ShadowMapEntry &&Move) {
+ SingleDeclIndex = Move.SingleDeclIndex;
+ DeclOrVector = Move.DeclOrVector;
+ Move.DeclOrVector = nullptr;
+ return *this;
+ }
void Add(const NamedDecl *ND, unsigned Index) {
if (DeclOrVector.isNull()) {
@@ -105,7 +116,7 @@ private:
DeclIndexPair(ND, Index));
}
- void Destroy() {
+ ~ShadowMapEntry() {
if (DeclIndexPairVector *Vec =
DeclOrVector.dyn_cast<DeclIndexPairVector *>()) {
delete Vec;
@@ -152,9 +163,16 @@ private:
/// different levels of, e.g., the inheritance hierarchy.
std::list<ShadowMap> ShadowMaps;
+ /// Overloaded C++ member functions found by SemaLookup.
+ /// Used to determine when one overload is dominated by another.
+ llvm::DenseMap<std::pair<DeclContext *, /*Name*/uintptr_t>, ShadowMapEntry>
+ OverloadMap;
+
/// If we're potentially referring to a C++ member function, the set
/// of qualifiers applied to the object type.
Qualifiers ObjectTypeQualifiers;
+ /// The kind of the object expression, for rvalue/lvalue overloads.
+ ExprValueKind ObjectKind;
/// Whether the \p ObjectTypeQualifiers field is active.
bool HasObjectTypeQualifiers;
@@ -230,8 +248,9 @@ public:
/// out member functions that aren't available (because there will be a
/// cv-qualifier mismatch) or prefer functions with an exact qualifier
/// match.
- void setObjectTypeQualifiers(Qualifiers Quals) {
+ void setObjectTypeQualifiers(Qualifiers Quals, ExprValueKind Kind) {
ObjectTypeQualifiers = Quals;
+ ObjectKind = Kind;
HasObjectTypeQualifiers = true;
}
@@ -348,6 +367,202 @@ public:
};
} // namespace
+void PreferredTypeBuilder::enterReturn(Sema &S, SourceLocation Tok) {
+ if (isa<BlockDecl>(S.CurContext)) {
+ if (sema::BlockScopeInfo *BSI = S.getCurBlock()) {
+ ComputeType = nullptr;
+ Type = BSI->ReturnType;
+ ExpectedLoc = Tok;
+ }
+ } else if (const auto *Function = dyn_cast<FunctionDecl>(S.CurContext)) {
+ ComputeType = nullptr;
+ Type = Function->getReturnType();
+ ExpectedLoc = Tok;
+ } else if (const auto *Method = dyn_cast<ObjCMethodDecl>(S.CurContext)) {
+ ComputeType = nullptr;
+ Type = Method->getReturnType();
+ ExpectedLoc = Tok;
+ }
+}
+
+void PreferredTypeBuilder::enterVariableInit(SourceLocation Tok, Decl *D) {
+ auto *VD = llvm::dyn_cast_or_null<ValueDecl>(D);
+ ComputeType = nullptr;
+ Type = VD ? VD->getType() : QualType();
+ ExpectedLoc = Tok;
+}
+
+void PreferredTypeBuilder::enterFunctionArgument(
+ SourceLocation Tok, llvm::function_ref<QualType()> ComputeType) {
+ this->ComputeType = ComputeType;
+ Type = QualType();
+ ExpectedLoc = Tok;
+}
+
+void PreferredTypeBuilder::enterParenExpr(SourceLocation Tok,
+ SourceLocation LParLoc) {
+ // The expected type of a parenthesized expression does not change.
+ if (ExpectedLoc == LParLoc)
+ ExpectedLoc = Tok;
+}
+
+static QualType getPreferredTypeOfBinaryRHS(Sema &S, Expr *LHS,
+ tok::TokenKind Op) {
+ if (!LHS)
+ return QualType();
+
+ QualType LHSType = LHS->getType();
+ if (LHSType->isPointerType()) {
+ if (Op == tok::plus || Op == tok::plusequal || Op == tok::minusequal)
+ return S.getASTContext().getPointerDiffType();
+ // Pointer difference is more common than subtracting an int from a pointer.
+ if (Op == tok::minus)
+ return LHSType;
+ }
+
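+ // A few illustrative cases: after "p == " (p a pointer) the pointer type is
+ // preferred; after "flag && " bool is preferred; after "x << " with integral
+ // x, int is preferred.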
+ switch (Op) {
+ // No way to infer the type of RHS from LHS.
+ case tok::comma:
+ return QualType();
+ // Prefer the type of the left operand for all of these.
+ // Arithmetic operations.
+ case tok::plus:
+ case tok::plusequal:
+ case tok::minus:
+ case tok::minusequal:
+ case tok::percent:
+ case tok::percentequal:
+ case tok::slash:
+ case tok::slashequal:
+ case tok::star:
+ case tok::starequal:
+ // Assignment.
+ case tok::equal:
+ // Comparison operators.
+ case tok::equalequal:
+ case tok::exclaimequal:
+ case tok::less:
+ case tok::lessequal:
+ case tok::greater:
+ case tok::greaterequal:
+ case tok::spaceship:
+ return LHS->getType();
+ // Binary shifts are often overloaded, so don't try to guess those.
+ case tok::greatergreater:
+ case tok::greatergreaterequal:
+ case tok::lessless:
+ case tok::lesslessequal:
+ if (LHSType->isIntegralOrEnumerationType())
+ return S.getASTContext().IntTy;
+ return QualType();
+ // Logical operators, assume we want bool.
+ case tok::ampamp:
+ case tok::pipepipe:
+ case tok::caretcaret:
+ return S.getASTContext().BoolTy;
+ // Operators often used for bit manipulation are typically used with the type
+ // of the left argument.
+ case tok::pipe:
+ case tok::pipeequal:
+ case tok::caret:
+ case tok::caretequal:
+ case tok::amp:
+ case tok::ampequal:
+ if (LHSType->isIntegralOrEnumerationType())
+ return LHSType;
+ return QualType();
+ // RHS should be a pointer to a member of the 'LHS' type, but we can't give
+ // any particular type here.
+ case tok::periodstar:
+ case tok::arrowstar:
+ return QualType();
+ default:
+ // FIXME(ibiryukov): handle the missing op, re-add the assertion.
+ // assert(false && "unhandled binary op");
+ return QualType();
+ }
+}
+
+/// Get the preferred type for the argument of a unary expression. \p ContextType
+/// is the preferred type of the whole unary expression.
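+/// e.g. in "int *p = &^" the context type is 'int *', so 'int' is preferred
+/// for the operand of '&'; for "!^" bool is preferred.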
+static QualType getPreferredTypeOfUnaryArg(Sema &S, QualType ContextType,
+ tok::TokenKind Op) {
+ switch (Op) {
+ case tok::exclaim:
+ return S.getASTContext().BoolTy;
+ case tok::amp:
+ if (!ContextType.isNull() && ContextType->isPointerType())
+ return ContextType->getPointeeType();
+ return QualType();
+ case tok::star:
+ if (ContextType.isNull())
+ return QualType();
+ return S.getASTContext().getPointerType(ContextType.getNonReferenceType());
+ case tok::plus:
+ case tok::minus:
+ case tok::tilde:
+ case tok::minusminus:
+ case tok::plusplus:
+ if (ContextType.isNull())
+ return S.getASTContext().IntTy;
+ // Leave as-is; these operators typically return the same type.
+ return ContextType;
+ case tok::kw___real:
+ case tok::kw___imag:
+ return QualType();
+ default:
+ assert(false && "unhandled unary op");
+ return QualType();
+ }
+}
+
+void PreferredTypeBuilder::enterBinary(Sema &S, SourceLocation Tok, Expr *LHS,
+ tok::TokenKind Op) {
+ ComputeType = nullptr;
+ Type = getPreferredTypeOfBinaryRHS(S, LHS, Op);
+ ExpectedLoc = Tok;
+}
+
+void PreferredTypeBuilder::enterMemAccess(Sema &S, SourceLocation Tok,
+ Expr *Base) {
+ if (!Base)
+ return;
+ // Do we have an expected type for Base?
+ if (ExpectedLoc != Base->getBeginLoc())
+ return;
+ // Keep the expected type, only update the location.
+ ExpectedLoc = Tok;
+ return;
+}
+
+void PreferredTypeBuilder::enterUnary(Sema &S, SourceLocation Tok,
+ tok::TokenKind OpKind,
+ SourceLocation OpLoc) {
+ ComputeType = nullptr;
+ Type = getPreferredTypeOfUnaryArg(S, this->get(OpLoc), OpKind);
+ ExpectedLoc = Tok;
+}
+
+void PreferredTypeBuilder::enterSubscript(Sema &S, SourceLocation Tok,
+ Expr *LHS) {
+ ComputeType = nullptr;
+ Type = S.getASTContext().IntTy;
+ ExpectedLoc = Tok;
+}
+
+void PreferredTypeBuilder::enterTypeCast(SourceLocation Tok,
+ QualType CastType) {
+ ComputeType = nullptr;
+ Type = !CastType.isNull() ? CastType.getCanonicalType() : QualType();
+ ExpectedLoc = Tok;
+}
+
+void PreferredTypeBuilder::enterCondition(Sema &S, SourceLocation Tok) {
+ ComputeType = nullptr;
+ Type = S.getASTContext().BoolTy;
+ ExpectedLoc = Tok;
+}
+
class ResultBuilder::ShadowMapEntry::iterator {
llvm::PointerUnion<const NamedDecl *, const DeclIndexPair *> DeclOrIterator;
unsigned SingleDeclIndex;
@@ -681,7 +896,8 @@ QualType clang::getDeclUsageType(ASTContext &C, const NamedDecl *ND) {
T = Property->getType();
else if (const auto *Value = dyn_cast<ValueDecl>(ND))
T = Value->getType();
- else
+
+ if (T.isNull())
return QualType();
// Dig through references, function pointers, and block pointers to
@@ -792,8 +1008,8 @@ void ResultBuilder::AdjustResultPriorityForDecl(Result &R) {
}
}
-DeclContext::lookup_result getConstructors(ASTContext &Context,
- const CXXRecordDecl *Record) {
+static DeclContext::lookup_result getConstructors(ASTContext &Context,
+ const CXXRecordDecl *Record) {
QualType RecordTy = Context.getTypeDeclType(Record);
DeclarationName ConstructorName =
Context.DeclarationNames.getCXXConstructorName(
@@ -960,6 +1176,53 @@ static void setInBaseClass(ResultBuilder::Result &R) {
R.InBaseClass = true;
}
+enum class OverloadCompare { BothViable, Dominates, Dominated };
+// Will Candidate ever be called on the object, when overloaded with Incumbent?
+// Returns Dominates if Candidate is always called, Dominated if Incumbent is
+// always called, BothViable if either may be called depending on arguments.
+// Precondition: must actually be overloads!
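+// e.g. for the motivating case of "iterator begin()" vs
+// "const_iterator begin() const" on a non-const object, the non-const overload
+// dominates and the const one is dropped from the results.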
+static OverloadCompare compareOverloads(const CXXMethodDecl &Candidate,
+ const CXXMethodDecl &Incumbent,
+ const Qualifiers &ObjectQuals,
+ ExprValueKind ObjectKind) {
+ if (Candidate.isVariadic() != Incumbent.isVariadic() ||
+ Candidate.getNumParams() != Incumbent.getNumParams() ||
+ Candidate.getMinRequiredArguments() !=
+ Incumbent.getMinRequiredArguments())
+ return OverloadCompare::BothViable;
+ for (unsigned I = 0, E = Candidate.getNumParams(); I != E; ++I)
+ if (Candidate.parameters()[I]->getType().getCanonicalType() !=
+ Incumbent.parameters()[I]->getType().getCanonicalType())
+ return OverloadCompare::BothViable;
+ if (!llvm::empty(Candidate.specific_attrs<EnableIfAttr>()) ||
+ !llvm::empty(Incumbent.specific_attrs<EnableIfAttr>()))
+ return OverloadCompare::BothViable;
+ // At this point, we know calls can't pick one or the other based on
+ // arguments, so one of the two must win. (Or both fail, handled elsewhere).
+ RefQualifierKind CandidateRef = Candidate.getRefQualifier();
+ RefQualifierKind IncumbentRef = Incumbent.getRefQualifier();
+ if (CandidateRef != IncumbentRef) {
+ // If the object kind is LValue/RValue, there's one acceptable ref-qualifier
+ // and it can't be mixed with ref-unqualified overloads (in valid code).
+
+ // For xvalue objects, we prefer the rvalue overload even if we have to
+ // add qualifiers (which is rare, because const&& is rare).
+ if (ObjectKind == clang::VK_XValue)
+ return CandidateRef == RQ_RValue ? OverloadCompare::Dominates
+ : OverloadCompare::Dominated;
+ }
+ // Now the ref qualifiers are the same (or we're in some invalid state).
+ // So make some decision based on the qualifiers.
+ Qualifiers CandidateQual = Candidate.getMethodQualifiers();
+ Qualifiers IncumbentQual = Incumbent.getMethodQualifiers();
+ bool CandidateSuperset = CandidateQual.compatiblyIncludes(IncumbentQual);
+ bool IncumbentSuperset = IncumbentQual.compatiblyIncludes(CandidateQual);
+ if (CandidateSuperset == IncumbentSuperset)
+ return OverloadCompare::BothViable;
+ return IncumbentSuperset ? OverloadCompare::Dominates
+ : OverloadCompare::Dominated;
+}
+
void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
NamedDecl *Hiding, bool InBaseClass = false) {
if (R.Kind != Result::RK_Declaration) {
@@ -1028,7 +1291,7 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
if (HasObjectTypeQualifiers)
if (const auto *Method = dyn_cast<CXXMethodDecl>(R.Declaration))
if (Method->isInstance()) {
- Qualifiers MethodQuals = Method->getTypeQualifiers();
+ Qualifiers MethodQuals = Method->getMethodQualifiers();
if (ObjectTypeQualifiers == MethodQuals)
R.Priority += CCD_ObjectQualifierMatch;
else if (ObjectTypeQualifiers - MethodQuals) {
@@ -1036,6 +1299,44 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
// qualifiers.
return;
}
+ // Detect cases where a ref-qualified method cannot be invoked.
+ switch (Method->getRefQualifier()) {
+ case RQ_LValue:
+ if (ObjectKind != VK_LValue && !MethodQuals.hasConst())
+ return;
+ break;
+ case RQ_RValue:
+ if (ObjectKind == VK_LValue)
+ return;
+ break;
+ case RQ_None:
+ break;
+ }
+
+ /// Check whether this dominates another overloaded method, which should
+ /// be suppressed (or vice versa).
+ /// Motivating case is const_iterator begin() const vs iterator begin().
+ auto &OverloadSet = OverloadMap[std::make_pair(
+ CurContext, Method->getDeclName().getAsOpaqueInteger())];
+ for (const DeclIndexPair& Entry : OverloadSet) {
+ Result &Incumbent = Results[Entry.second];
+ switch (compareOverloads(*Method,
+ *cast<CXXMethodDecl>(Incumbent.Declaration),
+ ObjectTypeQualifiers, ObjectKind)) {
+ case OverloadCompare::Dominates:
+ // Replace the dominated overload with this one.
+ // FIXME: if the overload dominates multiple incumbents then we
+ // should remove all. But two overloads is by far the common case.
+ Incumbent = std::move(R);
+ return;
+ case OverloadCompare::Dominated:
+ // This overload can't be called, drop it.
+ return;
+ case OverloadCompare::BothViable:
+ break;
+ }
+ }
+ OverloadSet.Add(Method, Results.size());
}
// Insert this result into the set of results.
@@ -1056,11 +1357,6 @@ void ResultBuilder::EnterNewScope() { ShadowMaps.emplace_back(); }
/// Exit from the current scope.
void ResultBuilder::ExitScope() {
- for (ShadowMap::iterator E = ShadowMaps.back().begin(),
- EEnd = ShadowMaps.back().end();
- E != EEnd; ++E)
- E->second.Destroy();
-
ShadowMaps.pop_back();
}
@@ -1516,6 +1812,7 @@ static void AddTypedefResult(ResultBuilder &Results) {
Builder.AddPlaceholderChunk("type");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("name");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
Results.AddResult(CodeCompletionResult(Builder.TakeString()));
}
@@ -1629,22 +1926,10 @@ static void AddStaticAssertResult(CodeCompletionBuilder &Builder,
Builder.AddChunk(CodeCompletionString::CK_Comma);
Builder.AddPlaceholderChunk("message");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
Results.AddResult(CodeCompletionResult(Builder.TakeString()));
}
-static void printOverrideString(llvm::raw_ostream &OS,
- CodeCompletionString *CCS) {
- for (const auto &C : *CCS) {
- if (C.Kind == CodeCompletionString::CK_Optional)
- printOverrideString(OS, C.Optional);
- else
- OS << C.Text;
- // Add a space after return type.
- if (C.Kind == CodeCompletionString::CK_ResultType)
- OS << ' ';
- }
-}
-
static void AddOverrideResults(ResultBuilder &Results,
const CodeCompletionContext &CCContext,
CodeCompletionBuilder &Builder) {
@@ -1714,7 +1999,9 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
Builder.AddTypedTextChunk("namespace");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("identifier");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddPlaceholderChunk("declarations");
Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
@@ -1727,14 +2014,14 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
Builder.AddPlaceholderChunk("name");
Builder.AddChunk(CodeCompletionString::CK_Equal);
Builder.AddPlaceholderChunk("namespace");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
Results.AddResult(Result(Builder.TakeString()));
// Using directives
- Builder.AddTypedTextChunk("using");
- Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Builder.AddTextChunk("namespace");
+ Builder.AddTypedTextChunk("using namespace");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("identifier");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
Results.AddResult(Result(Builder.TakeString()));
// asm(string-literal)
@@ -1769,17 +2056,17 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
Builder.AddPlaceholderChunk("qualifier");
Builder.AddTextChunk("::");
Builder.AddPlaceholderChunk("name");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
Results.AddResult(Result(Builder.TakeString()));
// using typename qualifier::name (only in a dependent context)
if (SemaRef.CurContext->isDependentContext()) {
- Builder.AddTypedTextChunk("using");
- Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Builder.AddTextChunk("typename");
+ Builder.AddTypedTextChunk("using typename");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("qualifier");
Builder.AddTextChunk("::");
Builder.AddPlaceholderChunk("name");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
Results.AddResult(Result(Builder.TakeString()));
}
@@ -1857,15 +2144,21 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
if (SemaRef.getLangOpts().CPlusPlus && Results.includeCodePatterns() &&
SemaRef.getLangOpts().CXXExceptions) {
Builder.AddTypedTextChunk("try");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddPlaceholderChunk("statements");
Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddTextChunk("catch");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("declaration");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddPlaceholderChunk("statements");
Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
@@ -1877,13 +2170,16 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
if (Results.includeCodePatterns()) {
// if (condition) { statements }
Builder.AddTypedTextChunk("if");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
if (SemaRef.getLangOpts().CPlusPlus)
Builder.AddPlaceholderChunk("condition");
else
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddPlaceholderChunk("statements");
Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
@@ -1891,14 +2187,18 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
// switch (condition) { }
Builder.AddTypedTextChunk("switch");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
if (SemaRef.getLangOpts().CPlusPlus)
Builder.AddPlaceholderChunk("condition");
else
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddPlaceholderChunk("cases");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
Results.AddResult(Result(Builder.TakeString()));
}
@@ -1922,13 +2222,16 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
if (Results.includeCodePatterns()) {
/// while (condition) { statements }
Builder.AddTypedTextChunk("while");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
if (SemaRef.getLangOpts().CPlusPlus)
Builder.AddPlaceholderChunk("condition");
else
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddPlaceholderChunk("statements");
Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
@@ -1936,11 +2239,14 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
// do { statements } while ( expression );
Builder.AddTypedTextChunk("do");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddPlaceholderChunk("statements");
Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
Builder.AddTextChunk("while");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
@@ -1948,16 +2254,20 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
// for ( for-init-statement ; condition ; expression ) { statements }
Builder.AddTypedTextChunk("for");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
if (SemaRef.getLangOpts().CPlusPlus || SemaRef.getLangOpts().C99)
Builder.AddPlaceholderChunk("init-statement");
else
Builder.AddPlaceholderChunk("init-expression");
Builder.AddChunk(CodeCompletionString::CK_SemiColon);
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("condition");
Builder.AddChunk(CodeCompletionString::CK_SemiColon);
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("inc-expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddPlaceholderChunk("statements");
@@ -1969,44 +2279,62 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
if (S->getContinueParent()) {
// continue ;
Builder.AddTypedTextChunk("continue");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
Results.AddResult(Result(Builder.TakeString()));
}
if (S->getBreakParent()) {
// break ;
Builder.AddTypedTextChunk("break");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
Results.AddResult(Result(Builder.TakeString()));
}
- // "return expression ;" or "return ;", depending on whether we
- // know the function is void or not.
- bool isVoid = false;
+ // "return expression ;" or "return ;", depending on the return type.
+ QualType ReturnType;
if (const auto *Function = dyn_cast<FunctionDecl>(SemaRef.CurContext))
- isVoid = Function->getReturnType()->isVoidType();
+ ReturnType = Function->getReturnType();
else if (const auto *Method = dyn_cast<ObjCMethodDecl>(SemaRef.CurContext))
- isVoid = Method->getReturnType()->isVoidType();
+ ReturnType = Method->getReturnType();
else if (SemaRef.getCurBlock() &&
!SemaRef.getCurBlock()->ReturnType.isNull())
- isVoid = SemaRef.getCurBlock()->ReturnType->isVoidType();
- Builder.AddTypedTextChunk("return");
- if (!isVoid) {
- Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ ReturnType = SemaRef.getCurBlock()->ReturnType;
+ if (ReturnType.isNull() || ReturnType->isVoidType()) {
+ Builder.AddTypedTextChunk("return");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
+ Results.AddResult(Result(Builder.TakeString()));
+ } else {
+ assert(!ReturnType.isNull());
+ // "return expression ;"
+ Builder.AddTypedTextChunk("return");
+ Builder.AddChunk(clang::CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
+ Results.AddResult(Result(Builder.TakeString()));
+ // When boolean, also add 'return true;' and 'return false;'.
+ if (ReturnType->isBooleanType()) {
+ Builder.AddTypedTextChunk("return true");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ Builder.AddTypedTextChunk("return false");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
}
- Results.AddResult(Result(Builder.TakeString()));
// goto identifier ;
Builder.AddTypedTextChunk("goto");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("label");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
Results.AddResult(Result(Builder.TakeString()));
// Using directives
- Builder.AddTypedTextChunk("using");
- Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Builder.AddTextChunk("namespace");
+ Builder.AddTypedTextChunk("using namespace");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("identifier");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
Results.AddResult(Result(Builder.TakeString()));
AddStaticAssertResult(Builder, Results, SemaRef.getLangOpts());
@@ -2409,6 +2737,11 @@ static std::string
FormatFunctionParameter(const PrintingPolicy &Policy, const ParmVarDecl *Param,
bool SuppressName = false, bool SuppressBlock = false,
Optional<ArrayRef<QualType>> ObjCSubsts = None) {
+ // Params are unavailable in FunctionTypeLoc if the FunctionType is invalid.
+ // It would be better to pass in the param Type, which is usually available.
+ // But this case is rare, so just pretend we fell back to int as elsewhere.
+ if (!Param)
+ return "int";
bool ObjCMethodParam = isa<ObjCMethodDecl>(Param->getDeclContext());
if (Param->getType()->isDependentType() ||
!Param->getType()->isBlockPointerType()) {
@@ -2736,23 +3069,23 @@ static void
AddFunctionTypeQualsToCompletionString(CodeCompletionBuilder &Result,
const FunctionDecl *Function) {
const auto *Proto = Function->getType()->getAs<FunctionProtoType>();
- if (!Proto || !Proto->getTypeQuals())
+ if (!Proto || !Proto->getMethodQuals())
return;
// FIXME: Add ref-qualifier!
// Handle single qualifiers without copying
- if (Proto->getTypeQuals().hasOnlyConst()) {
+ if (Proto->getMethodQuals().hasOnlyConst()) {
Result.AddInformativeChunk(" const");
return;
}
- if (Proto->getTypeQuals().hasOnlyVolatile()) {
+ if (Proto->getMethodQuals().hasOnlyVolatile()) {
Result.AddInformativeChunk(" volatile");
return;
}
- if (Proto->getTypeQuals().hasOnlyRestrict()) {
+ if (Proto->getMethodQuals().hasOnlyRestrict()) {
Result.AddInformativeChunk(" restrict");
return;
}
@@ -2952,19 +3285,42 @@ CodeCompletionString *CodeCompletionResult::CreateCodeCompletionString(
PP, Ctx, Result, IncludeBriefComments, CCContext, Policy);
}
+static void printOverrideString(const CodeCompletionString &CCS,
+ std::string &BeforeName,
+ std::string &NameAndSignature) {
+ bool SeenTypedChunk = false;
+ for (auto &Chunk : CCS) {
+ if (Chunk.Kind == CodeCompletionString::CK_Optional) {
+ assert(SeenTypedChunk && "optional parameter before name");
+ // Note that we put all chunks inside the optional into NameAndSignature.
+ printOverrideString(*Chunk.Optional, NameAndSignature, NameAndSignature);
+ continue;
+ }
+ SeenTypedChunk |= Chunk.Kind == CodeCompletionString::CK_TypedText;
+ if (SeenTypedChunk)
+ NameAndSignature += Chunk.Text;
+ else
+ BeforeName += Chunk.Text;
+ }
+}
+
CodeCompletionString *
CodeCompletionResult::createCodeCompletionStringForOverride(
Preprocessor &PP, ASTContext &Ctx, CodeCompletionBuilder &Result,
bool IncludeBriefComments, const CodeCompletionContext &CCContext,
PrintingPolicy &Policy) {
- std::string OverrideSignature;
- llvm::raw_string_ostream OS(OverrideSignature);
auto *CCS = createCodeCompletionStringForDecl(PP, Ctx, Result,
/*IncludeBriefComments=*/false,
CCContext, Policy);
- printOverrideString(OS, CCS);
- OS << " override";
- Result.AddTypedTextChunk(Result.getAllocator().CopyString(OS.str()));
+ std::string BeforeName;
+ std::string NameAndSignature;
+ // For overrides, all chunks go into the result; none are informative.
+ printOverrideString(*CCS, BeforeName, NameAndSignature);
+ NameAndSignature += " override";
+
+ Result.AddTextChunk(Result.getAllocator().CopyString(BeforeName));
+ Result.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Result.AddTypedTextChunk(Result.getAllocator().CopyString(NameAndSignature));
return Result.TakeString();
}
@@ -3740,7 +4096,8 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
// the member function to filter/prioritize the results list.
auto ThisType = getCurrentThisType();
if (!ThisType.isNull())
- Results.setObjectTypeQualifiers(ThisType->getPointeeType().getQualifiers());
+ Results.setObjectTypeQualifiers(ThisType->getPointeeType().getQualifiers(),
+ VK_LValue);
CodeCompletionDeclConsumer Consumer(Results, CurContext);
LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
@@ -3856,16 +4213,116 @@ void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
}
struct Sema::CodeCompleteExpressionData {
- CodeCompleteExpressionData(QualType PreferredType = QualType())
+ CodeCompleteExpressionData(QualType PreferredType = QualType(),
+ bool IsParenthesized = false)
: PreferredType(PreferredType), IntegralConstantExpression(false),
- ObjCCollection(false) {}
+ ObjCCollection(false), IsParenthesized(IsParenthesized) {}
QualType PreferredType;
bool IntegralConstantExpression;
bool ObjCCollection;
+ bool IsParenthesized;
SmallVector<Decl *, 4> IgnoreDecls;
};
+namespace {
+/// Information used to avoid completing redundant enumerators.
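+/// e.g. while completing "switch (x) { case E::A: break; case ^ }", E::A is
+/// recorded in Seen so it is not suggested again.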
+struct CoveredEnumerators {
+ llvm::SmallPtrSet<EnumConstantDecl *, 8> Seen;
+ NestedNameSpecifier *SuggestedQualifier = nullptr;
+};
+} // namespace
+
+static void AddEnumerators(ResultBuilder &Results, ASTContext &Context,
+ EnumDecl *Enum, DeclContext *CurContext,
+ const CoveredEnumerators &Enumerators) {
+ NestedNameSpecifier *Qualifier = Enumerators.SuggestedQualifier;
+ if (Context.getLangOpts().CPlusPlus && !Qualifier && Enumerators.Seen.empty()) {
+ // If there are no prior enumerators in C++, check whether we have to
+ // qualify the names of the enumerators that we suggest, because they
+ // may not be visible in this scope.
+ Qualifier = getRequiredQualification(Context, CurContext, Enum);
+ }
+
+ Results.EnterNewScope();
+ for (auto *E : Enum->enumerators()) {
+ if (Enumerators.Seen.count(E))
+ continue;
+
+ CodeCompletionResult R(E, CCP_EnumInCase, Qualifier);
+ Results.AddResult(R, CurContext, nullptr, false);
+ }
+ Results.ExitScope();
+}
+
+/// Try to find a corresponding FunctionProtoType for function-like types (e.g.
+/// function pointers, std::function, etc).
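+/// e.g. both "void (*)(int)" and "std::function<void(int)>" yield the
+/// prototype of "void (int)".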
+static const FunctionProtoType *TryDeconstructFunctionLike(QualType T) {
+ assert(!T.isNull());
+ // Try to extract first template argument from std::function<> and similar.
+ // Note we only handle the sugared types, as they closely match what users wrote.
+ // We explicitly choose to not handle ClassTemplateSpecializationDecl.
+ if (auto *Specialization = T->getAs<TemplateSpecializationType>()) {
+ if (Specialization->getNumArgs() != 1)
+ return nullptr;
+ const TemplateArgument &Argument = Specialization->getArg(0);
+ if (Argument.getKind() != TemplateArgument::Type)
+ return nullptr;
+ return Argument.getAsType()->getAs<FunctionProtoType>();
+ }
+ // Handle other cases.
+ if (T->isPointerType())
+ T = T->getPointeeType();
+ return T->getAs<FunctionProtoType>();
+}
+
+/// Adds a pattern completion for a lambda expression with the specified
+/// parameter types and placeholders for parameter names.
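+/// e.g. for parameter types {int, const Foo &}, the offered pattern is roughly
+/// "[=](int parameter, const Foo & parameter) { body }".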
+static void AddLambdaCompletion(ResultBuilder &Results,
+ llvm::ArrayRef<QualType> Parameters,
+ const LangOptions &LangOpts) {
+ if (!Results.includeCodePatterns())
+ return;
+ CodeCompletionBuilder Completion(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ // [](<parameters>) {}
+ Completion.AddChunk(CodeCompletionString::CK_LeftBracket);
+ Completion.AddPlaceholderChunk("=");
+ Completion.AddChunk(CodeCompletionString::CK_RightBracket);
+ if (!Parameters.empty()) {
+ Completion.AddChunk(CodeCompletionString::CK_LeftParen);
+ bool First = true;
+ for (auto Parameter : Parameters) {
+ if (!First)
+ Completion.AddChunk(CodeCompletionString::ChunkKind::CK_Comma);
+ else
+ First = false;
+
+ constexpr llvm::StringLiteral NamePlaceholder = "!#!NAME_GOES_HERE!#!";
+ std::string Type = NamePlaceholder;
+ Parameter.getAsStringInternal(Type, PrintingPolicy(LangOpts));
+ llvm::StringRef Prefix, Suffix;
+ std::tie(Prefix, Suffix) = llvm::StringRef(Type).split(NamePlaceholder);
+ Prefix = Prefix.rtrim();
+ Suffix = Suffix.ltrim();
+
+ Completion.AddTextChunk(Completion.getAllocator().CopyString(Prefix));
+ Completion.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Completion.AddPlaceholderChunk("parameter");
+ Completion.AddTextChunk(Completion.getAllocator().CopyString(Suffix));
+ }
+ Completion.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+ Completion.AddChunk(clang::CodeCompletionString::CK_HorizontalSpace);
+ Completion.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Completion.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Completion.AddPlaceholderChunk("body");
+ Completion.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Completion.AddChunk(CodeCompletionString::CK_RightBrace);
+
+ Results.AddResult(Completion.TakeString());
+}
+
/// Perform code-completion in an expression context when we know what
/// type we're looking for.
void Sema::CodeCompleteExpression(Scope *S,
@@ -3873,13 +4330,18 @@ void Sema::CodeCompleteExpression(Scope *S,
ResultBuilder Results(
*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
- CodeCompletionContext(CodeCompletionContext::CCC_Expression,
- Data.PreferredType));
+ CodeCompletionContext(
+ Data.IsParenthesized
+ ? CodeCompletionContext::CCC_ParenthesizedExpression
+ : CodeCompletionContext::CCC_Expression,
+ Data.PreferredType));
+ auto PCC =
+ Data.IsParenthesized ? PCC_ParenthesizedExpression : PCC_Expression;
if (Data.ObjCCollection)
Results.setFilter(&ResultBuilder::IsObjCCollection);
else if (Data.IntegralConstantExpression)
Results.setFilter(&ResultBuilder::IsIntegralConstantValue);
- else if (WantTypesInContext(PCC_Expression, getLangOpts()))
+ else if (WantTypesInContext(PCC, getLangOpts()))
Results.setFilter(&ResultBuilder::IsOrdinaryName);
else
Results.setFilter(&ResultBuilder::IsOrdinaryNonTypeName);
@@ -3897,14 +4359,23 @@ void Sema::CodeCompleteExpression(Scope *S,
CodeCompleter->loadExternal());
Results.EnterNewScope();
- AddOrdinaryNameResults(PCC_Expression, S, *this, Results);
+ AddOrdinaryNameResults(PCC, S, *this, Results);
Results.ExitScope();
bool PreferredTypeIsPointer = false;
- if (!Data.PreferredType.isNull())
+ if (!Data.PreferredType.isNull()) {
PreferredTypeIsPointer = Data.PreferredType->isAnyPointerType() ||
Data.PreferredType->isMemberPointerType() ||
Data.PreferredType->isBlockPointerType();
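+ // If we expect a value of enum type (e.g. after "if (x == " where x is an
+ // enum), offer its enumerators directly.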
+ if (Data.PreferredType->isEnumeralType()) {
+ EnumDecl *Enum = Data.PreferredType->castAs<EnumType>()->getDecl();
+ if (auto *Def = Enum->getDefinition())
+ Enum = Def;
+ // FIXME: collect covered enumerators in cases like:
+ // if (x == my_enum::one) { ... } else if (x == ^) {}
+ AddEnumerators(Results, Context, Enum, CurContext, CoveredEnumerators());
+ }
+ }
if (S->getFnParent() && !Data.ObjCCollection &&
!Data.IntegralConstantExpression)
@@ -3913,17 +4384,28 @@ void Sema::CodeCompleteExpression(Scope *S,
if (CodeCompleter->includeMacros())
AddMacroResults(PP, Results, CodeCompleter->loadExternal(), false,
PreferredTypeIsPointer);
+
+ // Complete a lambda expression when preferred type is a function.
+ if (!Data.PreferredType.isNull() && getLangOpts().CPlusPlus11) {
+ if (const FunctionProtoType *F =
+ TryDeconstructFunctionLike(Data.PreferredType))
+ AddLambdaCompletion(Results, F->getParamTypes(), getLangOpts());
+ }
+
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
}
-void Sema::CodeCompleteExpression(Scope *S, QualType PreferredType) {
- return CodeCompleteExpression(S, CodeCompleteExpressionData(PreferredType));
+void Sema::CodeCompleteExpression(Scope *S, QualType PreferredType,
+ bool IsParenthesized) {
+ return CodeCompleteExpression(
+ S, CodeCompleteExpressionData(PreferredType, IsParenthesized));
}
-void Sema::CodeCompletePostfixExpression(Scope *S, ExprResult E) {
+void Sema::CodeCompletePostfixExpression(Scope *S, ExprResult E,
+ QualType PreferredType) {
if (E.isInvalid())
- CodeCompleteOrdinaryName(S, PCC_RecoveryInFunction);
+ CodeCompleteExpression(S, PreferredType);
else if (getLangOpts().ObjC)
CodeCompleteObjCInstanceMessage(S, E.get(), None, false);
}
@@ -4169,13 +4651,12 @@ AddObjCProperties(const CodeCompletionContext &CCContext,
}
}
-static void
-AddRecordMembersCompletionResults(Sema &SemaRef, ResultBuilder &Results,
- Scope *S, QualType BaseType, RecordDecl *RD,
- Optional<FixItHint> AccessOpFixIt) {
+static void AddRecordMembersCompletionResults(
+ Sema &SemaRef, ResultBuilder &Results, Scope *S, QualType BaseType,
+ ExprValueKind BaseKind, RecordDecl *RD, Optional<FixItHint> AccessOpFixIt) {
// Indicate that we are performing a member access, and the cv-qualifiers
// for the base object type.
- Results.setObjectTypeQualifiers(BaseType.getQualifiers());
+ Results.setObjectTypeQualifiers(BaseType.getQualifiers(), BaseKind);
// Access to a C/C++ class, struct, or union.
Results.allowNestedNameSpecifiers();
@@ -4211,7 +4692,8 @@ AddRecordMembersCompletionResults(Sema &SemaRef, ResultBuilder &Results,
void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
- bool IsBaseExprStatement) {
+ bool IsBaseExprStatement,
+ QualType PreferredType) {
if (!Base || !CodeCompleter)
return;
@@ -4239,6 +4721,7 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
}
CodeCompletionContext CCContext(contextKind, ConvertedBaseType);
+ CCContext.setPreferredType(PreferredType);
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(), CCContext,
&ResultBuilder::IsMember);
@@ -4254,18 +4737,20 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
Base = ConvertedBase.get();
QualType BaseType = Base->getType();
+ ExprValueKind BaseKind = Base->getValueKind();
if (IsArrow) {
- if (const PointerType *Ptr = BaseType->getAs<PointerType>())
+ if (const PointerType *Ptr = BaseType->getAs<PointerType>()) {
BaseType = Ptr->getPointeeType();
- else if (BaseType->isObjCObjectPointerType())
+ BaseKind = VK_LValue;
+ } else if (BaseType->isObjCObjectPointerType())
/*Do nothing*/;
else
return false;
}
if (const RecordType *Record = BaseType->getAs<RecordType>()) {
- AddRecordMembersCompletionResults(*this, Results, S, BaseType,
+ AddRecordMembersCompletionResults(*this, Results, S, BaseType, BaseKind,
Record->getDecl(),
std::move(AccessOpFixIt));
} else if (const auto *TST =
@@ -4274,13 +4759,13 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
if (const auto *TD =
dyn_cast_or_null<ClassTemplateDecl>(TN.getAsTemplateDecl())) {
CXXRecordDecl *RD = TD->getTemplatedDecl();
- AddRecordMembersCompletionResults(*this, Results, S, BaseType, RD,
- std::move(AccessOpFixIt));
+ AddRecordMembersCompletionResults(*this, Results, S, BaseType, BaseKind,
+ RD, std::move(AccessOpFixIt));
}
} else if (const auto *ICNT = BaseType->getAs<InjectedClassNameType>()) {
if (auto *RD = ICNT->getDecl())
- AddRecordMembersCompletionResults(*this, Results, S, BaseType, RD,
- std::move(AccessOpFixIt));
+ AddRecordMembersCompletionResults(*this, Results, S, BaseType, BaseKind,
+ RD, std::move(AccessOpFixIt));
} else if (!IsArrow && BaseType->isObjCObjectPointerType()) {
// Objective-C property reference.
AddedPropertiesSet AddedProperties;
@@ -4497,8 +4982,7 @@ void Sema::CodeCompleteCase(Scope *S) {
// FIXME: Ideally, we would also be able to look *past* the code-completion
// token, in case we are code-completing in the middle of the switch and not
// at the end. However, we aren't able to do so at the moment.
- llvm::SmallPtrSet<EnumConstantDecl *, 8> EnumeratorsSeen;
- NestedNameSpecifier *Qualifier = nullptr;
+ CoveredEnumerators Enumerators;
for (SwitchCase *SC = Switch->getSwitchCaseList(); SC;
SC = SC->getNextSwitchCase()) {
CaseStmt *Case = dyn_cast<CaseStmt>(SC);
@@ -4515,7 +4999,7 @@ void Sema::CodeCompleteCase(Scope *S) {
// values of each enumerator. However, value-based approach would not
// work as well with C++ templates where enumerators declared within a
// template are type- and value-dependent.
- EnumeratorsSeen.insert(Enumerator);
+ Enumerators.Seen.insert(Enumerator);
// If this is a qualified-id, keep track of the nested-name-specifier
// so that we can reproduce it as part of code completion, e.g.,
@@ -4528,30 +5012,15 @@ void Sema::CodeCompleteCase(Scope *S) {
// At the XXX, our completions are TagDecl::TK_union,
// TagDecl::TK_struct, and TagDecl::TK_class, rather than TK_union,
// TK_struct, and TK_class.
- Qualifier = DRE->getQualifier();
+ Enumerators.SuggestedQualifier = DRE->getQualifier();
}
}
- if (getLangOpts().CPlusPlus && !Qualifier && EnumeratorsSeen.empty()) {
- // If there are no prior enumerators in C++, check whether we have to
- // qualify the names of the enumerators that we suggest, because they
- // may not be visible in this scope.
- Qualifier = getRequiredQualification(Context, CurContext, Enum);
- }
-
// Add any enumerators that have not yet been mentioned.
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Expression);
- Results.EnterNewScope();
- for (auto *E : Enum->enumerators()) {
- if (EnumeratorsSeen.count(E))
- continue;
-
- CodeCompletionResult R(E, CCP_EnumInCase, Qualifier);
- Results.AddResult(R, CurContext, nullptr, false);
- }
- Results.ExitScope();
+ AddEnumerators(Results, Context, Enum, CurContext, Enumerators);
if (CodeCompleter->includeMacros()) {
AddMacroResults(PP, Results, CodeCompleter->loadExternal(), false);
@@ -4576,22 +5045,19 @@ typedef CodeCompleteConsumer::OverloadCandidate ResultCandidate;
static void mergeCandidatesWithResults(
Sema &SemaRef, SmallVectorImpl<ResultCandidate> &Results,
OverloadCandidateSet &CandidateSet, SourceLocation Loc) {
- if (!CandidateSet.empty()) {
- // Sort the overload candidate set by placing the best overloads first.
- std::stable_sort(
- CandidateSet.begin(), CandidateSet.end(),
- [&](const OverloadCandidate &X, const OverloadCandidate &Y) {
- return isBetterOverloadCandidate(SemaRef, X, Y, Loc,
- CandidateSet.getKind());
- });
-
- // Add the remaining viable overload candidates as code-completion results.
- for (OverloadCandidate &Candidate : CandidateSet) {
- if (Candidate.Function && Candidate.Function->isDeleted())
- continue;
- if (Candidate.Viable)
- Results.push_back(ResultCandidate(Candidate.Function));
- }
+ // Sort the overload candidate set by placing the best overloads first.
+ llvm::stable_sort(CandidateSet, [&](const OverloadCandidate &X,
+ const OverloadCandidate &Y) {
+ return isBetterOverloadCandidate(SemaRef, X, Y, Loc,
+ CandidateSet.getKind());
+ });
+
+ // Add the remaining viable overload candidates as code-completion results.
+ for (OverloadCandidate &Candidate : CandidateSet) {
+ if (Candidate.Function && Candidate.Function->isDeleted())
+ continue;
+ if (Candidate.Viable)
+ Results.push_back(ResultCandidate(Candidate.Function));
}
}
@@ -4670,7 +5136,7 @@ QualType Sema::ProduceCallSignatureHelp(Scope *S, Expr *Fn,
Decls.append(UME->decls_begin(), UME->decls_end());
const bool FirstArgumentIsBase = !UME->isImplicitAccess() && UME->getBase();
AddFunctionCandidates(Decls, ArgExprs, CandidateSet, TemplateArgs,
- /*SuppressUsedConversions=*/false,
+ /*SuppressUserConversions=*/false,
/*PartialOverloading=*/true, FirstArgumentIsBase);
} else {
FunctionDecl *FD = nullptr;
@@ -4685,7 +5151,7 @@ QualType Sema::ProduceCallSignatureHelp(Scope *S, Expr *Fn,
else
AddOverloadCandidate(FD, DeclAccessPair::make(FD, FD->getAccess()),
Args, CandidateSet,
- /*SuppressUsedConversions=*/false,
+ /*SuppressUserConversions=*/false,
/*PartialOverloading=*/true);
} else if (auto DC = NakedFn->getType()->getAsCXXRecordDecl()) {
@@ -4702,7 +5168,7 @@ QualType Sema::ProduceCallSignatureHelp(Scope *S, Expr *Fn,
ArgExprs.append(Args.begin(), Args.end());
AddFunctionCandidates(R.asUnresolvedSet(), ArgExprs, CandidateSet,
/*ExplicitArgs=*/nullptr,
- /*SuppressUsedConversions=*/false,
+ /*SuppressUserConversions=*/false,
/*PartialOverloading=*/true);
}
} else {
@@ -4750,13 +5216,14 @@ QualType Sema::ProduceConstructorSignatureHelp(Scope *S, QualType Type,
if (auto *FD = dyn_cast<FunctionDecl>(C)) {
AddOverloadCandidate(FD, DeclAccessPair::make(FD, C->getAccess()), Args,
CandidateSet,
- /*SuppressUsedConversions=*/false,
- /*PartialOverloading=*/true);
+ /*SuppressUserConversions=*/false,
+ /*PartialOverloading=*/true,
+ /*AllowExplicit*/ true);
} else if (auto *FTD = dyn_cast<FunctionTemplateDecl>(C)) {
AddTemplateOverloadCandidate(
FTD, DeclAccessPair::make(FTD, C->getAccess()),
/*ExplicitTemplateArgs=*/nullptr, Args, CandidateSet,
- /*SuppressUsedConversions=*/false,
+ /*SuppressUserConversions=*/false,
/*PartialOverloading=*/true);
}
}
@@ -4800,22 +5267,6 @@ void Sema::CodeCompleteInitializer(Scope *S, Decl *D) {
CodeCompleteExpression(S, Data);
}
-void Sema::CodeCompleteReturn(Scope *S) {
- QualType ResultType;
- if (isa<BlockDecl>(CurContext)) {
- if (BlockScopeInfo *BSI = getCurBlock())
- ResultType = BSI->ReturnType;
- } else if (const auto *Function = dyn_cast<FunctionDecl>(CurContext))
- ResultType = Function->getReturnType();
- else if (const auto *Method = dyn_cast<ObjCMethodDecl>(CurContext))
- ResultType = Method->getReturnType();
-
- if (ResultType.isNull())
- CodeCompleteOrdinaryName(S, PCC_Expression);
- else
- CodeCompleteExpression(S, ResultType);
-}
-
void Sema::CodeCompleteAfterIf(Scope *S) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
@@ -4845,9 +5296,7 @@ void Sema::CodeCompleteAfterIf(Scope *S) {
Results.AddResult(Builder.TakeString());
// "else if" block
- Builder.AddTypedTextChunk("else");
- Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Builder.AddTextChunk("if");
+ Builder.AddTypedTextChunk("else if");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
if (getLangOpts().CPlusPlus)
@@ -4877,93 +5326,9 @@ void Sema::CodeCompleteAfterIf(Scope *S) {
Results.data(), Results.size());
}
-static QualType getPreferredTypeOfBinaryRHS(Sema &S, Expr *LHS,
- tok::TokenKind Op) {
- if (!LHS)
- return QualType();
-
- QualType LHSType = LHS->getType();
- if (LHSType->isPointerType()) {
- if (Op == tok::plus || Op == tok::plusequal || Op == tok::minusequal)
- return S.getASTContext().getPointerDiffType();
- // Pointer difference is more common than subtracting an int from a pointer.
- if (Op == tok::minus)
- return LHSType;
- }
-
- switch (Op) {
- // No way to infer the type of RHS from LHS.
- case tok::comma:
- return QualType();
- // Prefer the type of the left operand for all of these.
- // Arithmetic operations.
- case tok::plus:
- case tok::plusequal:
- case tok::minus:
- case tok::minusequal:
- case tok::percent:
- case tok::percentequal:
- case tok::slash:
- case tok::slashequal:
- case tok::star:
- case tok::starequal:
- // Assignment.
- case tok::equal:
- // Comparison operators.
- case tok::equalequal:
- case tok::exclaimequal:
- case tok::less:
- case tok::lessequal:
- case tok::greater:
- case tok::greaterequal:
- case tok::spaceship:
- return LHS->getType();
- // Binary shifts are often overloaded, so don't try to guess those.
- case tok::greatergreater:
- case tok::greatergreaterequal:
- case tok::lessless:
- case tok::lesslessequal:
- if (LHSType->isIntegralOrEnumerationType())
- return S.getASTContext().IntTy;
- return QualType();
- // Logical operators, assume we want bool.
- case tok::ampamp:
- case tok::pipepipe:
- case tok::caretcaret:
- return S.getASTContext().BoolTy;
- // Operators often used for bit manipulation are typically used with the type
- // of the left argument.
- case tok::pipe:
- case tok::pipeequal:
- case tok::caret:
- case tok::caretequal:
- case tok::amp:
- case tok::ampequal:
- if (LHSType->isIntegralOrEnumerationType())
- return LHSType;
- return QualType();
- // RHS should be a pointer to a member of the 'LHS' type, but we can't give
- // any particular type here.
- case tok::periodstar:
- case tok::arrowstar:
- return QualType();
- default:
- // FIXME(ibiryukov): handle the missing op, re-add the assertion.
- // assert(false && "unhandled binary op");
- return QualType();
- }
-}
-
-void Sema::CodeCompleteBinaryRHS(Scope *S, Expr *LHS, tok::TokenKind Op) {
- auto PreferredType = getPreferredTypeOfBinaryRHS(*this, LHS, Op);
- if (!PreferredType.isNull())
- CodeCompleteExpression(S, PreferredType);
- else
- CodeCompleteOrdinaryName(S, PCC_Expression);
-}
-
void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
- bool EnteringContext, QualType BaseType) {
+ bool EnteringContext, QualType BaseType,
+ QualType PreferredType) {
if (SS.isEmpty() || !CodeCompleter)
return;
@@ -4972,9 +5337,24 @@ void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
// it can be useful for global code completion which have information about
// contexts/symbols that are not in the AST.
if (SS.isInvalid()) {
- CodeCompletionContext CC(CodeCompletionContext::CCC_Symbol);
+ CodeCompletionContext CC(CodeCompletionContext::CCC_Symbol, PreferredType);
CC.setCXXScopeSpecifier(SS);
- HandleCodeCompleteResults(this, CodeCompleter, CC, nullptr, 0);
+ // As SS is invalid, we try to collect accessible contexts from the current
+ // scope with a dummy lookup so that the completion consumer can try to
+ // guess what the specified scope is.
+ ResultBuilder DummyResults(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(), CC);
+ if (!PreferredType.isNull())
+ DummyResults.setPreferredType(PreferredType);
+ if (S->getEntity()) {
+ CodeCompletionDeclConsumer Consumer(DummyResults, S->getEntity(),
+ BaseType);
+ LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
+ /*IncludeGlobalScope=*/false,
+ /*LoadExternal=*/false);
+ }
+ HandleCodeCompleteResults(this, CodeCompleter,
+ DummyResults.getCompletionContext(), nullptr, 0);
return;
}
// Always pretend to enter a context to ensure that a dependent type
@@ -4988,9 +5368,12 @@ void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
if (!isDependentScopeSpecifier(SS) && RequireCompleteDeclContext(SS, Ctx))
return;
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
- CodeCompleter->getCodeCompletionTUInfo(),
- CodeCompletionContext::CCC_Symbol);
+ ResultBuilder Results(
+ *this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext(CodeCompletionContext::CCC_Symbol, PreferredType));
+ if (!PreferredType.isNull())
+ Results.setPreferredType(PreferredType);
Results.EnterNewScope();
// The "template" keyword can follow "::" in the grammar, but only
@@ -5149,9 +5532,10 @@ void Sema::CodeCompleteOperatorName(Scope *S) {
&ResultBuilder::IsType);
Results.EnterNewScope();
- // Add the names of overloadable operators.
+ // Add the names of overloadable operators. Note that OO_Conditional is not
+ // actually overloadable.
#define OVERLOADED_OPERATOR(Name, Spelling, Token, Unary, Binary, MemberOnly) \
- if (std::strcmp(Spelling, "?")) \
+ if (OO_##Name != OO_Conditional) \
Results.AddResult(Result(Spelling));
#include "clang/Basic/OperatorKinds.def"
@@ -6287,8 +6671,9 @@ void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
SourceLocation TemplateKWLoc;
UnqualifiedId id;
id.setIdentifier(Super, SuperLoc);
- ExprResult SuperExpr =
- ActOnIdExpression(S, SS, TemplateKWLoc, id, false, false);
+ ExprResult SuperExpr = ActOnIdExpression(S, SS, TemplateKWLoc, id,
+ /*HasTrailingLParen=*/false,
+ /*IsAddressOfOperand=*/false);
return CodeCompleteObjCInstanceMessage(S, (Expr *)SuperExpr.get(),
SelIdents, AtArgumentExpression);
}
@@ -8218,8 +8603,7 @@ void Sema::CodeCompletePreprocessorExpression() {
if (!CodeCompleter || CodeCompleter->includeMacros())
AddMacroResults(PP, Results,
- CodeCompleter ? CodeCompleter->loadExternal() : false,
- true);
+ !CodeCompleter || CodeCompleter->loadExternal(), true);
// defined (<macro>)
Results.EnterNewScope();
@@ -8258,7 +8642,8 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
// We need the native slashes for the actual file system interactions.
SmallString<128> NativeRelDir = StringRef(RelDir);
llvm::sys::path::native(NativeRelDir);
- auto FS = getSourceManager().getFileManager().getVirtualFileSystem();
+ llvm::vfs::FileSystem &FS =
+ getSourceManager().getFileManager().getVirtualFileSystem();
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
@@ -8284,20 +8669,39 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
};
// Helper: scans IncludeDir for nice files, and adds results for each.
- auto AddFilesFromIncludeDir = [&](StringRef IncludeDir, bool IsSystem) {
+ auto AddFilesFromIncludeDir = [&](StringRef IncludeDir,
+ bool IsSystem,
+ DirectoryLookup::LookupType_t LookupType) {
llvm::SmallString<128> Dir = IncludeDir;
- if (!NativeRelDir.empty())
- llvm::sys::path::append(Dir, NativeRelDir);
+ if (!NativeRelDir.empty()) {
+ if (LookupType == DirectoryLookup::LT_Framework) {
+ // For a framework dir, #include <Foo/Bar/> actually maps to
+ // a path of Foo.framework/Headers/Bar/.
+ auto Begin = llvm::sys::path::begin(NativeRelDir);
+ auto End = llvm::sys::path::end(NativeRelDir);
+
+ llvm::sys::path::append(Dir, *Begin + ".framework", "Headers");
+ llvm::sys::path::append(Dir, ++Begin, End);
+ } else {
+ llvm::sys::path::append(Dir, NativeRelDir);
+ }
+ }
std::error_code EC;
unsigned Count = 0;
- for (auto It = FS->dir_begin(Dir, EC);
+ for (auto It = FS.dir_begin(Dir, EC);
!EC && It != llvm::vfs::directory_iterator(); It.increment(EC)) {
if (++Count == 2500) // If we happen to hit a huge directory,
break; // bail out early so we're not too slow.
StringRef Filename = llvm::sys::path::filename(It->path());
switch (It->type()) {
case llvm::sys::fs::file_type::directory_file:
+ // All entries in a framework directory must have a ".framework" suffix,
+ // but the suffix does not appear in the source code's include/import.
+ if (LookupType == DirectoryLookup::LT_Framework &&
+ NativeRelDir.empty() && !Filename.consume_back(".framework"))
+ break;
+
AddCompletion(Filename, /*IsDirectory=*/true);
break;
case llvm::sys::fs::file_type::regular_file:
@@ -8326,10 +8730,12 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
// header maps are not (currently) enumerable.
break;
case DirectoryLookup::LT_NormalDir:
- AddFilesFromIncludeDir(IncludeDir.getDir()->getName(), IsSystem);
+ AddFilesFromIncludeDir(IncludeDir.getDir()->getName(), IsSystem,
+ DirectoryLookup::LT_NormalDir);
break;
case DirectoryLookup::LT_Framework:
- AddFilesFromIncludeDir(IncludeDir.getFrameworkDir()->getName(), IsSystem);
+ AddFilesFromIncludeDir(IncludeDir.getFrameworkDir()->getName(), IsSystem,
+ DirectoryLookup::LT_Framework);
break;
}
};
@@ -8343,7 +8749,8 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
// The current directory is on the include path for "quoted" includes.
auto *CurFile = PP.getCurrentFileLexer()->getFileEntry();
if (CurFile && CurFile->getDir())
- AddFilesFromIncludeDir(CurFile->getDir()->getName(), false);
+ AddFilesFromIncludeDir(CurFile->getDir()->getName(), false,
+ DirectoryLookup::LT_NormalDir);
for (const auto &D : make_range(S.quoted_dir_begin(), S.quoted_dir_end()))
AddFilesFromDirLookup(D, false);
}
@@ -8393,8 +8800,7 @@ void Sema::GatherGlobalCodeCompletions(
if (!CodeCompleter || CodeCompleter->includeMacros())
AddMacroResults(PP, Builder,
- CodeCompleter ? CodeCompleter->loadExternal() : false,
- true);
+ !CodeCompleter || CodeCompleter->loadExternal(), true);
Results.clear();
Results.insert(Results.end(), Builder.data(),
diff --git a/lib/Sema/SemaConsumer.cpp b/lib/Sema/SemaConsumer.cpp
index d83a13e2f175..02623be00c9c 100644
--- a/lib/Sema/SemaConsumer.cpp
+++ b/lib/Sema/SemaConsumer.cpp
@@ -1,9 +1,8 @@
//===-- SemaConsumer.cpp - Abstract interface for AST semantics -*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Sema/SemaCoroutine.cpp b/lib/Sema/SemaCoroutine.cpp
index 181efa6d3dd0..f0347af6a1bb 100644
--- a/lib/Sema/SemaCoroutine.cpp
+++ b/lib/Sema/SemaCoroutine.cpp
@@ -1,9 +1,8 @@
-//===--- SemaCoroutines.cpp - Semantic Analysis for Coroutines ------------===//
+//===-- SemaCoroutine.cpp - Semantic Analysis for Coroutines --------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -186,21 +185,8 @@ static QualType lookupCoroutineHandleType(Sema &S, QualType PromiseType,
static bool isValidCoroutineContext(Sema &S, SourceLocation Loc,
StringRef Keyword) {
- // 'co_await' and 'co_yield' are not permitted in unevaluated operands,
- // such as subexpressions of \c sizeof.
- //
- // [expr.await]p2, emphasis added: "An await-expression shall appear only in
- // a *potentially evaluated* expression within the compound-statement of a
- // function-body outside of a handler [...] A context within a function where
- // an await-expression can appear is called a suspension context of the
- // function." And per [expr.yield]p1: "A yield-expression shall appear only
- // within a suspension context of a function."
- if (S.isUnevaluatedContext()) {
- S.Diag(Loc, diag::err_coroutine_unevaluated_context) << Keyword;
- return false;
- }
-
- // Per [expr.await]p2, any other usage must be within a function.
+ // [expr.await]p2 dictates that 'co_await' and 'co_yield' must be used within
+ // a function body.
// FIXME: This also covers [expr.await]p2: "An await-expression shall not
// appear in a default argument." But the diagnostic QoI here could be
// improved to inform the user that default arguments specifically are not
@@ -218,12 +204,11 @@ static bool isValidCoroutineContext(Sema &S, SourceLocation Loc,
enum InvalidFuncDiag {
DiagCtor = 0,
DiagDtor,
- DiagCopyAssign,
- DiagMoveAssign,
DiagMain,
DiagConstexpr,
DiagAutoRet,
DiagVarargs,
+ DiagConsteval,
};
bool Diagnosed = false;
auto DiagInvalid = [&](InvalidFuncDiag ID) {
@@ -232,23 +217,15 @@ static bool isValidCoroutineContext(Sema &S, SourceLocation Loc,
return false;
};
- // Diagnose when a constructor, destructor, copy/move assignment operator,
+  // Diagnose when a constructor, destructor,
// or the function 'main' are declared as a coroutine.
auto *MD = dyn_cast<CXXMethodDecl>(FD);
- // [class.ctor]p6: "A constructor shall not be a coroutine."
+ // [class.ctor]p11: "A constructor shall not be a coroutine."
if (MD && isa<CXXConstructorDecl>(MD))
return DiagInvalid(DiagCtor);
// [class.dtor]p17: "A destructor shall not be a coroutine."
else if (MD && isa<CXXDestructorDecl>(MD))
return DiagInvalid(DiagDtor);
- // N4499 [special]p6: "A special member function shall not be a coroutine."
- // Per C++ [special]p1, special member functions are the "default constructor,
- // copy constructor and copy assignment operator, move constructor and move
- // assignment operator, and destructor."
- else if (MD && MD->isCopyAssignmentOperator())
- return DiagInvalid(DiagCopyAssign);
- else if (MD && MD->isMoveAssignmentOperator())
- return DiagInvalid(DiagMoveAssign);
// [basic.start.main]p3: "The function main shall not be a coroutine."
else if (FD->isMain())
return DiagInvalid(DiagMain);
@@ -258,7 +235,7 @@ static bool isValidCoroutineContext(Sema &S, SourceLocation Loc,
// evaluation of e [...] would evaluate one of the following expressions:
// [...] an await-expression [...] a yield-expression."
if (FD->isConstexpr())
- DiagInvalid(DiagConstexpr);
+ DiagInvalid(FD->isConsteval() ? DiagConsteval : DiagConstexpr);
// [dcl.spec.auto]p15: "A function declared with a return type that uses a
// placeholder type shall not be a coroutine."
if (FD->getReturnType()->isUndeducedType())
@@ -326,7 +303,7 @@ static Expr *buildBuiltinCall(Sema &S, SourceLocation Loc, Builtin::ID Id,
assert(DeclRef.isUsable() && "Builtin reference cannot fail");
ExprResult Call =
- S.ActOnCallExpr(/*Scope=*/nullptr, DeclRef.get(), Loc, CallArgs, Loc);
+ S.BuildCallExpr(/*Scope=*/nullptr, DeclRef.get(), Loc, CallArgs, Loc);
assert(!Call.isInvalid() && "Call to builtin cannot fail!");
return Call.get();
@@ -356,7 +333,7 @@ static ExprResult buildCoroutineHandle(Sema &S, QualType PromiseType,
if (FromAddr.isInvalid())
return ExprError();
- return S.ActOnCallExpr(nullptr, FromAddr.get(), Loc, FramePtr, Loc);
+ return S.BuildCallExpr(nullptr, FromAddr.get(), Loc, FramePtr, Loc);
}
struct ReadySuspendResumeResult {
@@ -388,7 +365,7 @@ static ExprResult buildMemberCall(Sema &S, Expr *Base, SourceLocation Loc,
return ExprError();
}
- return S.ActOnCallExpr(nullptr, Result.get(), Loc, Args, Loc, nullptr);
+ return S.BuildCallExpr(nullptr, Result.get(), Loc, Args, Loc, nullptr);
}
// See if return type is coroutine-handle and if so, invoke builtin coro-resume
@@ -669,12 +646,57 @@ bool Sema::ActOnCoroutineBodyStart(Scope *SC, SourceLocation KWLoc,
return true;
}
+// Recursively walks up the scope hierarchy until either a 'catch' or a function
+// scope is found, whichever comes first.
+static bool isWithinCatchScope(Scope *S) {
+ // 'co_await' and 'co_yield' keywords are disallowed within catch blocks, but
+ // lambdas that use 'co_await' are allowed. The loop below ends when a
+ // function scope is found in order to ensure the following behavior:
+ //
+ // void foo() { // <- function scope
+ // try { //
+ // co_await x; // <- 'co_await' is OK within a function scope
+ // } catch { // <- catch scope
+ // co_await x; // <- 'co_await' is not OK within a catch scope
+ // []() { // <- function scope
+ // co_await x; // <- 'co_await' is OK within a function scope
+ // }();
+ // }
+ // }
+ while (S && !(S->getFlags() & Scope::FnScope)) {
+ if (S->getFlags() & Scope::CatchScope)
+ return true;
+ S = S->getParent();
+ }
+ return false;
+}
+
+// [expr.await]p2, emphasis added: "An await-expression shall appear only in
+// a *potentially evaluated* expression within the compound-statement of a
+// function-body *outside of a handler* [...] A context within a function
+// where an await-expression can appear is called a suspension context of the
+// function."
+static void checkSuspensionContext(Sema &S, SourceLocation Loc,
+ StringRef Keyword) {
+ // First emphasis of [expr.await]p2: must be a potentially evaluated context.
+ // That is, 'co_await' and 'co_yield' cannot appear in subexpressions of
+ // \c sizeof.
+ if (S.isUnevaluatedContext())
+ S.Diag(Loc, diag::err_coroutine_unevaluated_context) << Keyword;
+
+ // Second emphasis of [expr.await]p2: must be outside of an exception handler.
+ if (isWithinCatchScope(S.getCurScope()))
+ S.Diag(Loc, diag::err_coroutine_within_handler) << Keyword;
+}
+
ExprResult Sema::ActOnCoawaitExpr(Scope *S, SourceLocation Loc, Expr *E) {
if (!ActOnCoroutineBodyStart(S, Loc, "co_await")) {
CorrectDelayedTyposInExpr(E);
return ExprError();
}
+ checkSuspensionContext(*this, Loc, "co_await");
+
if (E->getType()->isPlaceholderType()) {
ExprResult R = CheckPlaceholderExpr(E);
if (R.isInvalid()) return ExprError();
@@ -772,6 +794,8 @@ ExprResult Sema::ActOnCoyieldExpr(Scope *S, SourceLocation Loc, Expr *E) {
return ExprError();
}
+ checkSuspensionContext(*this, Loc, "co_yield");
+
// Build yield_value call.
ExprResult Awaitable = buildPromiseCall(
*this, getCurFunction()->CoroutinePromise, Loc, "yield_value", E);
@@ -1072,7 +1096,7 @@ bool CoroutineStmtBuilder::makeReturnOnAllocFailure() {
return false;
ExprResult ReturnObjectOnAllocationFailure =
- S.ActOnCallExpr(nullptr, DeclNameExpr.get(), Loc, {}, Loc);
+ S.BuildCallExpr(nullptr, DeclNameExpr.get(), Loc, {}, Loc);
if (ReturnObjectOnAllocationFailure.isInvalid())
return false;
@@ -1235,7 +1259,7 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
NewArgs.push_back(Arg);
ExprResult NewExpr =
- S.ActOnCallExpr(S.getCurScope(), NewRef.get(), Loc, NewArgs, Loc);
+ S.BuildCallExpr(S.getCurScope(), NewRef.get(), Loc, NewArgs, Loc);
NewExpr = S.ActOnFinishFullExpr(NewExpr.get(), /*DiscardedValue*/ false);
if (NewExpr.isInvalid())
return false;
@@ -1261,7 +1285,7 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
DeleteArgs.push_back(FrameSize);
ExprResult DeleteExpr =
- S.ActOnCallExpr(S.getCurScope(), DeleteRef.get(), Loc, DeleteArgs, Loc);
+ S.BuildCallExpr(S.getCurScope(), DeleteRef.get(), Loc, DeleteArgs, Loc);
DeleteExpr =
S.ActOnFinishFullExpr(DeleteExpr.get(), /*DiscardedValue*/ false);
if (DeleteExpr.isInvalid())
diff --git a/lib/Sema/SemaDecl.cpp b/lib/Sema/SemaDecl.cpp
index 23c99d45a78d..a6c52b7d4b2b 100644
--- a/lib/Sema/SemaDecl.cpp
+++ b/lib/Sema/SemaDecl.cpp
@@ -1,9 +1,8 @@
//===--- SemaDecl.cpp - Semantic Analysis for Declarations ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -23,6 +22,7 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/PartialDiagnostic.h"
@@ -62,7 +62,7 @@ Sema::DeclGroupPtrTy Sema::ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType) {
namespace {
-class TypeNameValidatorCCC : public CorrectionCandidateCallback {
+class TypeNameValidatorCCC final : public CorrectionCandidateCallback {
public:
TypeNameValidatorCCC(bool AllowInvalid, bool WantClass = false,
bool AllowTemplates = false,
@@ -106,6 +106,10 @@ class TypeNameValidatorCCC : public CorrectionCandidateCallback {
return !WantClassName && candidate.isKeyword();
}
+ std::unique_ptr<CorrectionCandidateCallback> clone() override {
+ return llvm::make_unique<TypeNameValidatorCCC>(*this);
+ }
+
private:
bool AllowInvalidDecl;
bool WantClassName;
@@ -368,11 +372,10 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
case LookupResult::NotFound:
case LookupResult::NotFoundInCurrentInstantiation:
if (CorrectedII) {
- TypoCorrection Correction =
- CorrectTypo(Result.getLookupNameInfo(), Kind, S, SS,
- llvm::make_unique<TypeNameValidatorCCC>(
- true, isClassName, AllowDeducedTemplate),
- CTK_ErrorRecovery);
+ TypeNameValidatorCCC CCC(/*AllowInvalid=*/true, isClassName,
+ AllowDeducedTemplate);
+ TypoCorrection Correction = CorrectTypo(Result.getLookupNameInfo(), Kind,
+ S, SS, CCC, CTK_ErrorRecovery);
IdentifierInfo *NewII = Correction.getCorrectionAsIdentifierInfo();
TemplateTy Template;
bool MemberOfUnknownSpecialization;
@@ -665,11 +668,12 @@ void Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II,
// There may have been a typo in the name of the type. Look up typo
// results, in case we have something that we can suggest.
+ TypeNameValidatorCCC CCC(/*AllowInvalid=*/false, /*WantClass=*/false,
+ /*AllowTemplates=*/IsTemplateName,
+ /*AllowNonTemplates=*/!IsTemplateName);
if (TypoCorrection Corrected =
CorrectTypo(DeclarationNameInfo(II, IILoc), LookupOrdinaryName, S, SS,
- llvm::make_unique<TypeNameValidatorCCC>(
- false, false, IsTemplateName, !IsTemplateName),
- CTK_ErrorRecovery)) {
+ CCC, CTK_ErrorRecovery)) {
// FIXME: Support error recovery for the template-name case.
bool CanRecover = !IsTemplateName;
if (Corrected.isKeyword()) {
@@ -712,7 +716,7 @@ void Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II,
getTypeName(*Corrected.getCorrectionAsIdentifierInfo(), IILoc, S,
tmpSS.isSet() ? &tmpSS : SS, false, false, nullptr,
/*IsCtorOrDtorName=*/false,
- /*NonTrivialTypeSourceInfo=*/true);
+ /*WantNontrivialTypeSourceInfo=*/true);
}
return;
}
@@ -844,8 +848,7 @@ static ParsedType buildNestedType(Sema &S, CXXScopeSpec &SS,
Sema::NameClassification
Sema::ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
SourceLocation NameLoc, const Token &NextToken,
- bool IsAddressOfOperand,
- std::unique_ptr<CorrectionCandidateCallback> CCC) {
+ bool IsAddressOfOperand, CorrectionCandidateCallback *CCC) {
DeclarationNameInfo NameInfo(Name, NameLoc);
ObjCMethodDecl *CurMethod = getCurMethodDecl();
@@ -915,6 +918,16 @@ Corrected:
}
}
+ if (getLangOpts().CPlusPlus2a && !SS.isSet() && NextToken.is(tok::less)) {
+ // In C++20 onwards, this could be an ADL-only call to a function
+ // template, and we're required to assume that this is a template name.
+ //
+ // FIXME: Find a way to still do typo correction in this case.
+ TemplateName Template =
+ Context.getAssumedTemplateName(NameInfo.getName());
+ return NameClassification::UndeclaredTemplate(Template);
+ }
+
// In C, we first see whether there is a tag type by the same name, in
// which case it's likely that the user just forgot to write "enum",
// "struct", or "union".
@@ -927,10 +940,9 @@ Corrected:
// close to this name.
if (!SecondTry && CCC) {
SecondTry = true;
- if (TypoCorrection Corrected = CorrectTypo(Result.getLookupNameInfo(),
- Result.getLookupKind(), S,
- &SS, std::move(CCC),
- CTK_ErrorRecovery)) {
+ if (TypoCorrection Corrected =
+ CorrectTypo(Result.getLookupNameInfo(), Result.getLookupKind(), S,
+ &SS, *CCC, CTK_ErrorRecovery)) {
unsigned UnqualifiedDiag = diag::err_undeclared_var_use_suggest;
unsigned QualifiedDiag = diag::err_no_member_suggest;
@@ -1018,7 +1030,8 @@ Corrected:
case LookupResult::Ambiguous:
if (getLangOpts().CPlusPlus && NextToken.is(tok::less) &&
- hasAnyAcceptableTemplateNames(Result)) {
+ hasAnyAcceptableTemplateNames(Result, /*AllowFunctionTemplates=*/true,
+ /*AllowDependent=*/false)) {
// C++ [temp.local]p3:
// A lookup that finds an injected-class-name (10.2) can result in an
// ambiguity in certain cases (for example, if it is found in more than
@@ -1042,50 +1055,63 @@ Corrected:
}
if (getLangOpts().CPlusPlus && NextToken.is(tok::less) &&
- (IsFilteredTemplateName || hasAnyAcceptableTemplateNames(Result))) {
+ (IsFilteredTemplateName ||
+ hasAnyAcceptableTemplateNames(
+ Result, /*AllowFunctionTemplates=*/true,
+ /*AllowDependent=*/false,
+ /*AllowNonTemplateFunctions*/ !SS.isSet() &&
+ getLangOpts().CPlusPlus2a))) {
// C++ [temp.names]p3:
// After name lookup (3.4) finds that a name is a template-name or that
// an operator-function-id or a literal- operator-id refers to a set of
// overloaded functions any member of which is a function template if
// this is followed by a <, the < is always taken as the delimiter of a
// template-argument-list and never as the less-than operator.
+ // C++2a [temp.names]p2:
+ // A name is also considered to refer to a template if it is an
+ // unqualified-id followed by a < and name lookup finds either one
+ // or more functions or finds nothing.
if (!IsFilteredTemplateName)
FilterAcceptableTemplateNames(Result);
- if (!Result.empty()) {
- bool IsFunctionTemplate;
- bool IsVarTemplate;
- TemplateName Template;
- if (Result.end() - Result.begin() > 1) {
- IsFunctionTemplate = true;
- Template = Context.getOverloadedTemplateName(Result.begin(),
- Result.end());
- } else {
- TemplateDecl *TD
- = cast<TemplateDecl>((*Result.begin())->getUnderlyingDecl());
- IsFunctionTemplate = isa<FunctionTemplateDecl>(TD);
- IsVarTemplate = isa<VarTemplateDecl>(TD);
-
- if (SS.isSet() && !SS.isInvalid())
- Template = Context.getQualifiedTemplateName(SS.getScopeRep(),
- /*TemplateKeyword=*/false,
- TD);
- else
- Template = TemplateName(TD);
- }
-
- if (IsFunctionTemplate) {
- // Function templates always go through overload resolution, at which
- // point we'll perform the various checks (e.g., accessibility) we need
- // to based on which function we selected.
- Result.suppressDiagnostics();
+ bool IsFunctionTemplate;
+ bool IsVarTemplate;
+ TemplateName Template;
+ if (Result.end() - Result.begin() > 1) {
+ IsFunctionTemplate = true;
+ Template = Context.getOverloadedTemplateName(Result.begin(),
+ Result.end());
+ } else if (!Result.empty()) {
+ auto *TD = cast<TemplateDecl>(getAsTemplateNameDecl(
+ *Result.begin(), /*AllowFunctionTemplates=*/true,
+ /*AllowDependent=*/false));
+ IsFunctionTemplate = isa<FunctionTemplateDecl>(TD);
+ IsVarTemplate = isa<VarTemplateDecl>(TD);
+
+ if (SS.isSet() && !SS.isInvalid())
+ Template =
+ Context.getQualifiedTemplateName(SS.getScopeRep(),
+ /*TemplateKeyword=*/false, TD);
+ else
+ Template = TemplateName(TD);
+ } else {
+ // All results were non-template functions. This is a function template
+ // name.
+ IsFunctionTemplate = true;
+ Template = Context.getAssumedTemplateName(NameInfo.getName());
+ }
- return NameClassification::FunctionTemplate(Template);
- }
+ if (IsFunctionTemplate) {
+ // Function templates always go through overload resolution, at which
+ // point we'll perform the various checks (e.g., accessibility) we need
+ // to based on which function we selected.
+ Result.suppressDiagnostics();
- return IsVarTemplate ? NameClassification::VarTemplate(Template)
- : NameClassification::TypeTemplate(Template);
+ return NameClassification::FunctionTemplate(Template);
}
+
+ return IsVarTemplate ? NameClassification::VarTemplate(Template)
+ : NameClassification::TypeTemplate(Template);
}
NamedDecl *FirstDecl = (*Result.begin())->getUnderlyingDecl();
@@ -1164,6 +1190,8 @@ Sema::getTemplateNameKindForDiagnostics(TemplateName Name) {
return TemplateNameKindForDiagnostics::AliasTemplate;
if (isa<TemplateTemplateParmDecl>(TD))
return TemplateNameKindForDiagnostics::TemplateTemplateParam;
+ if (isa<ConceptDecl>(TD))
+ return TemplateNameKindForDiagnostics::Concept;
return TemplateNameKindForDiagnostics::DependentTemplate;
}
@@ -1399,11 +1427,6 @@ void Sema::PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext) {
}
}
-void Sema::pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name) {
- if (IdResolver.tryAddTopLevelDecl(D, Name) && TUScope)
- TUScope->AddDecl(D);
-}
-
bool Sema::isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S,
bool AllowInlineNamespace) {
return IdResolver.isDeclInScope(D, Ctx, S, AllowInlineNamespace);
@@ -1461,12 +1484,17 @@ bool Sema::CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old) {
Module *NewM = New->getOwningModule();
Module *OldM = Old->getOwningModule();
+
+ if (NewM && NewM->Kind == Module::PrivateModuleFragment)
+ NewM = NewM->Parent;
+ if (OldM && OldM->Kind == Module::PrivateModuleFragment)
+ OldM = OldM->Parent;
+
if (NewM == OldM)
return false;
- // FIXME: Check proclaimed-ownership-declarations here too.
- bool NewIsModuleInterface = NewM && NewM->Kind == Module::ModuleInterfaceUnit;
- bool OldIsModuleInterface = OldM && OldM->Kind == Module::ModuleInterfaceUnit;
+ bool NewIsModuleInterface = NewM && NewM->isModulePurview();
+ bool OldIsModuleInterface = OldM && OldM->isModulePurview();
if (NewIsModuleInterface || OldIsModuleInterface) {
// C++ Modules TS [basic.def.odr] 6.2/6.7 [sic]:
// if a declaration of D [...] appears in the purview of a module, all
@@ -1862,10 +1890,10 @@ ObjCInterfaceDecl *Sema::getObjCInterfaceDecl(IdentifierInfo *&Id,
if (!IDecl && DoTypoCorrection) {
// Perform typo correction at the given location, but only if we
// find an Objective-C class name.
- if (TypoCorrection C = CorrectTypo(
- DeclarationNameInfo(Id, IdLoc), LookupOrdinaryName, TUScope, nullptr,
- llvm::make_unique<DeclFilterCCC<ObjCInterfaceDecl>>(),
- CTK_ErrorRecovery)) {
+ DeclFilterCCC<ObjCInterfaceDecl> CCC{};
+ if (TypoCorrection C =
+ CorrectTypo(DeclarationNameInfo(Id, IdLoc), LookupOrdinaryName,
+ TUScope, nullptr, CCC, CTK_ErrorRecovery)) {
diagnoseTypo(C, PDiag(diag::err_undef_interface_suggest) << Id);
IDecl = C.getCorrectionDeclAs<ObjCInterfaceDecl>();
Id = IDecl->getIdentifier();
@@ -1927,10 +1955,13 @@ static void LookupPredefedObjCSuperType(Sema &ThisSema, Scope *S,
Context.setObjCSuperType(Context.getTagDeclType(TD));
}
-static StringRef getHeaderName(ASTContext::GetBuiltinTypeError Error) {
+static StringRef getHeaderName(Builtin::Context &BuiltinInfo, unsigned ID,
+ ASTContext::GetBuiltinTypeError Error) {
switch (Error) {
case ASTContext::GE_None:
return "";
+ case ASTContext::GE_Missing_type:
+ return BuiltinInfo.getHeaderName(ID);
case ASTContext::GE_Missing_stdio:
return "stdio.h";
case ASTContext::GE_Missing_setjmp:
@@ -1955,7 +1986,8 @@ NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
if (Error) {
if (ForRedeclaration)
Diag(Loc, diag::warn_implicit_decl_requires_sysheader)
- << getHeaderName(Error) << Context.BuiltinInfo.getName(ID);
+ << getHeaderName(Context.BuiltinInfo, ID, Error)
+ << Context.BuiltinInfo.getName(ID);
return nullptr;
}
@@ -2427,13 +2459,11 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
InheritableAttr *NewAttr = nullptr;
unsigned AttrSpellingListIndex = Attr->getSpellingListIndex();
if (const auto *AA = dyn_cast<AvailabilityAttr>(Attr))
- NewAttr = S.mergeAvailabilityAttr(D, AA->getRange(), AA->getPlatform(),
- AA->isImplicit(), AA->getIntroduced(),
- AA->getDeprecated(),
- AA->getObsoleted(), AA->getUnavailable(),
- AA->getMessage(), AA->getStrict(),
- AA->getReplacement(), AMK,
- AttrSpellingListIndex);
+ NewAttr = S.mergeAvailabilityAttr(
+ D, AA->getRange(), AA->getPlatform(), AA->isImplicit(),
+ AA->getIntroduced(), AA->getDeprecated(), AA->getObsoleted(),
+ AA->getUnavailable(), AA->getMessage(), AA->getStrict(),
+ AA->getReplacement(), AMK, AA->getPriority(), AttrSpellingListIndex);
else if (const auto *VA = dyn_cast<VisibilityAttr>(Attr))
NewAttr = S.mergeVisibilityAttr(D, VA->getRange(), VA->getVisibility(),
AttrSpellingListIndex);
@@ -2489,6 +2519,10 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
else if (const auto *UA = dyn_cast<UuidAttr>(Attr))
NewAttr = S.mergeUuidAttr(D, UA->getRange(), AttrSpellingListIndex,
UA->getGuid());
+ else if (const auto *SLHA = dyn_cast<SpeculativeLoadHardeningAttr>(Attr))
+ NewAttr = S.mergeSpeculativeLoadHardeningAttr(D, *SLHA);
+ else if (const auto *SLHA = dyn_cast<NoSpeculativeLoadHardeningAttr>(Attr))
+ NewAttr = S.mergeNoSpeculativeLoadHardeningAttr(D, *SLHA);
else if (Attr->shouldInheritEvenIfAlreadyPresent() || !DeclHasAttr(D, Attr))
NewAttr = cast<InheritableAttr>(Attr->clone(S.Context));
@@ -2926,7 +2960,8 @@ static bool hasIdenticalPassObjectSizeAttrs(const FunctionDecl *A,
const auto *AttrB = B->getAttr<PassObjectSizeAttr>();
if (AttrA == AttrB)
return true;
- return AttrA && AttrB && AttrA->getType() == AttrB->getType();
+ return AttrA && AttrB && AttrA->getType() == AttrB->getType() &&
+ AttrA->isDynamic() == AttrB->isDynamic();
};
return std::equal(A->param_begin(), A->param_end(), B->param_begin(), AttrEq);
@@ -3126,6 +3161,15 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
// there but not here.
NewTypeInfo = NewTypeInfo.withCallingConv(OldTypeInfo.getCC());
RequiresAdjustment = true;
+ } else if (New->getBuiltinID()) {
+ // Calling Conventions on a Builtin aren't really useful and setting a
+ // default calling convention and cdecl'ing some builtin redeclarations is
+ // common, so warn and ignore the calling convention on the redeclaration.
+ Diag(New->getLocation(), diag::warn_cconv_unsupported)
+ << FunctionType::getNameForCallConv(NewTypeInfo.getCC())
+ << (int)CallingConventionIgnoredReason::BuiltinFunction;
+ NewTypeInfo = NewTypeInfo.withCallingConv(OldTypeInfo.getCC());
+ RequiresAdjustment = true;
} else {
// Calling conventions aren't compatible, so complain.
bool FirstCCExplicit = getCallingConvAttributedType(First->getType());
@@ -3194,7 +3238,6 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
AdjustedType = Context.adjustFunctionType(AdjustedType, NewTypeInfo);
New->setType(QualType(AdjustedType, 0));
NewQType = Context.getCanonicalType(New->getType());
- NewType = cast<FunctionType>(NewQType);
}
// If this redeclaration makes the function inline, we may need to add it to
@@ -4251,14 +4294,18 @@ Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
Diag(DS.getInlineSpecLoc(), diag::err_inline_non_function)
<< getLangOpts().CPlusPlus17;
- if (DS.isConstexprSpecified()) {
+ if (DS.hasConstexprSpecifier()) {
// C++0x [dcl.constexpr]p1: constexpr can only be applied to declarations
// and definitions of functions and variables.
+ // C++2a [dcl.constexpr]p1: The consteval specifier shall be applied only to
+ // the declaration of a function or function template
+ bool IsConsteval = DS.getConstexprSpecifier() == CSK_consteval;
if (Tag)
Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_tag)
- << GetDiagnosticTypeSpecifierID(DS.getTypeSpecType());
+ << GetDiagnosticTypeSpecifierID(DS.getTypeSpecType()) << IsConsteval;
else
- Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_no_declarators);
+ Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_wrong_decl_kind)
+ << IsConsteval;
// Don't emit warnings after this error.
return TagD;
}
@@ -4796,6 +4843,18 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
Invalid = true;
}
+ // C++ [dcl.dcl]p3:
+ // [If there are no declarators], and except for the declaration of an
+ // unnamed bit-field, the decl-specifier-seq shall introduce one or more
+ // names into the program
+ // C++ [class.mem]p2:
+ // each such member-declaration shall either declare at least one member
+ // name of the class or declare at least one unnamed bit-field
+ //
+ // For C this is an error even for a named struct, and is diagnosed elsewhere.
+ if (getLangOpts().CPlusPlus && Record->field_empty())
+ Diag(DS.getBeginLoc(), diag::ext_no_declarators) << DS.getSourceRange();
+
// Mock up a declarator.
Declarator Dc(DS, DeclaratorContext::MemberContext);
TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
@@ -5082,7 +5141,7 @@ static bool hasSimilarParameters(ASTContext &Context,
QualType DefParamTy = Definition->getParamDecl(Idx)->getType();
// The parameter types are identical
- if (Context.hasSameType(DefParamTy, DeclParamTy))
+ if (Context.hasSameUnqualifiedType(DefParamTy, DeclParamTy))
continue;
QualType DeclParamBaseTy = getCoreType(DeclParamTy);
@@ -5672,7 +5731,7 @@ void Sema::DiagnoseFunctionSpecifiers(const DeclSpec &DS) {
Diag(DS.getVirtualSpecLoc(),
diag::err_virtual_non_function);
- if (DS.isExplicitSpecified())
+ if (DS.hasExplicitSpecifier())
Diag(DS.getExplicitSpecLoc(),
diag::err_explicit_non_function);
@@ -5699,9 +5758,9 @@ Sema::ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
if (D.getDeclSpec().isInlineSpecified())
Diag(D.getDeclSpec().getInlineSpecLoc(), diag::err_inline_non_function)
<< getLangOpts().CPlusPlus17;
- if (D.getDeclSpec().isConstexprSpecified())
+ if (D.getDeclSpec().hasConstexprSpecifier())
Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_invalid_constexpr)
- << 1;
+ << 1 << (D.getDeclSpec().getConstexprSpecifier() == CSK_consteval);
if (D.getName().Kind != UnqualifiedIdKind::IK_Identifier) {
if (D.getName().Kind == UnqualifiedIdKind::IK_DeductionGuideName)
@@ -5955,10 +6014,24 @@ static void checkAttributesAfterMerging(Sema &S, NamedDecl &ND) {
}
if (const InheritableAttr *Attr = getDLLAttr(&ND)) {
+ auto *VD = dyn_cast<VarDecl>(&ND);
+ bool IsAnonymousNS = false;
+ bool IsMicrosoft = S.Context.getTargetInfo().getCXXABI().isMicrosoft();
+ if (VD) {
+ const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(VD->getDeclContext());
+ while (NS && !IsAnonymousNS) {
+ IsAnonymousNS = NS->isAnonymousNamespace();
+ NS = dyn_cast<NamespaceDecl>(NS->getParent());
+ }
+ }
// dll attributes require external linkage. Static locals may have external
// linkage but still cannot be explicitly imported or exported.
- auto *VD = dyn_cast<VarDecl>(&ND);
- if (!ND.isExternallyVisible() || (VD && VD->isStaticLocal())) {
+ // In Microsoft mode, a variable defined in anonymous namespace must have
+ // external linkage in order to be exported.
+ bool AnonNSInMicrosoftMode = IsAnonymousNS && IsMicrosoft;
+ if ((ND.isExternallyVisible() && AnonNSInMicrosoftMode) ||
+ (!AnonNSInMicrosoftMode &&
+ (!ND.isExternallyVisible() || (VD && VD->isStaticLocal())))) {
S.Diag(ND.getLocation(), diag::err_attribute_dll_not_extern)
<< &ND << Attr;
ND.setInvalidDecl();
@@ -6187,7 +6260,8 @@ static bool isIncompleteDeclExternC(Sema &S, const T *D) {
static bool shouldConsiderLinkage(const VarDecl *VD) {
const DeclContext *DC = VD->getDeclContext()->getRedeclContext();
- if (DC->isFunctionOrMethod() || isa<OMPDeclareReductionDecl>(DC))
+ if (DC->isFunctionOrMethod() || isa<OMPDeclareReductionDecl>(DC) ||
+ isa<OMPDeclareMapperDecl>(DC))
return VD->hasExternalStorage();
if (DC->isFileContext())
return true;
@@ -6199,7 +6273,7 @@ static bool shouldConsiderLinkage(const VarDecl *VD) {
static bool shouldConsiderLinkage(const FunctionDecl *FD) {
const DeclContext *DC = FD->getDeclContext()->getRedeclContext();
if (DC->isFileContext() || DC->isFunctionOrMethod() ||
- isa<OMPDeclareReductionDecl>(DC))
+ isa<OMPDeclareReductionDecl>(DC) || isa<OMPDeclareMapperDecl>(DC))
return true;
if (DC->isRecord())
return false;
@@ -6354,8 +6428,8 @@ NamedDecl *Sema::ActOnVariableDeclarator(
}
}
- // OpenCL C++ 1.0 s2.9: the thread_local storage qualifier is not
- // supported. OpenCL C does not support thread_local either, and
+ // C++ for OpenCL does not allow the thread_local storage qualifier.
+ // OpenCL C does not support thread_local either, and
// also reject all other thread storage class specifiers.
DeclSpec::TSCS TSC = D.getDeclSpec().getThreadStorageClassSpec();
if (TSC != TSCS_unspecified) {
@@ -6431,6 +6505,11 @@ NamedDecl *Sema::ActOnVariableDeclarator(
if (D.isInvalidType())
NewVD->setInvalidDecl();
+
+ if (NewVD->getType().hasNonTrivialToPrimitiveDestructCUnion() &&
+ NewVD->hasLocalStorage())
+ checkNonTrivialCUnion(NewVD->getType(), NewVD->getLocation(),
+ NTCUC_AutoVar, NTCUK_Destruct);
} else {
bool Invalid = false;
@@ -6580,13 +6659,17 @@ NamedDecl *Sema::ActOnVariableDeclarator(
NewVD->setTemplateParameterListsInfo(
Context, TemplateParamLists.drop_back(VDTemplateParamLists));
- if (D.getDeclSpec().isConstexprSpecified()) {
+ if (D.getDeclSpec().hasConstexprSpecifier()) {
NewVD->setConstexpr(true);
// C++1z [dcl.spec.constexpr]p1:
// A static data member declared with the constexpr specifier is
// implicitly an inline variable.
if (NewVD->isStaticDataMember() && getLangOpts().CPlusPlus17)
NewVD->setImplicitlyInline();
+ if (D.getDeclSpec().getConstexprSpecifier() == CSK_consteval)
+ Diag(D.getDeclSpec().getConstexprSpecLoc(),
+ diag::err_constexpr_wrong_decl_kind)
+ << /*consteval*/ 1;
}
}
@@ -7352,9 +7435,8 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
// OpenCL C v2.0 s6.5.1 - Variables defined at program scope and static
// variables inside a function can also be declared in the global
// address space.
- // OpenCL C++ v1.0 s2.5 inherits rule from OpenCL C v2.0 and allows local
- // address space additionally.
- // FIXME: Add local AS for OpenCL C++.
+ // C++ for OpenCL inherits rule from OpenCL C v2.0.
+ // FIXME: Adding local AS in C++ for OpenCL might make sense.
if (NewVD->isFileVarDecl() || NewVD->isStaticLocal() ||
NewVD->hasExternalStorage()) {
if (!T->isSamplerT() &&
@@ -7408,7 +7490,10 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
return;
}
}
- } else if (T.getAddressSpace() != LangAS::opencl_private) {
+ } else if (T.getAddressSpace() != LangAS::opencl_private &&
+             // If we are parsing a template, we have not deduced an addr
+             // space yet.
+ T.getAddressSpace() != LangAS::Default) {
// Do not allow other address spaces on automatic variable.
Diag(NewVD->getLocation(), diag::err_as_qualified_auto_decl) << 1;
NewVD->setInvalidDecl();
@@ -7654,7 +7739,7 @@ namespace {
// Callback to only accept typo corrections that have a non-zero edit distance.
// Also only accept corrections that have the same parent decl.
-class DifferentNameValidatorCCC : public CorrectionCandidateCallback {
+class DifferentNameValidatorCCC final : public CorrectionCandidateCallback {
public:
DifferentNameValidatorCCC(ASTContext &Context, FunctionDecl *TypoFD,
CXXRecordDecl *Parent)
@@ -7686,6 +7771,10 @@ class DifferentNameValidatorCCC : public CorrectionCandidateCallback {
return false;
}
+ std::unique_ptr<CorrectionCandidateCallback> clone() override {
+ return llvm::make_unique<DifferentNameValidatorCCC>(*this);
+ }
+
private:
ASTContext &Context;
FunctionDecl *OriginalFD;
@@ -7733,6 +7822,8 @@ static NamedDecl *DiagnoseInvalidRedeclaration(
assert(!Prev.isAmbiguous() &&
"Cannot have an ambiguity in previous-declaration lookup");
CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD);
+ DifferentNameValidatorCCC CCC(SemaRef.Context, NewFD,
+ MD ? MD->getParent() : nullptr);
if (!Prev.empty()) {
for (LookupResult::iterator Func = Prev.begin(), FuncEnd = Prev.end();
Func != FuncEnd; ++Func) {
@@ -7749,10 +7840,8 @@ static NamedDecl *DiagnoseInvalidRedeclaration(
// If the qualified name lookup yielded nothing, try typo correction
} else if ((Correction = SemaRef.CorrectTypo(
Prev.getLookupNameInfo(), Prev.getLookupKind(), S,
- &ExtraArgs.D.getCXXScopeSpec(),
- llvm::make_unique<DifferentNameValidatorCCC>(
- SemaRef.Context, NewFD, MD ? MD->getParent() : nullptr),
- Sema::CTK_ErrorRecovery, IsLocalFriend ? nullptr : NewDC))) {
+ &ExtraArgs.D.getCXXScopeSpec(), CCC, Sema::CTK_ErrorRecovery,
+ IsLocalFriend ? nullptr : NewDC))) {
// Set up everything for the call to ActOnFunctionDeclarator
ExtraArgs.D.SetIdentifier(Correction.getCorrectionAsIdentifierInfo(),
ExtraArgs.D.getIdentifierLoc());
@@ -7910,16 +7999,16 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
(!R->getAsAdjusted<FunctionType>() && R->isFunctionProtoType());
NewFD = FunctionDecl::Create(SemaRef.Context, DC, D.getBeginLoc(), NameInfo,
- R, TInfo, SC, isInline, HasPrototype, false);
+ R, TInfo, SC, isInline, HasPrototype,
+ CSK_unspecified);
if (D.isInvalidType())
NewFD->setInvalidDecl();
return NewFD;
}
- bool isExplicit = D.getDeclSpec().isExplicitSpecified();
- bool isConstexpr = D.getDeclSpec().isConstexprSpecified();
-
+ ExplicitSpecifier ExplicitSpecifier = D.getDeclSpec().getExplicitSpecifier();
+ ConstexprSpecKind ConstexprKind = D.getDeclSpec().getConstexprSpecifier();
// Check that the return type is not an abstract class type.
// For record types, this is done by the AbstractClassUsageDiagnoser once
// the class has been completely parsed.
@@ -7937,8 +8026,8 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
R = SemaRef.CheckConstructorDeclarator(D, R, SC);
return CXXConstructorDecl::Create(
SemaRef.Context, cast<CXXRecordDecl>(DC), D.getBeginLoc(), NameInfo, R,
- TInfo, isExplicit, isInline,
- /*isImplicitlyDeclared=*/false, isConstexpr);
+ TInfo, ExplicitSpecifier, isInline,
+ /*isImplicitlyDeclared=*/false, ConstexprKind);
} else if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
// This is a C++ destructor declaration.
@@ -7968,7 +8057,7 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
return FunctionDecl::Create(SemaRef.Context, DC, D.getBeginLoc(),
D.getIdentifierLoc(), Name, R, TInfo, SC,
isInline,
- /*hasPrototype=*/true, isConstexpr);
+ /*hasPrototype=*/true, ConstexprKind);
}
} else if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName) {
@@ -7982,13 +8071,13 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
IsVirtualOkay = true;
return CXXConversionDecl::Create(
SemaRef.Context, cast<CXXRecordDecl>(DC), D.getBeginLoc(), NameInfo, R,
- TInfo, isInline, isExplicit, isConstexpr, SourceLocation());
+ TInfo, isInline, ExplicitSpecifier, ConstexprKind, SourceLocation());
} else if (Name.getNameKind() == DeclarationName::CXXDeductionGuideName) {
SemaRef.CheckDeductionGuideDeclarator(D, R, SC);
return CXXDeductionGuideDecl::Create(SemaRef.Context, DC, D.getBeginLoc(),
- isExplicit, NameInfo, R, TInfo,
+ ExplicitSpecifier, NameInfo, R, TInfo,
D.getEndLoc());
} else if (DC->isRecord()) {
// If the name of the function is the same as the name of the record,
@@ -8006,7 +8095,7 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
// This is a C++ method declaration.
CXXMethodDecl *Ret = CXXMethodDecl::Create(
SemaRef.Context, cast<CXXRecordDecl>(DC), D.getBeginLoc(), NameInfo, R,
- TInfo, SC, isInline, isConstexpr, SourceLocation());
+ TInfo, SC, isInline, ConstexprKind, SourceLocation());
IsVirtualOkay = !Ret->isStatic();
return Ret;
} else {
@@ -8020,7 +8109,7 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
// - we're in C++ (where every function has a prototype),
return FunctionDecl::Create(SemaRef.Context, DC, D.getBeginLoc(), NameInfo,
R, TInfo, SC, isInline, true /*HasPrototype*/,
- isConstexpr);
+ ConstexprKind);
}
}
@@ -8044,8 +8133,7 @@ static bool isOpenCLSizeDependentType(ASTContext &C, QualType Ty) {
QualType DesugaredTy = Ty;
do {
ArrayRef<StringRef> Names(SizeTypeNames);
- auto Match =
- std::find(Names.begin(), Names.end(), DesugaredTy.getAsString());
+ auto Match = llvm::find(Names, DesugaredTy.getAsString());
if (Names.end() != Match)
return true;
@@ -8350,8 +8438,8 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (getLangOpts().CPlusPlus) {
bool isInline = D.getDeclSpec().isInlineSpecified();
bool isVirtual = D.getDeclSpec().isVirtualSpecified();
- bool isExplicit = D.getDeclSpec().isExplicitSpecified();
- bool isConstexpr = D.getDeclSpec().isConstexprSpecified();
+ bool hasExplicit = D.getDeclSpec().hasExplicitSpecifier();
+ ConstexprSpecKind ConstexprKind = D.getDeclSpec().getConstexprSpecifier();
isFriend = D.getDeclSpec().isFriendSpecified();
if (isFriend && !isInline && D.isFunctionDefinition()) {
// C++ [class.friend]p5
@@ -8533,24 +8621,24 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// The explicit specifier shall be used only in the declaration of a
// constructor or conversion function within its class definition;
// see 12.3.1 and 12.3.2.
- if (isExplicit && !NewFD->isInvalidDecl() &&
+ if (hasExplicit && !NewFD->isInvalidDecl() &&
!isa<CXXDeductionGuideDecl>(NewFD)) {
if (!CurContext->isRecord()) {
// 'explicit' was specified outside of the class.
Diag(D.getDeclSpec().getExplicitSpecLoc(),
diag::err_explicit_out_of_class)
- << FixItHint::CreateRemoval(D.getDeclSpec().getExplicitSpecLoc());
+ << FixItHint::CreateRemoval(D.getDeclSpec().getExplicitSpecRange());
} else if (!isa<CXXConstructorDecl>(NewFD) &&
!isa<CXXConversionDecl>(NewFD)) {
// 'explicit' was specified on a function that wasn't a constructor
// or conversion function.
Diag(D.getDeclSpec().getExplicitSpecLoc(),
diag::err_explicit_non_ctor_or_conv_function)
- << FixItHint::CreateRemoval(D.getDeclSpec().getExplicitSpecLoc());
+ << FixItHint::CreateRemoval(D.getDeclSpec().getExplicitSpecRange());
}
}
- if (isConstexpr) {
+ if (ConstexprKind != CSK_unspecified) {
// C++11 [dcl.constexpr]p2: constexpr functions and constexpr constructors
// are implicitly inline.
NewFD->setImplicitlyInline();
@@ -8559,7 +8647,8 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// be either constructors or to return a literal type. Therefore,
// destructors cannot be declared constexpr.
if (isa<CXXDestructorDecl>(NewFD))
- Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_constexpr_dtor);
+ Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_constexpr_dtor)
+ << (ConstexprKind == CSK_consteval);
}
// If __module_private__ was specified, mark the function accordingly.
@@ -8620,8 +8709,15 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// Complain about the 'static' specifier if it's on an out-of-line
// member function definition.
+
+ // MSVC permits the use of a 'static' storage specifier on an out-of-line
+ // member function template declaration and class member template
+  // declaration (MSVC versions before 2015); warn about this.
Diag(D.getDeclSpec().getStorageClassSpecLoc(),
- diag::err_static_out_of_line)
+ ((!getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2015) &&
+ cast<CXXRecordDecl>(DC)->getDescribedClassTemplate()) ||
+ (getLangOpts().MSVCCompat && NewFD->getDescribedFunctionTemplate()))
+ ? diag::ext_static_out_of_line : diag::err_static_out_of_line)
<< FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
}
@@ -8836,6 +8932,12 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
<< FunctionType::getNameForCallConv(CC);
}
}
+
+ if (NewFD->getReturnType().hasNonTrivialToPrimitiveDestructCUnion() ||
+ NewFD->getReturnType().hasNonTrivialToPrimitiveCopyCUnion())
+ checkNonTrivialCUnion(NewFD->getReturnType(),
+ NewFD->getReturnTypeSourceRange().getBegin(),
+ NTCUC_FunctionReturn, NTCUK_Destruct|NTCUK_Copy);
} else {
// C++11 [replacement.functions]p3:
// The program's definitions shall not be specified as inline.
@@ -9031,8 +9133,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// nothing will diagnose that error later.
if (isFriend &&
(D.getCXXScopeSpec().getScopeRep()->isDependent() ||
- (!Previous.empty() && (TemplateParamLists.size() ||
- CurContext->isDependentContext())))) {
+ (!Previous.empty() && CurContext->isDependentContext()))) {
// ignore these
} else {
// The user tried to provide an out-of-line definition for a
@@ -9137,13 +9238,12 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (getLangOpts().CUDA) {
IdentifierInfo *II = NewFD->getIdentifier();
- if (II &&
- II->isStr(getLangOpts().HIP ? "hipConfigureCall"
- : "cudaConfigureCall") &&
+ if (II && II->isStr(getCudaConfigureFuncName()) &&
!NewFD->isInvalidDecl() &&
NewFD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
if (!R->getAs<FunctionType>()->getReturnType()->isScalarType())
- Diag(NewFD->getLocation(), diag::err_config_scalar_return);
+ Diag(NewFD->getLocation(), diag::err_config_scalar_return)
+ << getCudaConfigureFuncName();
Context.setcudaConfigureCallDecl(NewFD);
}
@@ -9161,18 +9261,9 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
MarkUnusedFileScopedDecl(NewFD);
- if (getLangOpts().CPlusPlus) {
- if (FunctionTemplate) {
- if (NewFD->isInvalidDecl())
- FunctionTemplate->setInvalidDecl();
- return FunctionTemplate;
- }
- if (isMemberSpecialization && !NewFD->isInvalidDecl())
- CompleteMemberSpecialization(NewFD, Previous);
- }
- if (NewFD->hasAttr<OpenCLKernelAttr>()) {
+ if (getLangOpts().OpenCL && NewFD->hasAttr<OpenCLKernelAttr>()) {
// OpenCL v1.2 s6.8 static is invalid for kernel functions.
if ((getLangOpts().OpenCLVersion >= 120)
&& (SC == SC_Static)) {
@@ -9192,13 +9283,36 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
llvm::SmallPtrSet<const Type *, 16> ValidTypes;
for (auto Param : NewFD->parameters())
checkIsValidOpenCLKernelParameter(*this, D, Param, ValidTypes);
+
+ if (getLangOpts().OpenCLCPlusPlus) {
+ if (DC->isRecord()) {
+ Diag(D.getIdentifierLoc(), diag::err_method_kernel);
+ D.setInvalidType();
+ }
+ if (FunctionTemplate) {
+ Diag(D.getIdentifierLoc(), diag::err_template_kernel);
+ D.setInvalidType();
+ }
+ }
}
+
+ if (getLangOpts().CPlusPlus) {
+ if (FunctionTemplate) {
+ if (NewFD->isInvalidDecl())
+ FunctionTemplate->setInvalidDecl();
+ return FunctionTemplate;
+ }
+
+ if (isMemberSpecialization && !NewFD->isInvalidDecl())
+ CompleteMemberSpecialization(NewFD, Previous);
+ }
+
for (const ParmVarDecl *Param : NewFD->parameters()) {
QualType PT = Param->getType();
// OpenCL 2.0 pipe restrictions forbids pipe packet types to be non-value
// types.
- if (getLangOpts().OpenCLVersion >= 200) {
+ if (getLangOpts().OpenCLVersion >= 200 || getLangOpts().OpenCLCPlusPlus) {
if(const PipeType *PipeTy = PT->getAs<PipeType>()) {
QualType ElemTy = PipeTy->getElementType();
if (ElemTy->isReferenceType() || ElemTy->isPointerType()) {
@@ -9437,6 +9551,7 @@ static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
DeletedFuncs = 5,
DefaultedFuncs = 6,
ConstexprFuncs = 7,
+ ConstevalFuncs = 8,
};
enum Different {
CallingConv = 0,
@@ -9512,7 +9627,8 @@ static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
if (NewFD->isConstexpr() && (MVType == MultiVersionKind::CPUDispatch ||
MVType == MultiVersionKind::CPUSpecific))
return S.Diag(NewFD->getLocation(), diag::err_multiversion_doesnt_support)
- << IsCPUSpecificCPUDispatchMVType << ConstexprFuncs;
+ << IsCPUSpecificCPUDispatchMVType
+ << (NewFD->isConsteval() ? ConstevalFuncs : ConstexprFuncs);
QualType NewQType = S.getASTContext().getCanonicalType(NewFD->getType());
const auto *NewType = cast<FunctionType>(NewQType);
@@ -9543,7 +9659,7 @@ static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
return S.Diag(NewFD->getLocation(), diag::err_multiversion_diff)
<< ReturnType;
- if (OldFD->isConstexpr() != NewFD->isConstexpr())
+ if (OldFD->getConstexprKind() != NewFD->getConstexprKind())
return S.Diag(NewFD->getLocation(), diag::err_multiversion_diff)
<< ConstexprSpec;
@@ -9575,9 +9691,7 @@ static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
/// Returns true if there was an error, false otherwise.
static bool CheckMultiVersionFirstFunction(Sema &S, FunctionDecl *FD,
MultiVersionKind MVType,
- const TargetAttr *TA,
- const CPUDispatchAttr *CPUDisp,
- const CPUSpecificAttr *CPUSpec) {
+ const TargetAttr *TA) {
assert(MVType != MultiVersionKind::None &&
"Function lacks multiversion attribute");
@@ -9884,8 +9998,7 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
// multiversioning, this isn't an error condition.
if (MVType == MultiVersionKind::None)
return false;
- return CheckMultiVersionFirstFunction(S, NewFD, MVType, NewTA, NewCPUDisp,
- NewCPUSpec);
+ return CheckMultiVersionFirstFunction(S, NewFD, MVType, NewTA);
}
FunctionDecl *OldFD = OldDecl->getAsFunction();
@@ -10017,7 +10130,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD);
if (!getLangOpts().CPlusPlus14 && MD && MD->isConstexpr() &&
!MD->isStatic() && !isa<CXXConstructorDecl>(MD) &&
- !MD->getTypeQualifiers().hasConst()) {
+ !MD->getMethodQualifiers().hasConst()) {
CXXMethodDecl *OldMD = nullptr;
if (OldDecl)
OldMD = dyn_cast_or_null<CXXMethodDecl>(OldDecl->getAsFunction());
@@ -10296,8 +10409,9 @@ void Sema::CheckMain(FunctionDecl* FD, const DeclSpec& DS) {
}
if (FD->isConstexpr()) {
Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_main)
- << FixItHint::CreateRemoval(DS.getConstexprSpecLoc());
- FD->setConstexpr(false);
+ << FD->isConsteval()
+ << FixItHint::CreateRemoval(DS.getConstexprSpecLoc());
+ FD->setConstexprKind(CSK_unspecified);
}
if (getLangOpts().OpenCL) {
@@ -10808,7 +10922,7 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
DeclarationName Name, QualType Type,
TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
- Expr *&Init) {
+ Expr *Init) {
bool IsInitCapture = !VDecl;
assert((!VDecl || !VDecl->isInitCapture()) &&
"init captures are expected to be deduced prior to initialization");
@@ -10924,8 +11038,7 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
<< (DeduceInit->getType().isNull() ? TSI->getType()
: DeduceInit->getType())
<< DeduceInit->getSourceRange();
- } else
- Init = DeduceInit;
+ }
// Warn if we deduced 'id'. 'auto' usually implies type-safety, but using
// 'id' instead of a specific object type prevents most of our usual
@@ -10942,7 +11055,7 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
}
bool Sema::DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
- Expr *&Init) {
+ Expr *Init) {
QualType DeducedType = deduceVarTypeFromInitializer(
VDecl, VDecl->getDeclName(), VDecl->getType(), VDecl->getTypeSourceInfo(),
VDecl->getSourceRange(), DirectInit, Init);
@@ -10971,6 +11084,264 @@ bool Sema::DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
return VDecl->isInvalidDecl();
}
+void Sema::checkNonTrivialCUnionInInitializer(const Expr *Init,
+ SourceLocation Loc) {
+ if (auto *CE = dyn_cast<ConstantExpr>(Init))
+ Init = CE->getSubExpr();
+
+ QualType InitType = Init->getType();
+ assert((InitType.hasNonTrivialToPrimitiveDefaultInitializeCUnion() ||
+ InitType.hasNonTrivialToPrimitiveCopyCUnion()) &&
+ "shouldn't be called if type doesn't have a non-trivial C struct");
+ if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
+ for (auto I : ILE->inits()) {
+ if (!I->getType().hasNonTrivialToPrimitiveDefaultInitializeCUnion() &&
+ !I->getType().hasNonTrivialToPrimitiveCopyCUnion())
+ continue;
+ SourceLocation SL = I->getExprLoc();
+ checkNonTrivialCUnionInInitializer(I, SL.isValid() ? SL : Loc);
+ }
+ return;
+ }
+
+ if (isa<ImplicitValueInitExpr>(Init)) {
+ if (InitType.hasNonTrivialToPrimitiveDefaultInitializeCUnion())
+ checkNonTrivialCUnion(InitType, Loc, NTCUC_DefaultInitializedObject,
+ NTCUK_Init);
+ } else {
+    // Assume all other explicit initializers involve copying some existing
+    // object.
+ // TODO: ignore any explicit initializers where we can guarantee
+ // copy-elision.
+ if (InitType.hasNonTrivialToPrimitiveCopyCUnion())
+ checkNonTrivialCUnion(InitType, Loc, NTCUC_CopyInit, NTCUK_Copy);
+ }
+}
+
+namespace {
+
+struct DiagNonTrivalCUnionDefaultInitializeVisitor
+ : DefaultInitializedTypeVisitor<DiagNonTrivalCUnionDefaultInitializeVisitor,
+ void> {
+ using Super =
+ DefaultInitializedTypeVisitor<DiagNonTrivalCUnionDefaultInitializeVisitor,
+ void>;
+
+ DiagNonTrivalCUnionDefaultInitializeVisitor(
+ QualType OrigTy, SourceLocation OrigLoc,
+ Sema::NonTrivialCUnionContext UseContext, Sema &S)
+ : OrigTy(OrigTy), OrigLoc(OrigLoc), UseContext(UseContext), S(S) {}
+
+ void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType QT,
+ const FieldDecl *FD, bool InNonTrivialUnion) {
+ if (const auto *AT = S.Context.getAsArrayType(QT))
+ return this->asDerived().visit(S.Context.getBaseElementType(AT), FD,
+ InNonTrivialUnion);
+ return Super::visitWithKind(PDIK, QT, FD, InNonTrivialUnion);
+ }
+
+ void visitARCStrong(QualType QT, const FieldDecl *FD,
+ bool InNonTrivialUnion) {
+ if (InNonTrivialUnion)
+ S.Diag(FD->getLocation(), diag::note_non_trivial_c_union)
+ << 1 << 0 << QT << FD->getName();
+ }
+
+ void visitARCWeak(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {
+ if (InNonTrivialUnion)
+ S.Diag(FD->getLocation(), diag::note_non_trivial_c_union)
+ << 1 << 0 << QT << FD->getName();
+ }
+
+ void visitStruct(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {
+ const RecordDecl *RD = QT->castAs<RecordType>()->getDecl();
+ if (RD->isUnion()) {
+ if (OrigLoc.isValid()) {
+ bool IsUnion = false;
+ if (auto *OrigRD = OrigTy->getAsRecordDecl())
+ IsUnion = OrigRD->isUnion();
+ S.Diag(OrigLoc, diag::err_non_trivial_c_union_in_invalid_context)
+ << 0 << OrigTy << IsUnion << UseContext;
+ // Reset OrigLoc so that this diagnostic is emitted only once.
+ OrigLoc = SourceLocation();
+ }
+ InNonTrivialUnion = true;
+ }
+
+ if (InNonTrivialUnion)
+ S.Diag(RD->getLocation(), diag::note_non_trivial_c_union)
+ << 0 << 0 << QT.getUnqualifiedType() << "";
+
+ for (const FieldDecl *FD : RD->fields())
+ asDerived().visit(FD->getType(), FD, InNonTrivialUnion);
+ }
+
+ void visitTrivial(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {}
+
+ // The non-trivial C union type or the struct/union type that contains a
+ // non-trivial C union.
+ QualType OrigTy;
+ SourceLocation OrigLoc;
+ Sema::NonTrivialCUnionContext UseContext;
+ Sema &S;
+};
+
+struct DiagNonTrivalCUnionDestructedTypeVisitor
+ : DestructedTypeVisitor<DiagNonTrivalCUnionDestructedTypeVisitor, void> {
+ using Super =
+ DestructedTypeVisitor<DiagNonTrivalCUnionDestructedTypeVisitor, void>;
+
+ DiagNonTrivalCUnionDestructedTypeVisitor(
+ QualType OrigTy, SourceLocation OrigLoc,
+ Sema::NonTrivialCUnionContext UseContext, Sema &S)
+ : OrigTy(OrigTy), OrigLoc(OrigLoc), UseContext(UseContext), S(S) {}
+
+ void visitWithKind(QualType::DestructionKind DK, QualType QT,
+ const FieldDecl *FD, bool InNonTrivialUnion) {
+ if (const auto *AT = S.Context.getAsArrayType(QT))
+ return this->asDerived().visit(S.Context.getBaseElementType(AT), FD,
+ InNonTrivialUnion);
+ return Super::visitWithKind(DK, QT, FD, InNonTrivialUnion);
+ }
+
+ void visitARCStrong(QualType QT, const FieldDecl *FD,
+ bool InNonTrivialUnion) {
+ if (InNonTrivialUnion)
+ S.Diag(FD->getLocation(), diag::note_non_trivial_c_union)
+ << 1 << 1 << QT << FD->getName();
+ }
+
+ void visitARCWeak(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {
+ if (InNonTrivialUnion)
+ S.Diag(FD->getLocation(), diag::note_non_trivial_c_union)
+ << 1 << 1 << QT << FD->getName();
+ }
+
+ void visitStruct(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {
+ const RecordDecl *RD = QT->castAs<RecordType>()->getDecl();
+ if (RD->isUnion()) {
+ if (OrigLoc.isValid()) {
+ bool IsUnion = false;
+ if (auto *OrigRD = OrigTy->getAsRecordDecl())
+ IsUnion = OrigRD->isUnion();
+ S.Diag(OrigLoc, diag::err_non_trivial_c_union_in_invalid_context)
+ << 1 << OrigTy << IsUnion << UseContext;
+ // Reset OrigLoc so that this diagnostic is emitted only once.
+ OrigLoc = SourceLocation();
+ }
+ InNonTrivialUnion = true;
+ }
+
+ if (InNonTrivialUnion)
+ S.Diag(RD->getLocation(), diag::note_non_trivial_c_union)
+ << 0 << 1 << QT.getUnqualifiedType() << "";
+
+ for (const FieldDecl *FD : RD->fields())
+ asDerived().visit(FD->getType(), FD, InNonTrivialUnion);
+ }
+
+ void visitTrivial(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {}
+ void visitCXXDestructor(QualType QT, const FieldDecl *FD,
+ bool InNonTrivialUnion) {}
+
+ // The non-trivial C union type or the struct/union type that contains a
+ // non-trivial C union.
+ QualType OrigTy;
+ SourceLocation OrigLoc;
+ Sema::NonTrivialCUnionContext UseContext;
+ Sema &S;
+};
+
+struct DiagNonTrivalCUnionCopyVisitor
+ : CopiedTypeVisitor<DiagNonTrivalCUnionCopyVisitor, false, void> {
+ using Super = CopiedTypeVisitor<DiagNonTrivalCUnionCopyVisitor, false, void>;
+
+ DiagNonTrivalCUnionCopyVisitor(QualType OrigTy, SourceLocation OrigLoc,
+ Sema::NonTrivialCUnionContext UseContext,
+ Sema &S)
+ : OrigTy(OrigTy), OrigLoc(OrigLoc), UseContext(UseContext), S(S) {}
+
+ void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType QT,
+ const FieldDecl *FD, bool InNonTrivialUnion) {
+ if (const auto *AT = S.Context.getAsArrayType(QT))
+ return this->asDerived().visit(S.Context.getBaseElementType(AT), FD,
+ InNonTrivialUnion);
+ return Super::visitWithKind(PCK, QT, FD, InNonTrivialUnion);
+ }
+
+ void visitARCStrong(QualType QT, const FieldDecl *FD,
+ bool InNonTrivialUnion) {
+ if (InNonTrivialUnion)
+ S.Diag(FD->getLocation(), diag::note_non_trivial_c_union)
+ << 1 << 2 << QT << FD->getName();
+ }
+
+ void visitARCWeak(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {
+ if (InNonTrivialUnion)
+ S.Diag(FD->getLocation(), diag::note_non_trivial_c_union)
+ << 1 << 2 << QT << FD->getName();
+ }
+
+ void visitStruct(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {
+ const RecordDecl *RD = QT->castAs<RecordType>()->getDecl();
+ if (RD->isUnion()) {
+ if (OrigLoc.isValid()) {
+ bool IsUnion = false;
+ if (auto *OrigRD = OrigTy->getAsRecordDecl())
+ IsUnion = OrigRD->isUnion();
+ S.Diag(OrigLoc, diag::err_non_trivial_c_union_in_invalid_context)
+ << 2 << OrigTy << IsUnion << UseContext;
+ // Reset OrigLoc so that this diagnostic is emitted only once.
+ OrigLoc = SourceLocation();
+ }
+ InNonTrivialUnion = true;
+ }
+
+ if (InNonTrivialUnion)
+ S.Diag(RD->getLocation(), diag::note_non_trivial_c_union)
+ << 0 << 2 << QT.getUnqualifiedType() << "";
+
+ for (const FieldDecl *FD : RD->fields())
+ asDerived().visit(FD->getType(), FD, InNonTrivialUnion);
+ }
+
+ void preVisit(QualType::PrimitiveCopyKind PCK, QualType QT,
+ const FieldDecl *FD, bool InNonTrivialUnion) {}
+ void visitTrivial(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {}
+ void visitVolatileTrivial(QualType QT, const FieldDecl *FD,
+ bool InNonTrivialUnion) {}
+
+ // The non-trivial C union type or the struct/union type that contains a
+ // non-trivial C union.
+ QualType OrigTy;
+ SourceLocation OrigLoc;
+ Sema::NonTrivialCUnionContext UseContext;
+ Sema &S;
+};
+
+} // namespace
+
+void Sema::checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
+ NonTrivialCUnionContext UseContext,
+ unsigned NonTrivialKind) {
+ assert((QT.hasNonTrivialToPrimitiveDefaultInitializeCUnion() ||
+ QT.hasNonTrivialToPrimitiveDestructCUnion() ||
+ QT.hasNonTrivialToPrimitiveCopyCUnion()) &&
+ "shouldn't be called if type doesn't have a non-trivial C union");
+
+ if ((NonTrivialKind & NTCUK_Init) &&
+ QT.hasNonTrivialToPrimitiveDefaultInitializeCUnion())
+ DiagNonTrivalCUnionDefaultInitializeVisitor(QT, Loc, UseContext, *this)
+ .visit(QT, nullptr, false);
+ if ((NonTrivialKind & NTCUK_Destruct) &&
+ QT.hasNonTrivialToPrimitiveDestructCUnion())
+ DiagNonTrivalCUnionDestructedTypeVisitor(QT, Loc, UseContext, *this)
+ .visit(QT, nullptr, false);
+ if ((NonTrivialKind & NTCUK_Copy) && QT.hasNonTrivialToPrimitiveCopyCUnion())
+ DiagNonTrivalCUnionCopyVisitor(QT, Loc, UseContext, *this)
+ .visit(QT, nullptr, false);
+}
+
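The checkNonTrivialCUnionInInitializer and checkNonTrivialCUnion entry points added above are what later hunks in this patch call to reject C unions whose members carry non-trivial ARC ownership. A minimal sketch of source that trips them, assuming Objective-C compiled with -fobjc-arc (identifiers are illustrative; diagnostic wording is not quoted):

    union U {
      id obj;                 /* __strong member makes U a non-trivial C union */
      int i;
    };
    void take(union U u);     /* flagged as a function parameter (NTCUC_FunctionParam) */
    void use(void) {
      union U u;              /* flagged as a default-initialized object (NTCUC_DefaultInitializedObject) */
    }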
/// AddInitializerToDecl - Adds the initializer Init to the
/// declaration dcl. If DirectInit is true, this is C++ direct
/// initialization rather than copy initialization.
@@ -11218,7 +11589,7 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
// do nothing
// OpenCL v1.2 s6.5.3: __constant locals must be constant-initialized.
- // This is true even in OpenCL C++.
+ // This is true even in C++ for OpenCL.
} else if (VDecl->getType().getAddressSpace() == LangAS::opencl_constant) {
CheckForConstantInitializer(Init, DclT);
@@ -11244,6 +11615,11 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
<< Culprit->getSourceRange();
}
}
+
+ if (auto *E = dyn_cast<ExprWithCleanups>(Init))
+ if (auto *BE = dyn_cast<BlockExpr>(E->getSubExpr()->IgnoreParens()))
+ if (VDecl->hasLocalStorage())
+ BE->getBlockDecl()->setCanAvoidCopyToHeap();
} else if (VDecl->isStaticDataMember() && !VDecl->isInline() &&
VDecl->getLexicalDeclContext()->isRecord()) {
// This is an in-class initialization for a static data member, e.g.,
@@ -11358,11 +11734,25 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
!isTemplateInstantiation(VDecl->getTemplateSpecializationKind()))
Diag(VDecl->getLocation(), diag::warn_extern_init);
+ // In Microsoft C++ mode, a const variable defined in namespace scope has
+ // external linkage by default if the variable is declared with
+ // __declspec(dllexport).
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ getLangOpts().CPlusPlus && VDecl->getType().isConstQualified() &&
+ VDecl->hasAttr<DLLExportAttr>() && VDecl->getDefinition())
+ VDecl->setStorageClass(SC_Extern);
+
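A short C++ sketch of the Microsoft-mode special case added above (assuming a target using the MSVC C++ ABI; the variable name is made up):

    // At namespace scope a const variable normally has internal linkage in C++,
    // but a dllexport'ed definition is now given external linkage (SC_Extern).
    __declspec(dllexport) const int ExportedValue = 42;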
// C99 6.7.8p4. All file scoped initializers need to be constant.
if (!getLangOpts().CPlusPlus && !VDecl->isInvalidDecl())
CheckForConstantInitializer(Init, DclT);
}
+ QualType InitType = Init->getType();
+ if (!InitType.isNull() &&
+ (InitType.hasNonTrivialToPrimitiveDefaultInitializeCUnion() ||
+ InitType.hasNonTrivialToPrimitiveCopyCUnion()))
+ checkNonTrivialCUnionInInitializer(Init, Init->getExprLoc());
+
// We will represent direct-initialization similarly to copy-initialization:
// int x(1); -as-> int x = 1;
// ClassType x(a,b,c); -as-> ClassType x = ClassType(a,b,c);
@@ -11448,9 +11838,8 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
return;
}
- Expr *TmpInit = nullptr;
if (Type->isUndeducedType() &&
- DeduceVariableDeclarationType(Var, false, TmpInit))
+ DeduceVariableDeclarationType(Var, false, nullptr))
return;
// C++11 [class.static.data]p3: A static data member can be declared with
@@ -11488,7 +11877,14 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
return;
}
- switch (Var->isThisDeclarationADefinition()) {
+ VarDecl::DefinitionKind DefKind = Var->isThisDeclarationADefinition();
+ if (!Var->isInvalidDecl() && DefKind != VarDecl::DeclarationOnly &&
+ Var->getType().hasNonTrivialToPrimitiveDefaultInitializeCUnion())
+ checkNonTrivialCUnion(Var->getType(), Var->getLocation(),
+ NTCUC_DefaultInitializedObject, NTCUK_Init);
+
+
+ switch (DefKind) {
case VarDecl::Definition:
if (!Var->isStaticDataMember() || !Var->getAnyInitializer())
break;
@@ -11627,7 +12023,11 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
setFunctionHasBranchProtectedScope();
}
}
-
+ // In OpenCL, we can't initialize objects in the __local address space,
+ // even implicitly, so don't synthesize an implicit initializer.
+ if (getLangOpts().OpenCL &&
+ Var->getType().getAddressSpace() == LangAS::opencl_local)
+ return;
// C++03 [dcl.init]p9:
// If no initializer is specified for an object, and the
// object is of (possibly cv-qualified) non-POD class type (or
@@ -11723,7 +12123,6 @@ Sema::ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
D.SetIdentifier(Ident, IdentLoc);
D.takeAttributes(Attrs, AttrEnd);
- ParsedAttributes EmptyAttrs(Attrs.getPool().getFactory());
D.AddTypeInfo(DeclaratorChunk::getReference(0, IdentLoc, /*lvalue*/ false),
IdentLoc);
Decl *Var = ActOnDeclarator(S, D);
@@ -11786,13 +12185,16 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
while (prev && prev->isThisDeclarationADefinition())
prev = prev->getPreviousDecl();
- if (!prev)
+ if (!prev) {
Diag(var->getLocation(), diag::warn_missing_variable_declarations) << var;
+ Diag(var->getTypeSpecStartLoc(), diag::note_static_for_internal_linkage)
+ << /* variable */ 0;
+ }
}
// Cache the result of checking for constant initialization.
Optional<bool> CacheHasConstInit;
- const Expr *CacheCulprit;
+ const Expr *CacheCulprit = nullptr;
auto checkConstInit = [&]() mutable {
if (!CacheHasConstInit)
CacheHasConstInit = var->getInit()->isConstantInitializer(
@@ -11893,7 +12295,7 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
for (unsigned I = 0, N = Notes.size(); I != N; ++I)
Diag(Notes[I].first, Notes[I].second);
}
- } else if (var->isUsableInConstantExpressions(Context)) {
+ } else if (var->mightBeUsableInConstantExpressions(Context)) {
// Check whether the initializer of a const variable of integral or
// enumeration type is an ICE now, since we can't tell whether it was
// initialized by a constant expression if we check later.
@@ -12340,6 +12742,45 @@ void Sema::ActOnDocumentableDecls(ArrayRef<Decl *> Group) {
}
}
+/// Common checks for a parameter-declaration that should apply to both function
+/// parameters and non-type template parameters.
+void Sema::CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D) {
+ // Check that there are no default arguments inside the type of this
+ // parameter.
+ if (getLangOpts().CPlusPlus)
+ CheckExtraCXXDefaultArguments(D);
+
+ // Parameter declarators cannot be qualified (C++ [dcl.meaning]p1).
+ if (D.getCXXScopeSpec().isSet()) {
+ Diag(D.getIdentifierLoc(), diag::err_qualified_param_declarator)
+ << D.getCXXScopeSpec().getRange();
+ }
+
+ // [dcl.meaning]p1: An unqualified-id occurring in a declarator-id shall be a
+ // simple identifier except [...irrelevant cases...].
+ switch (D.getName().getKind()) {
+ case UnqualifiedIdKind::IK_Identifier:
+ break;
+
+ case UnqualifiedIdKind::IK_OperatorFunctionId:
+ case UnqualifiedIdKind::IK_ConversionFunctionId:
+ case UnqualifiedIdKind::IK_LiteralOperatorId:
+ case UnqualifiedIdKind::IK_ConstructorName:
+ case UnqualifiedIdKind::IK_DestructorName:
+ case UnqualifiedIdKind::IK_ImplicitSelfParam:
+ case UnqualifiedIdKind::IK_DeductionGuideName:
+ Diag(D.getIdentifierLoc(), diag::err_bad_parameter_name)
+ << GetNameForDeclarator(D).getName();
+ break;
+
+ case UnqualifiedIdKind::IK_TemplateId:
+ case UnqualifiedIdKind::IK_ConstructorTemplateId:
+ // GetNameForDeclarator would not produce a useful name in this case.
+ Diag(D.getIdentifierLoc(), diag::err_bad_parameter_name_template_id);
+ break;
+ }
+}
+
/// ActOnParamDeclarator - Called from Parser::ParseFunctionDeclarator()
/// to introduce parameters into function prototype scope.
Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
@@ -12374,40 +12815,19 @@ Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
if (DS.isInlineSpecified())
Diag(DS.getInlineSpecLoc(), diag::err_inline_non_function)
<< getLangOpts().CPlusPlus17;
- if (DS.isConstexprSpecified())
+ if (DS.hasConstexprSpecifier())
Diag(DS.getConstexprSpecLoc(), diag::err_invalid_constexpr)
- << 0;
+ << 0 << (D.getDeclSpec().getConstexprSpecifier() == CSK_consteval);
DiagnoseFunctionSpecifiers(DS);
+ CheckFunctionOrTemplateParamDeclarator(S, D);
+
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
QualType parmDeclType = TInfo->getType();
- if (getLangOpts().CPlusPlus) {
- // Check that there are no default arguments inside the type of this
- // parameter.
- CheckExtraCXXDefaultArguments(D);
-
- // Parameter declarators cannot be qualified (C++ [dcl.meaning]p1).
- if (D.getCXXScopeSpec().isSet()) {
- Diag(D.getIdentifierLoc(), diag::err_qualified_param_declarator)
- << D.getCXXScopeSpec().getRange();
- D.getCXXScopeSpec().clear();
- }
- }
-
- // Ensure we have a valid name
- IdentifierInfo *II = nullptr;
- if (D.hasName()) {
- II = D.getIdentifier();
- if (!II) {
- Diag(D.getIdentifierLoc(), diag::err_bad_parameter_name)
- << GetNameForDeclarator(D).getName();
- D.setInvalidType(true);
- }
- }
-
// Check for redeclaration of parameters, e.g. int foo(int x, int x);
+ IdentifierInfo *II = D.getIdentifier();
if (II) {
LookupResult R(*this, II, D.getIdentifierLoc(), LookupOrdinaryName,
ForVisibleRedeclaration);
@@ -12538,9 +12958,13 @@ ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
// - otherwise, it's an error
if (T->isArrayType()) {
if (!T.isConstQualified()) {
- DelayedDiagnostics.add(
- sema::DelayedDiagnostic::makeForbiddenType(
- NameLoc, diag::err_arc_array_param_no_ownership, T, false));
+ if (DelayedDiagnostics.shouldDelayDiagnostics())
+ DelayedDiagnostics.add(
+ sema::DelayedDiagnostic::makeForbiddenType(
+ NameLoc, diag::err_arc_array_param_no_ownership, T, false));
+ else
+ Diag(NameLoc, diag::err_arc_array_param_no_ownership)
+ << TSInfo->getTypeLoc().getSourceRange();
}
lifetime = Qualifiers::OCL_ExplicitNone;
} else {
@@ -12553,6 +12977,11 @@ ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
Context.getAdjustedParameterType(T),
TSInfo, SC, nullptr);
+ if (New->getType().hasNonTrivialToPrimitiveDestructCUnion() ||
+ New->getType().hasNonTrivialToPrimitiveCopyCUnion())
+ checkNonTrivialCUnion(New->getType(), New->getLocation(),
+ NTCUC_FunctionParam, NTCUK_Destruct|NTCUK_Copy);
+
// Parameters can not be abstract class types.
// For record types, this is done by the AbstractClassUsageDiagnoser once
// the class has been completely parsed.
@@ -12642,8 +13071,9 @@ void Sema::ActOnFinishInlineFunctionDef(FunctionDecl *D) {
Consumer.HandleInlineFunctionDefinition(D);
}
-static bool ShouldWarnAboutMissingPrototype(const FunctionDecl *FD,
- const FunctionDecl*& PossibleZeroParamPrototype) {
+static bool
+ShouldWarnAboutMissingPrototype(const FunctionDecl *FD,
+ const FunctionDecl *&PossiblePrototype) {
// Don't warn about invalid declarations.
if (FD->isInvalidDecl())
return false;
@@ -12680,7 +13110,6 @@ static bool ShouldWarnAboutMissingPrototype(const FunctionDecl *FD,
if (FD->isDeleted())
return false;
- bool MissingPrototype = true;
for (const FunctionDecl *Prev = FD->getPreviousDecl();
Prev; Prev = Prev->getPreviousDecl()) {
// Ignore any declarations that occur in function or method
@@ -12688,13 +13117,11 @@ static bool ShouldWarnAboutMissingPrototype(const FunctionDecl *FD,
if (Prev->getLexicalDeclContext()->isFunctionOrMethod())
continue;
- MissingPrototype = !Prev->getType()->isFunctionProtoType();
- if (FD->getNumParams() == 0)
- PossibleZeroParamPrototype = Prev;
- break;
+ PossiblePrototype = Prev;
+ return Prev->getType()->isFunctionNoProtoType();
}
- return MissingPrototype;
+ return true;
}
void
@@ -12834,14 +13261,14 @@ static void RebuildLambdaScopeInfo(CXXMethodDecl *CallOperator,
/*RefersToEnclosingVariableOrCapture*/true, C.getLocation(),
/*EllipsisLoc*/C.isPackExpansion()
? C.getEllipsisLoc() : SourceLocation(),
- CaptureType, /*Expr*/ nullptr);
+ CaptureType, /*Invalid*/false);
} else if (C.capturesThis()) {
- LSI->addThisCapture(/*Nested*/ false, C.getLocation(),
- /*Expr*/ nullptr,
- C.getCaptureKind() == LCK_StarThis);
+ LSI->addThisCapture(/*Nested*/ false, C.getLocation(), I->getType(),
+ C.getCaptureKind() == LCK_StarThis);
} else {
- LSI->addVLATypeCapture(C.getLocation(), I->getType());
+ LSI->addVLATypeCapture(C.getLocation(), I->getCapturedVLAType(),
+ I->getType());
}
++I;
}
@@ -13023,7 +13450,7 @@ void Sema::computeNRVO(Stmt *Body, FunctionScopeInfo *Scope) {
bool Sema::canDelayFunctionBody(const Declarator &D) {
// We can't delay parsing the body of a constexpr function template (yet).
- if (D.getDeclSpec().isConstexprSpecified())
+ if (D.getDeclSpec().hasConstexprSpecifier())
return false;
// We can't delay parsing the body of a function template with a deduced
@@ -13092,6 +13519,35 @@ private:
bool IsLambda = false;
};
+static void diagnoseImplicitlyRetainedSelf(Sema &S) {
+ llvm::DenseMap<const BlockDecl *, bool> EscapeInfo;
+
+ auto IsOrNestedInEscapingBlock = [&](const BlockDecl *BD) {
+ if (EscapeInfo.count(BD))
+ return EscapeInfo[BD];
+
+ bool R = false;
+ const BlockDecl *CurBD = BD;
+
+ do {
+ R = !CurBD->doesNotEscape();
+ if (R)
+ break;
+ CurBD = CurBD->getParent()->getInnermostBlockDecl();
+ } while (CurBD);
+
+ return EscapeInfo[BD] = R;
+ };
+
+  // If the location where 'self' is implicitly retained is inside an escaping
+  // block, emit a diagnostic.
+ for (const std::pair<SourceLocation, const BlockDecl *> &P :
+ S.ImplicitlyRetainedSelfLocs)
+ if (IsOrNestedInEscapingBlock(P.second))
+ S.Diag(P.first, diag::warn_implicitly_retains_self)
+ << FixItHint::CreateInsertion(P.first, "self->");
+}
+
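diagnoseImplicitlyRetainedSelf walks the locations recorded in ImplicitlyRetainedSelfLocs and warns only when the implicit retain sits inside a block that escapes (or is nested in one that does). A hedged Objective-C sketch, assuming the relevant warning group is enabled and an int ivar named _value exists:

    - (void)schedule:(void (^)(void))work;          // parameter is not noescape, so the block escapes
    - (void)run {
      [self schedule:^{ NSLog(@"%d", _value); }];   // warning: block implicitly retains 'self'; fix-it inserts "self->"
    }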
Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
bool IsInstantiation) {
FunctionDecl *FD = dcl ? dcl->getAsFunction() : nullptr;
@@ -13099,7 +13555,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
sema::AnalysisBasedWarnings::Policy WP = AnalysisWarnings.getDefaultPolicy();
sema::AnalysisBasedWarnings::Policy *ActivePolicy = nullptr;
- if (getLangOpts().CoroutinesTS && getCurFunction()->isCoroutine())
+ if (getLangOpts().Coroutines && getCurFunction()->isCoroutine())
CheckCompletedCoroutineBody(FD, Body);
// Do not call PopExpressionEvaluationContext() if it is a lambda because one
@@ -13156,7 +13612,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
// MSVC permits the use of pure specifier (=0) on function definition,
// defined at class scope, warn about this non-standard construct.
- if (getLangOpts().MicrosoftExt && FD->isPure() && FD->isCanonicalDecl())
+ if (getLangOpts().MicrosoftExt && FD->isPure() && !FD->isOutOfLine())
Diag(FD->getLocation(), diag::ext_pure_function_definition);
if (!FD->isInvalidDecl()) {
@@ -13185,22 +13641,30 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
// prototype declaration. This warning is issued even if the
// definition itself provides a prototype. The aim is to detect
// global functions that fail to be declared in header files.
- const FunctionDecl *PossibleZeroParamPrototype = nullptr;
- if (ShouldWarnAboutMissingPrototype(FD, PossibleZeroParamPrototype)) {
+ const FunctionDecl *PossiblePrototype = nullptr;
+ if (ShouldWarnAboutMissingPrototype(FD, PossiblePrototype)) {
Diag(FD->getLocation(), diag::warn_missing_prototype) << FD;
- if (PossibleZeroParamPrototype) {
+ if (PossiblePrototype) {
// We found a declaration that is not a prototype,
// but that could be a zero-parameter prototype
- if (TypeSourceInfo *TI =
- PossibleZeroParamPrototype->getTypeSourceInfo()) {
+ if (TypeSourceInfo *TI = PossiblePrototype->getTypeSourceInfo()) {
TypeLoc TL = TI->getTypeLoc();
if (FunctionNoProtoTypeLoc FTL = TL.getAs<FunctionNoProtoTypeLoc>())
- Diag(PossibleZeroParamPrototype->getLocation(),
+ Diag(PossiblePrototype->getLocation(),
diag::note_declaration_not_a_prototype)
- << PossibleZeroParamPrototype
- << FixItHint::CreateInsertion(FTL.getRParenLoc(), "void");
+ << (FD->getNumParams() != 0)
+ << (FD->getNumParams() == 0
+ ? FixItHint::CreateInsertion(FTL.getRParenLoc(), "void")
+ : FixItHint{});
}
+ } else {
+ Diag(FD->getTypeSpecStartLoc(), diag::note_static_for_internal_linkage)
+ << /* function */ 1
+ << (FD->getStorageClass() == SC_None
+ ? FixItHint::CreateInsertion(FD->getTypeSpecStartLoc(),
+ "static ")
+ : FixItHint{});
}
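The reworked missing-prototype diagnostics above now distinguish the zero-parameter case, where inserting "void" turns the earlier declaration into a prototype, from the case with no prior declaration at all, where a new note suggests internal linkage. A sketch in C with -Wmissing-prototypes enabled:

    int f();          /* earlier declaration, but not a prototype                 */
    int f(void) {     /* warning; the note points at the earlier declaration and, */
      return 0;       /* since f takes no parameters, offers the "void" fix-it    */
    }
    int g(int x) {    /* warning; no prior declaration, so the new note suggests  */
      return x;       /* "static" to give the definition internal linkage         */
    }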
// GNU warning -Wstrict-prototypes
@@ -13256,8 +13720,6 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
assert(MD == getCurMethodDecl() && "Method parsing confused");
MD->setBody(Body);
if (!MD->isInvalidDecl()) {
- if (!MD->hasSkippedBody())
- DiagnoseUnusedParameters(MD->parameters());
DiagnoseSizeOfParametersAndReturnValue(MD->parameters(),
MD->getReturnType(), MD);
@@ -13303,6 +13765,8 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
diag::warn_objc_secondary_init_missing_init_call);
getCurFunction()->ObjCWarnForNoInitDelegation = false;
}
+
+ diagnoseImplicitlyRetainedSelf(*this);
} else {
// Parsing the function declaration failed in some way. Pop the fake scope
// we pushed on.
@@ -13484,10 +13948,10 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
// function declaration is going to be treated as an error.
if (Diags.getDiagnosticLevel(diag_id, Loc) >= DiagnosticsEngine::Error) {
TypoCorrection Corrected;
- if (S &&
- (Corrected = CorrectTypo(
- DeclarationNameInfo(&II, Loc), LookupOrdinaryName, S, nullptr,
- llvm::make_unique<DeclFilterCCC<FunctionDecl>>(), CTK_NonError)))
+ DeclFilterCCC<FunctionDecl> CCC{};
+ if (S && (Corrected =
+ CorrectTypo(DeclarationNameInfo(&II, Loc), LookupOrdinaryName,
+ S, nullptr, CCC, CTK_NonError)))
diagnoseTypo(Corrected, PDiag(diag::note_function_suggestion),
/*ErrorRecovery*/false);
}
@@ -13576,6 +14040,13 @@ void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
FD->getLocation()));
}
+ // Handle automatically recognized callbacks.
+ SmallVector<int, 4> Encoding;
+ if (!FD->hasAttr<CallbackAttr>() &&
+ Context.BuiltinInfo.performsCallback(BuiltinID, Encoding))
+ FD->addAttr(CallbackAttr::CreateImplicit(
+ Context, Encoding.data(), Encoding.size(), FD->getLocation()));
+
// Mark const if we don't care about errno and that is the only thing
// preventing the function from being const. This allows IRgen to use LLVM
// intrinsics for such functions.
@@ -14743,8 +15214,7 @@ CreateNewDecl:
// If this is an undefined enum, warn.
if (TUK != TUK_Definition && !Invalid) {
TagDecl *Def;
- if (IsFixed && (getLangOpts().CPlusPlus11 || getLangOpts().ObjC) &&
- cast<EnumDecl>(New)->isFixed()) {
+ if (IsFixed && cast<EnumDecl>(New)->isFixed()) {
// C++0x: 7.2p2: opaque-enum-declaration.
// Conflicts are diagnosed above. Do nothing.
}
@@ -15758,7 +16228,6 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
// Verify that all the fields are okay.
SmallVector<FieldDecl*, 32> RecFields;
- bool ObjCFieldLifetimeErrReported = false;
for (ArrayRef<Decl *>::iterator i = Fields.begin(), end = Fields.end();
i != end; ++i) {
FieldDecl *FD = cast<FieldDecl>(*i);
@@ -15903,27 +16372,6 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
<< FixItHint::CreateInsertion(FD->getLocation(), "*");
QualType T = Context.getObjCObjectPointerType(FD->getType());
FD->setType(T);
- } else if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers() &&
- Record && !ObjCFieldLifetimeErrReported && Record->isUnion()) {
- // It's an error in ARC or Weak if a field has lifetime.
- // We don't want to report this in a system header, though,
- // so we just make the field unavailable.
- // FIXME: that's really not sufficient; we need to make the type
- // itself invalid to, say, initialize or copy.
- QualType T = FD->getType();
- if (T.hasNonTrivialObjCLifetime()) {
- SourceLocation loc = FD->getLocation();
- if (getSourceManager().isInSystemHeader(loc)) {
- if (!FD->hasAttr<UnavailableAttr>()) {
- FD->addAttr(UnavailableAttr::CreateImplicit(Context, "",
- UnavailableAttr::IR_ARCFieldWithOwnership, loc));
- }
- } else {
- Diag(FD->getLocation(), diag::err_arc_objc_object_in_tag)
- << T->isBlockPointerType() << Record->getTagKind();
- }
- ObjCFieldLifetimeErrReported = true;
- }
} else if (getLangOpts().ObjC &&
getLangOpts().getGC() != LangOptions::NonGC &&
Record && !Record->hasObjectMember()) {
@@ -15943,14 +16391,23 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
if (Record && !getLangOpts().CPlusPlus && !FD->hasAttr<UnavailableAttr>()) {
QualType FT = FD->getType();
- if (FT.isNonTrivialToPrimitiveDefaultInitialize())
+ if (FT.isNonTrivialToPrimitiveDefaultInitialize()) {
Record->setNonTrivialToPrimitiveDefaultInitialize(true);
+ if (FT.hasNonTrivialToPrimitiveDefaultInitializeCUnion() ||
+ Record->isUnion())
+ Record->setHasNonTrivialToPrimitiveDefaultInitializeCUnion(true);
+ }
QualType::PrimitiveCopyKind PCK = FT.isNonTrivialToPrimitiveCopy();
- if (PCK != QualType::PCK_Trivial && PCK != QualType::PCK_VolatileTrivial)
+ if (PCK != QualType::PCK_Trivial && PCK != QualType::PCK_VolatileTrivial) {
Record->setNonTrivialToPrimitiveCopy(true);
+ if (FT.hasNonTrivialToPrimitiveCopyCUnion() || Record->isUnion())
+ Record->setHasNonTrivialToPrimitiveCopyCUnion(true);
+ }
if (FT.isDestructedType()) {
Record->setNonTrivialToPrimitiveDestroy(true);
Record->setParamDestroyedInCallee(true);
+ if (FT.hasNonTrivialToPrimitiveDestructCUnion() || Record->isUnion())
+ Record->setHasNonTrivialToPrimitiveDestructCUnion(true);
}
if (const auto *RT = FT->getAs<RecordType>()) {
@@ -16907,390 +17364,6 @@ Decl *Sema::ActOnFileScopeAsmDecl(Expr *expr,
return New;
}
-static void checkModuleImportContext(Sema &S, Module *M,
- SourceLocation ImportLoc, DeclContext *DC,
- bool FromInclude = false) {
- SourceLocation ExternCLoc;
-
- if (auto *LSD = dyn_cast<LinkageSpecDecl>(DC)) {
- switch (LSD->getLanguage()) {
- case LinkageSpecDecl::lang_c:
- if (ExternCLoc.isInvalid())
- ExternCLoc = LSD->getBeginLoc();
- break;
- case LinkageSpecDecl::lang_cxx:
- break;
- }
- DC = LSD->getParent();
- }
-
- while (isa<LinkageSpecDecl>(DC) || isa<ExportDecl>(DC))
- DC = DC->getParent();
-
- if (!isa<TranslationUnitDecl>(DC)) {
- S.Diag(ImportLoc, (FromInclude && S.isModuleVisible(M))
- ? diag::ext_module_import_not_at_top_level_noop
- : diag::err_module_import_not_at_top_level_fatal)
- << M->getFullModuleName() << DC;
- S.Diag(cast<Decl>(DC)->getBeginLoc(),
- diag::note_module_import_not_at_top_level)
- << DC;
- } else if (!M->IsExternC && ExternCLoc.isValid()) {
- S.Diag(ImportLoc, diag::ext_module_import_in_extern_c)
- << M->getFullModuleName();
- S.Diag(ExternCLoc, diag::note_extern_c_begins_here);
- }
-}
-
-Sema::DeclGroupPtrTy Sema::ActOnModuleDecl(SourceLocation StartLoc,
- SourceLocation ModuleLoc,
- ModuleDeclKind MDK,
- ModuleIdPath Path) {
- assert(getLangOpts().ModulesTS &&
- "should only have module decl in modules TS");
-
- // A module implementation unit requires that we are not compiling a module
- // of any kind. A module interface unit requires that we are not compiling a
- // module map.
- switch (getLangOpts().getCompilingModule()) {
- case LangOptions::CMK_None:
- // It's OK to compile a module interface as a normal translation unit.
- break;
-
- case LangOptions::CMK_ModuleInterface:
- if (MDK != ModuleDeclKind::Implementation)
- break;
-
- // We were asked to compile a module interface unit but this is a module
- // implementation unit. That indicates the 'export' is missing.
- Diag(ModuleLoc, diag::err_module_interface_implementation_mismatch)
- << FixItHint::CreateInsertion(ModuleLoc, "export ");
- MDK = ModuleDeclKind::Interface;
- break;
-
- case LangOptions::CMK_ModuleMap:
- Diag(ModuleLoc, diag::err_module_decl_in_module_map_module);
- return nullptr;
-
- case LangOptions::CMK_HeaderModule:
- Diag(ModuleLoc, diag::err_module_decl_in_header_module);
- return nullptr;
- }
-
- assert(ModuleScopes.size() == 1 && "expected to be at global module scope");
-
- // FIXME: Most of this work should be done by the preprocessor rather than
- // here, in order to support macro import.
-
- // Only one module-declaration is permitted per source file.
- if (ModuleScopes.back().Module->Kind == Module::ModuleInterfaceUnit) {
- Diag(ModuleLoc, diag::err_module_redeclaration);
- Diag(VisibleModules.getImportLoc(ModuleScopes.back().Module),
- diag::note_prev_module_declaration);
- return nullptr;
- }
-
- // Flatten the dots in a module name. Unlike Clang's hierarchical module map
- // modules, the dots here are just another character that can appear in a
- // module name.
- std::string ModuleName;
- for (auto &Piece : Path) {
- if (!ModuleName.empty())
- ModuleName += ".";
- ModuleName += Piece.first->getName();
- }
-
- // If a module name was explicitly specified on the command line, it must be
- // correct.
- if (!getLangOpts().CurrentModule.empty() &&
- getLangOpts().CurrentModule != ModuleName) {
- Diag(Path.front().second, diag::err_current_module_name_mismatch)
- << SourceRange(Path.front().second, Path.back().second)
- << getLangOpts().CurrentModule;
- return nullptr;
- }
- const_cast<LangOptions&>(getLangOpts()).CurrentModule = ModuleName;
-
- auto &Map = PP.getHeaderSearchInfo().getModuleMap();
- Module *Mod;
-
- switch (MDK) {
- case ModuleDeclKind::Interface: {
- // We can't have parsed or imported a definition of this module or parsed a
- // module map defining it already.
- if (auto *M = Map.findModule(ModuleName)) {
- Diag(Path[0].second, diag::err_module_redefinition) << ModuleName;
- if (M->DefinitionLoc.isValid())
- Diag(M->DefinitionLoc, diag::note_prev_module_definition);
- else if (const auto *FE = M->getASTFile())
- Diag(M->DefinitionLoc, diag::note_prev_module_definition_from_ast_file)
- << FE->getName();
- Mod = M;
- break;
- }
-
- // Create a Module for the module that we're defining.
- Mod = Map.createModuleForInterfaceUnit(ModuleLoc, ModuleName,
- ModuleScopes.front().Module);
- assert(Mod && "module creation should not fail");
- break;
- }
-
- case ModuleDeclKind::Partition:
- // FIXME: Check we are in a submodule of the named module.
- return nullptr;
-
- case ModuleDeclKind::Implementation:
- std::pair<IdentifierInfo *, SourceLocation> ModuleNameLoc(
- PP.getIdentifierInfo(ModuleName), Path[0].second);
- Mod = getModuleLoader().loadModule(ModuleLoc, {ModuleNameLoc},
- Module::AllVisible,
- /*IsIncludeDirective=*/false);
- if (!Mod) {
- Diag(ModuleLoc, diag::err_module_not_defined) << ModuleName;
- // Create an empty module interface unit for error recovery.
- Mod = Map.createModuleForInterfaceUnit(ModuleLoc, ModuleName,
- ModuleScopes.front().Module);
- }
- break;
- }
-
- // Switch from the global module to the named module.
- ModuleScopes.back().Module = Mod;
- ModuleScopes.back().ModuleInterface = MDK != ModuleDeclKind::Implementation;
- VisibleModules.setVisible(Mod, ModuleLoc);
-
- // From now on, we have an owning module for all declarations we see.
- // However, those declarations are module-private unless explicitly
- // exported.
- auto *TU = Context.getTranslationUnitDecl();
- TU->setModuleOwnershipKind(Decl::ModuleOwnershipKind::ModulePrivate);
- TU->setLocalOwningModule(Mod);
-
- // FIXME: Create a ModuleDecl.
- return nullptr;
-}
-
-DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
- SourceLocation ImportLoc,
- ModuleIdPath Path) {
- // Flatten the module path for a Modules TS module name.
- std::pair<IdentifierInfo *, SourceLocation> ModuleNameLoc;
- if (getLangOpts().ModulesTS) {
- std::string ModuleName;
- for (auto &Piece : Path) {
- if (!ModuleName.empty())
- ModuleName += ".";
- ModuleName += Piece.first->getName();
- }
- ModuleNameLoc = {PP.getIdentifierInfo(ModuleName), Path[0].second};
- Path = ModuleIdPath(ModuleNameLoc);
- }
-
- Module *Mod =
- getModuleLoader().loadModule(ImportLoc, Path, Module::AllVisible,
- /*IsIncludeDirective=*/false);
- if (!Mod)
- return true;
-
- VisibleModules.setVisible(Mod, ImportLoc);
-
- checkModuleImportContext(*this, Mod, ImportLoc, CurContext);
-
- // FIXME: we should support importing a submodule within a different submodule
- // of the same top-level module. Until we do, make it an error rather than
- // silently ignoring the import.
- // Import-from-implementation is valid in the Modules TS. FIXME: Should we
- // warn on a redundant import of the current module?
- if (Mod->getTopLevelModuleName() == getLangOpts().CurrentModule &&
- (getLangOpts().isCompilingModule() || !getLangOpts().ModulesTS))
- Diag(ImportLoc, getLangOpts().isCompilingModule()
- ? diag::err_module_self_import
- : diag::err_module_import_in_implementation)
- << Mod->getFullModuleName() << getLangOpts().CurrentModule;
-
- SmallVector<SourceLocation, 2> IdentifierLocs;
- Module *ModCheck = Mod;
- for (unsigned I = 0, N = Path.size(); I != N; ++I) {
- // If we've run out of module parents, just drop the remaining identifiers.
- // We need the length to be consistent.
- if (!ModCheck)
- break;
- ModCheck = ModCheck->Parent;
-
- IdentifierLocs.push_back(Path[I].second);
- }
-
- ImportDecl *Import = ImportDecl::Create(Context, CurContext, StartLoc,
- Mod, IdentifierLocs);
- if (!ModuleScopes.empty())
- Context.addModuleInitializer(ModuleScopes.back().Module, Import);
- CurContext->addDecl(Import);
-
- // Re-export the module if needed.
- if (Import->isExported() &&
- !ModuleScopes.empty() && ModuleScopes.back().ModuleInterface)
- getCurrentModule()->Exports.emplace_back(Mod, false);
-
- return Import;
-}
-
-void Sema::ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
- checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext, true);
- BuildModuleInclude(DirectiveLoc, Mod);
-}
-
-void Sema::BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
- // Determine whether we're in the #include buffer for a module. The #includes
- // in that buffer do not qualify as module imports; they're just an
- // implementation detail of us building the module.
- //
- // FIXME: Should we even get ActOnModuleInclude calls for those?
- bool IsInModuleIncludes =
- TUKind == TU_Module &&
- getSourceManager().isWrittenInMainFile(DirectiveLoc);
-
- bool ShouldAddImport = !IsInModuleIncludes;
-
- // If this module import was due to an inclusion directive, create an
- // implicit import declaration to capture it in the AST.
- if (ShouldAddImport) {
- TranslationUnitDecl *TU = getASTContext().getTranslationUnitDecl();
- ImportDecl *ImportD = ImportDecl::CreateImplicit(getASTContext(), TU,
- DirectiveLoc, Mod,
- DirectiveLoc);
- if (!ModuleScopes.empty())
- Context.addModuleInitializer(ModuleScopes.back().Module, ImportD);
- TU->addDecl(ImportD);
- Consumer.HandleImplicitImportDecl(ImportD);
- }
-
- getModuleLoader().makeModuleVisible(Mod, Module::AllVisible, DirectiveLoc);
- VisibleModules.setVisible(Mod, DirectiveLoc);
-}
-
-void Sema::ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod) {
- checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext, true);
-
- ModuleScopes.push_back({});
- ModuleScopes.back().Module = Mod;
- if (getLangOpts().ModulesLocalVisibility)
- ModuleScopes.back().OuterVisibleModules = std::move(VisibleModules);
-
- VisibleModules.setVisible(Mod, DirectiveLoc);
-
- // The enclosing context is now part of this module.
- // FIXME: Consider creating a child DeclContext to hold the entities
- // lexically within the module.
- if (getLangOpts().trackLocalOwningModule()) {
- for (auto *DC = CurContext; DC; DC = DC->getLexicalParent()) {
- cast<Decl>(DC)->setModuleOwnershipKind(
- getLangOpts().ModulesLocalVisibility
- ? Decl::ModuleOwnershipKind::VisibleWhenImported
- : Decl::ModuleOwnershipKind::Visible);
- cast<Decl>(DC)->setLocalOwningModule(Mod);
- }
- }
-}
-
-void Sema::ActOnModuleEnd(SourceLocation EomLoc, Module *Mod) {
- if (getLangOpts().ModulesLocalVisibility) {
- VisibleModules = std::move(ModuleScopes.back().OuterVisibleModules);
- // Leaving a module hides namespace names, so our visible namespace cache
- // is now out of date.
- VisibleNamespaceCache.clear();
- }
-
- assert(!ModuleScopes.empty() && ModuleScopes.back().Module == Mod &&
- "left the wrong module scope");
- ModuleScopes.pop_back();
-
- // We got to the end of processing a local module. Create an
- // ImportDecl as we would for an imported module.
- FileID File = getSourceManager().getFileID(EomLoc);
- SourceLocation DirectiveLoc;
- if (EomLoc == getSourceManager().getLocForEndOfFile(File)) {
- // We reached the end of a #included module header. Use the #include loc.
- assert(File != getSourceManager().getMainFileID() &&
- "end of submodule in main source file");
- DirectiveLoc = getSourceManager().getIncludeLoc(File);
- } else {
- // We reached an EOM pragma. Use the pragma location.
- DirectiveLoc = EomLoc;
- }
- BuildModuleInclude(DirectiveLoc, Mod);
-
- // Any further declarations are in whatever module we returned to.
- if (getLangOpts().trackLocalOwningModule()) {
- // The parser guarantees that this is the same context that we entered
- // the module within.
- for (auto *DC = CurContext; DC; DC = DC->getLexicalParent()) {
- cast<Decl>(DC)->setLocalOwningModule(getCurrentModule());
- if (!getCurrentModule())
- cast<Decl>(DC)->setModuleOwnershipKind(
- Decl::ModuleOwnershipKind::Unowned);
- }
- }
-}
-
-void Sema::createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
- Module *Mod) {
- // Bail if we're not allowed to implicitly import a module here.
- if (isSFINAEContext() || !getLangOpts().ModulesErrorRecovery ||
- VisibleModules.isVisible(Mod))
- return;
-
- // Create the implicit import declaration.
- TranslationUnitDecl *TU = getASTContext().getTranslationUnitDecl();
- ImportDecl *ImportD = ImportDecl::CreateImplicit(getASTContext(), TU,
- Loc, Mod, Loc);
- TU->addDecl(ImportD);
- Consumer.HandleImplicitImportDecl(ImportD);
-
- // Make the module visible.
- getModuleLoader().makeModuleVisible(Mod, Module::AllVisible, Loc);
- VisibleModules.setVisible(Mod, Loc);
-}
-
-/// We have parsed the start of an export declaration, including the '{'
-/// (if present).
-Decl *Sema::ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
- SourceLocation LBraceLoc) {
- ExportDecl *D = ExportDecl::Create(Context, CurContext, ExportLoc);
-
- // C++ Modules TS draft:
- // An export-declaration shall appear in the purview of a module other than
- // the global module.
- if (ModuleScopes.empty() || !ModuleScopes.back().ModuleInterface)
- Diag(ExportLoc, diag::err_export_not_in_module_interface);
-
- // An export-declaration [...] shall not contain more than one
- // export keyword.
- //
- // The intent here is that an export-declaration cannot appear within another
- // export-declaration.
- if (D->isExported())
- Diag(ExportLoc, diag::err_export_within_export);
-
- CurContext->addDecl(D);
- PushDeclContext(S, D);
- D->setModuleOwnershipKind(Decl::ModuleOwnershipKind::VisibleWhenImported);
- return D;
-}
-
-/// Complete the definition of an export declaration.
-Decl *Sema::ActOnFinishExportDecl(Scope *S, Decl *D, SourceLocation RBraceLoc) {
- auto *ED = cast<ExportDecl>(D);
- if (RBraceLoc.isValid())
- ED->setRBraceLoc(RBraceLoc);
-
- // FIXME: Diagnose export of internal-linkage declaration (including
- // anonymous namespace).
-
- PopDeclContext();
- return D;
-}
-
void Sema::ActOnPragmaRedefineExtname(IdentifierInfo* Name,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
diff --git a/lib/Sema/SemaDeclAttr.cpp b/lib/Sema/SemaDeclAttr.cpp
index 0e10804a2ec7..ee06f8ae5114 100644
--- a/lib/Sema/SemaDeclAttr.cpp
+++ b/lib/Sema/SemaDeclAttr.cpp
@@ -1,9 +1,8 @@
//===--- SemaDeclAttr.cpp - Declaration Attribute Handling ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -246,11 +245,11 @@ static bool checkUInt32Argument(Sema &S, const AttrInfo &AI, const Expr *Expr,
!Expr->isIntegerConstantExpr(I, S.Context)) {
if (Idx != UINT_MAX)
S.Diag(getAttrLoc(AI), diag::err_attribute_argument_n_type)
- << AI << Idx << AANT_ArgumentIntegerConstant
+ << &AI << Idx << AANT_ArgumentIntegerConstant
<< Expr->getSourceRange();
else
S.Diag(getAttrLoc(AI), diag::err_attribute_argument_type)
- << AI << AANT_ArgumentIntegerConstant << Expr->getSourceRange();
+ << &AI << AANT_ArgumentIntegerConstant << Expr->getSourceRange();
return false;
}
@@ -262,7 +261,7 @@ static bool checkUInt32Argument(Sema &S, const AttrInfo &AI, const Expr *Expr,
if (StrictlyUnsigned && I.isSigned() && I.isNegative()) {
S.Diag(getAttrLoc(AI), diag::err_attribute_requires_positive_integer)
- << AI << /*non-negative*/ 1;
+ << &AI << /*non-negative*/ 1;
return false;
}
@@ -717,7 +716,8 @@ static void checkAttrArgsAreCapabilityObjs(Sema &S, Decl *D,
uint64_t ParamIdxFromOne = ArgValue.getZExtValue();
uint64_t ParamIdxFromZero = ParamIdxFromOne - 1;
if (!ArgValue.isStrictlyPositive() || ParamIdxFromOne > NumParams) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_range)
+ S.Diag(AL.getLoc(),
+ diag::err_attribute_argument_out_of_bounds_extra_info)
<< AL << Idx + 1 << NumParams;
continue;
}
@@ -900,7 +900,7 @@ static void handleAllocSizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
const Expr *SizeExpr = AL.getArgAsExpr(0);
int SizeArgNoVal;
// Parameter indices are 1-indexed, hence Index=1
- if (!checkPositiveIntArgument(S, AL, SizeExpr, SizeArgNoVal, /*Index=*/1))
+ if (!checkPositiveIntArgument(S, AL, SizeExpr, SizeArgNoVal, /*Idx=*/1))
return;
if (!checkParamIsIntegerType(S, FD, AL, /*AttrArgNo=*/0))
return;
@@ -911,7 +911,7 @@ static void handleAllocSizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
const Expr *NumberExpr = AL.getArgAsExpr(1);
int Val;
// Parameter indices are 1-based, hence Index=2
- if (!checkPositiveIntArgument(S, AL, NumberExpr, Val, /*Index=*/2))
+ if (!checkPositiveIntArgument(S, AL, NumberExpr, Val, /*Idx=*/2))
return;
if (!checkParamIsIntegerType(S, FD, AL, /*AttrArgNo=*/1))
return;
@@ -1119,7 +1119,7 @@ static void handlePassObjectSizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// __builtin_object_size. So, it has the same constraints as that second
// argument; namely, it must be in the range [0, 3].
if (Type > 3) {
- S.Diag(E->getBeginLoc(), diag::err_attribute_argument_outof_range)
+ S.Diag(E->getBeginLoc(), diag::err_attribute_argument_out_of_range)
<< AL << 0 << 3 << E->getSourceRange();
return;
}
@@ -1673,7 +1673,7 @@ void Sema::AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
ParamIdx Idx;
const auto *FuncDecl = cast<FunctionDecl>(D);
if (!checkFunctionOrMethodParameterIndex(*this, FuncDecl, TmpAttr,
- /*AttrArgNo=*/1, ParamExpr, Idx))
+ /*AttrArgNum=*/1, ParamExpr, Idx))
return;
QualType Ty = getFunctionOrMethodParamType(D, Idx.getASTIndex());
@@ -2284,18 +2284,11 @@ static bool versionsMatch(const VersionTuple &X, const VersionTuple &Y,
return false;
}
-AvailabilityAttr *Sema::mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
- IdentifierInfo *Platform,
- bool Implicit,
- VersionTuple Introduced,
- VersionTuple Deprecated,
- VersionTuple Obsoleted,
- bool IsUnavailable,
- StringRef Message,
- bool IsStrict,
- StringRef Replacement,
- AvailabilityMergeKind AMK,
- unsigned AttrSpellingListIndex) {
+AvailabilityAttr *Sema::mergeAvailabilityAttr(
+ NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit,
+ VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted,
+ bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement,
+ AvailabilityMergeKind AMK, int Priority, unsigned AttrSpellingListIndex) {
VersionTuple MergedIntroduced = Introduced;
VersionTuple MergedDeprecated = Deprecated;
VersionTuple MergedObsoleted = Obsoleted;
@@ -2329,16 +2322,15 @@ AvailabilityAttr *Sema::mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
}
// If there is an existing availability attribute for this platform that
- // is explicit and the new one is implicit use the explicit one and
- // discard the new implicit attribute.
- if (!OldAA->isImplicit() && Implicit) {
+ // has a lower priority use the existing one and discard the new
+ // attribute.
+ if (OldAA->getPriority() < Priority)
return nullptr;
- }
- // If there is an existing attribute for this platform that is implicit
- // and the new attribute is explicit then erase the old one and
- // continue processing the attributes.
- if (!Implicit && OldAA->isImplicit()) {
+ // If there is an existing attribute for this platform that has a higher
+ // priority than the new attribute then erase the old one and continue
+ // processing the attributes.
+ if (OldAA->getPriority() > Priority) {
Attrs.erase(Attrs.begin() + i);
--e;
continue;
@@ -2437,11 +2429,10 @@ AvailabilityAttr *Sema::mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
if (!checkAvailabilityAttr(*this, Range, Platform, MergedIntroduced,
MergedDeprecated, MergedObsoleted) &&
!OverrideOrImpl) {
- auto *Avail = ::new (Context) AvailabilityAttr(Range, Context, Platform,
- Introduced, Deprecated,
- Obsoleted, IsUnavailable, Message,
- IsStrict, Replacement,
- AttrSpellingListIndex);
+ auto *Avail = ::new (Context)
+ AvailabilityAttr(Range, Context, Platform, Introduced, Deprecated,
+ Obsoleted, IsUnavailable, Message, IsStrict,
+ Replacement, Priority, AttrSpellingListIndex);
Avail->setImplicit(Implicit);
return Avail;
}
@@ -2484,15 +2475,13 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
}
- AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(ND, AL.getRange(), II,
- false/*Implicit*/,
- Introduced.Version,
- Deprecated.Version,
- Obsoleted.Version,
- IsUnavailable, Str,
- IsStrict, Replacement,
- Sema::AMK_None,
- Index);
+ int PriorityModifier = AL.isPragmaClangAttribute()
+ ? Sema::AP_PragmaClangAttribute
+ : Sema::AP_Explicit;
+ AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(
+ ND, AL.getRange(), II, false /*Implicit*/, Introduced.Version,
+ Deprecated.Version, Obsoleted.Version, IsUnavailable, Str, IsStrict,
+ Replacement, Sema::AMK_None, PriorityModifier, Index);
if (NewAttr)
D->addAttr(NewAttr);
@@ -2519,6 +2508,7 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
else
return VersionTuple(NewMajor, Version.getMinor().getValue());
}
+ return VersionTuple(NewMajor);
}
return VersionTuple(2, 0);
@@ -2528,18 +2518,11 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
auto NewDeprecated = adjustWatchOSVersion(Deprecated.Version);
auto NewObsoleted = adjustWatchOSVersion(Obsoleted.Version);
- AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(ND,
- AL.getRange(),
- NewII,
- true/*Implicit*/,
- NewIntroduced,
- NewDeprecated,
- NewObsoleted,
- IsUnavailable, Str,
- IsStrict,
- Replacement,
- Sema::AMK_None,
- Index);
+ AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(
+ ND, AL.getRange(), NewII, true /*Implicit*/, NewIntroduced,
+ NewDeprecated, NewObsoleted, IsUnavailable, Str, IsStrict,
+ Replacement, Sema::AMK_None,
+ PriorityModifier + Sema::AP_InferredFromOtherPlatform, Index);
if (NewAttr)
D->addAttr(NewAttr);
}
@@ -2553,20 +2536,13 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
NewII = &S.Context.Idents.get("tvos_app_extension");
if (NewII) {
- AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(ND,
- AL.getRange(),
- NewII,
- true/*Implicit*/,
- Introduced.Version,
- Deprecated.Version,
- Obsoleted.Version,
- IsUnavailable, Str,
- IsStrict,
- Replacement,
- Sema::AMK_None,
- Index);
- if (NewAttr)
- D->addAttr(NewAttr);
+ AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(
+ ND, AL.getRange(), NewII, true /*Implicit*/, Introduced.Version,
+ Deprecated.Version, Obsoleted.Version, IsUnavailable, Str, IsStrict,
+ Replacement, Sema::AMK_None,
+ PriorityModifier + Sema::AP_InferredFromOtherPlatform, Index);
+ if (NewAttr)
+ D->addAttr(NewAttr);
}
}
}
@@ -3035,13 +3011,18 @@ static void handleSectionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(NewAttr);
}
-static bool checkCodeSegName(Sema&S, SourceLocation LiteralLoc, StringRef CodeSegName) {
- std::string Error = S.Context.getTargetInfo().isValidSectionSpecifier(CodeSegName);
+// This is used for `__declspec(code_seg("segname"))` on a decl.
+// `#pragma code_seg("segname")` uses checkSectionName() instead.
+static bool checkCodeSegName(Sema &S, SourceLocation LiteralLoc,
+ StringRef CodeSegName) {
+ std::string Error =
+ S.Context.getTargetInfo().isValidSectionSpecifier(CodeSegName);
if (!Error.empty()) {
- S.Diag(LiteralLoc, diag::err_attribute_section_invalid_for_target) << Error
- << 0 /*'code-seg'*/;
+ S.Diag(LiteralLoc, diag::err_attribute_section_invalid_for_target)
+ << Error << 0 /*'code-seg'*/;
return false;
}
+
return true;
}
@@ -3325,7 +3306,7 @@ static void handleInitPriorityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
if (prioritynum < 101 || prioritynum > 65535) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_outof_range)
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_range)
<< E->getSourceRange() << AL << 101 << 65535;
AL.setInvalid();
return;
@@ -3480,6 +3461,146 @@ static void handleFormatAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(NewAttr);
}
+/// Handle __attribute__((callback(CalleeIdx, PayloadIdx0, ...))) attributes.
+static void handleCallbackAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ // The index that identifies the callback callee is mandatory.
+ if (AL.getNumArgs() == 0) {
+ S.Diag(AL.getLoc(), diag::err_callback_attribute_no_callee)
+ << AL.getRange();
+ return;
+ }
+
+ bool HasImplicitThisParam = isInstanceMethod(D);
+ int32_t NumArgs = getFunctionOrMethodNumParams(D);
+
+ FunctionDecl *FD = D->getAsFunction();
+ assert(FD && "Expected a function declaration!");
+
+ llvm::StringMap<int> NameIdxMapping;
+ NameIdxMapping["__"] = -1;
+
+ NameIdxMapping["this"] = 0;
+
+ int Idx = 1;
+ for (const ParmVarDecl *PVD : FD->parameters())
+ NameIdxMapping[PVD->getName()] = Idx++;
+
+ auto UnknownName = NameIdxMapping.end();
+
+ SmallVector<int, 8> EncodingIndices;
+ for (unsigned I = 0, E = AL.getNumArgs(); I < E; ++I) {
+ SourceRange SR;
+ int32_t ArgIdx;
+
+ if (AL.isArgIdent(I)) {
+ IdentifierLoc *IdLoc = AL.getArgAsIdent(I);
+ auto It = NameIdxMapping.find(IdLoc->Ident->getName());
+ if (It == UnknownName) {
+ S.Diag(AL.getLoc(), diag::err_callback_attribute_argument_unknown)
+ << IdLoc->Ident << IdLoc->Loc;
+ return;
+ }
+
+ SR = SourceRange(IdLoc->Loc);
+ ArgIdx = It->second;
+ } else if (AL.isArgExpr(I)) {
+ Expr *IdxExpr = AL.getArgAsExpr(I);
+
+ // If the expression is not parseable as an int32_t we have a problem.
+ if (!checkUInt32Argument(S, AL, IdxExpr, (uint32_t &)ArgIdx, I + 1,
+ false)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << AL << (I + 1) << IdxExpr->getSourceRange();
+ return;
+ }
+
+ // Check oob, excluding the special values, 0 and -1.
+ if (ArgIdx < -1 || ArgIdx > NumArgs) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << AL << (I + 1) << IdxExpr->getSourceRange();
+ return;
+ }
+
+ SR = IdxExpr->getSourceRange();
+ } else {
+ llvm_unreachable("Unexpected ParsedAttr argument type!");
+ }
+
+ if (ArgIdx == 0 && !HasImplicitThisParam) {
+ S.Diag(AL.getLoc(), diag::err_callback_implicit_this_not_available)
+ << (I + 1) << SR;
+ return;
+ }
+
+    // Adjust for the case where we do not have an implicit "this" parameter. In
+    // that case, decrease all positive values by 1 to get LLVM argument indices.
+ if (!HasImplicitThisParam && ArgIdx > 0)
+ ArgIdx -= 1;
+
+ EncodingIndices.push_back(ArgIdx);
+ }
+
+ int CalleeIdx = EncodingIndices.front();
+ // Check if the callee index is proper, thus not "this" and not "unknown".
+ // This means the "CalleeIdx" has to be non-negative if "HasImplicitThisParam"
+ // is false and positive if "HasImplicitThisParam" is true.
+ if (CalleeIdx < (int)HasImplicitThisParam) {
+ S.Diag(AL.getLoc(), diag::err_callback_attribute_invalid_callee)
+ << AL.getRange();
+ return;
+ }
+
+ // Get the callee type, note the index adjustment as the AST doesn't contain
+ // the this type (which the callee cannot reference anyway!).
+ const Type *CalleeType =
+ getFunctionOrMethodParamType(D, CalleeIdx - HasImplicitThisParam)
+ .getTypePtr();
+ if (!CalleeType || !CalleeType->isFunctionPointerType()) {
+ S.Diag(AL.getLoc(), diag::err_callback_callee_no_function_type)
+ << AL.getRange();
+ return;
+ }
+
+ const Type *CalleeFnType =
+ CalleeType->getPointeeType()->getUnqualifiedDesugaredType();
+
+ // TODO: Check the type of the callee arguments.
+
+ const auto *CalleeFnProtoType = dyn_cast<FunctionProtoType>(CalleeFnType);
+ if (!CalleeFnProtoType) {
+ S.Diag(AL.getLoc(), diag::err_callback_callee_no_function_type)
+ << AL.getRange();
+ return;
+ }
+
+ if (CalleeFnProtoType->getNumParams() > EncodingIndices.size() - 1) {
+ S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << AL << (unsigned)(EncodingIndices.size() - 1);
+ return;
+ }
+
+ if (CalleeFnProtoType->getNumParams() < EncodingIndices.size() - 1) {
+ S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << AL << (unsigned)(EncodingIndices.size() - 1);
+ return;
+ }
+
+ if (CalleeFnProtoType->isVariadic()) {
+ S.Diag(AL.getLoc(), diag::err_callback_callee_is_variadic) << AL.getRange();
+ return;
+ }
+
+ // Do not allow multiple callback attributes.
+ if (D->hasAttr<CallbackAttr>()) {
+ S.Diag(AL.getLoc(), diag::err_callback_attribute_multiple) << AL.getRange();
+ return;
+ }
+
+ D->addAttr(::new (S.Context) CallbackAttr(
+ AL.getRange(), S.Context, EncodingIndices.data(), EncodingIndices.size(),
+ AL.getAttributeSpellingListIndex()));
+}
+
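handleCallbackAttr above validates the callback attribute: the first argument names (or indexes, 1-based, with 0 meaning the implicit this) the function-pointer parameter that is eventually invoked, and the remaining arguments describe what it is invoked with. A hedged sketch of a declaration it accepts (identifiers are illustrative):

    // The broker eventually calls 'cb' with 'data'; arguments may be given by
    // parameter name or by 1-based index, and '__' marks an unknown argument.
    void broker(void (*cb)(void *), void *data)
        __attribute__((callback(cb, data)));       // equivalently: callback(1, 2)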
static void handleTransparentUnionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Try to find the underlying union declaration.
RecordDecl *RD = nullptr;
@@ -4157,6 +4278,15 @@ MinSizeAttr *Sema::mergeMinSizeAttr(Decl *D, SourceRange Range,
return ::new (Context) MinSizeAttr(Range, Context, AttrSpellingListIndex);
}
+NoSpeculativeLoadHardeningAttr *Sema::mergeNoSpeculativeLoadHardeningAttr(
+ Decl *D, const NoSpeculativeLoadHardeningAttr &AL) {
+ if (checkAttrMutualExclusion<SpeculativeLoadHardeningAttr>(*this, D, AL))
+ return nullptr;
+
+ return ::new (Context) NoSpeculativeLoadHardeningAttr(
+ AL.getRange(), Context, AL.getSpellingListIndex());
+}
+
OptimizeNoneAttr *Sema::mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex) {
if (AlwaysInlineAttr *Inline = D->getAttr<AlwaysInlineAttr>()) {
@@ -4177,6 +4307,15 @@ OptimizeNoneAttr *Sema::mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
AttrSpellingListIndex);
}
+SpeculativeLoadHardeningAttr *Sema::mergeSpeculativeLoadHardeningAttr(
+ Decl *D, const SpeculativeLoadHardeningAttr &AL) {
+ if (checkAttrMutualExclusion<NoSpeculativeLoadHardeningAttr>(*this, D, AL))
+ return nullptr;
+
+ return ::new (Context) SpeculativeLoadHardeningAttr(
+ AL.getRange(), Context, AL.getSpellingListIndex());
+}
+
static void handleAlwaysInlineAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (checkAttrMutualExclusion<NotTailCalledAttr>(S, D, AL))
return;
@@ -4482,11 +4621,56 @@ bool Sema::CheckCallingConvAttr(const ParsedAttr &Attrs, CallingConv &CC,
default: llvm_unreachable("unexpected attribute kind");
}
+ TargetInfo::CallingConvCheckResult A = TargetInfo::CCCR_OK;
const TargetInfo &TI = Context.getTargetInfo();
- TargetInfo::CallingConvCheckResult A = TI.checkCallingConvention(CC);
- if (A != TargetInfo::CCCR_OK) {
- if (A == TargetInfo::CCCR_Warning)
- Diag(Attrs.getLoc(), diag::warn_cconv_ignored) << Attrs;
+ // CUDA functions may have host and/or device attributes which indicate
+ // their targeted execution environment; therefore, the calling convention
+ // of functions in CUDA should be checked against the target deduced based
+ // on their host/device attributes.
+ if (LangOpts.CUDA) {
+ auto *Aux = Context.getAuxTargetInfo();
+ auto CudaTarget = IdentifyCUDATarget(FD);
+ bool CheckHost = false, CheckDevice = false;
+ switch (CudaTarget) {
+ case CFT_HostDevice:
+ CheckHost = true;
+ CheckDevice = true;
+ break;
+ case CFT_Host:
+ CheckHost = true;
+ break;
+ case CFT_Device:
+ case CFT_Global:
+ CheckDevice = true;
+ break;
+ case CFT_InvalidTarget:
+ llvm_unreachable("unexpected cuda target");
+ }
+ auto *HostTI = LangOpts.CUDAIsDevice ? Aux : &TI;
+ auto *DeviceTI = LangOpts.CUDAIsDevice ? &TI : Aux;
+ if (CheckHost && HostTI)
+ A = HostTI->checkCallingConvention(CC);
+ if (A == TargetInfo::CCCR_OK && CheckDevice && DeviceTI)
+ A = DeviceTI->checkCallingConvention(CC);
+ } else {
+ A = TI.checkCallingConvention(CC);
+ }
+
+ switch (A) {
+ case TargetInfo::CCCR_OK:
+ break;
+
+ case TargetInfo::CCCR_Ignore:
+ // Treat an ignored convention as if it was an explicit C calling convention
+ // attribute. For example, __stdcall on Win x64 functions as __cdecl, so
+ // that command line flags that change the default convention to
+ // __vectorcall don't affect declarations marked __stdcall.
+ CC = CC_C;
+ break;
+
+ case TargetInfo::CCCR_Warning: {
+ Diag(Attrs.getLoc(), diag::warn_cconv_unsupported)
+ << Attrs << (int)CallingConventionIgnoredReason::ForThisTarget;
// This convention is not valid for the target. Use the default function or
// method calling convention.
@@ -4496,6 +4680,8 @@ bool Sema::CheckCallingConvAttr(const ParsedAttr &Attrs, CallingConv &CC,
IsVariadic = FD->isVariadic();
}
CC = Context.getDefaultCallingConvention(IsVariadic, IsCXXMethod);
+ break;
+ }
}
Attrs.setProcessingCache((unsigned) CC);
@@ -5116,11 +5302,22 @@ static void handleObjCBridgeRelatedAttr(Sema &S, Decl *D,
static void handleObjCDesignatedInitializer(Sema &S, Decl *D,
const ParsedAttr &AL) {
+ DeclContext *Ctx = D->getDeclContext();
+
+ // This attribute can only be applied to methods in interfaces or class
+ // extensions.
+ if (!isa<ObjCInterfaceDecl>(Ctx) &&
+ !(isa<ObjCCategoryDecl>(Ctx) &&
+ cast<ObjCCategoryDecl>(Ctx)->IsClassExtension())) {
+ S.Diag(D->getLocation(), diag::err_designated_init_attr_non_init);
+ return;
+ }
+
ObjCInterfaceDecl *IFace;
- if (auto *CatDecl = dyn_cast<ObjCCategoryDecl>(D->getDeclContext()))
+ if (auto *CatDecl = dyn_cast<ObjCCategoryDecl>(Ctx))
IFace = CatDecl->getClassInterface();
else
- IFace = cast<ObjCInterfaceDecl>(D->getDeclContext());
+ IFace = cast<ObjCInterfaceDecl>(Ctx);
if (!IFace)
return;
@@ -5377,6 +5574,27 @@ static void handleARMInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
static void handleMSP430InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ // The MSP430 'interrupt' attribute is applied to
+ // a function with no parameters and a void return type.
+ if (!isFunctionOrMethod(D)) {
+ S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
+ << "'interrupt'" << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ if (hasFunctionProto(D) && getFunctionOrMethodNumParams(D) != 0) {
+ S.Diag(D->getLocation(), diag::warn_interrupt_attribute_invalid)
+ << /*MSP430*/ 1 << 0;
+ return;
+ }
+
+ if (!getFunctionOrMethodResultType(D)->isVoidType()) {
+ S.Diag(D->getLocation(), diag::warn_interrupt_attribute_invalid)
+ << /*MSP430*/ 1 << 1;
+ return;
+ }
+
+ // The attribute takes one integer argument.
if (!checkAttributeNumArgs(S, AL, 1))
return;
@@ -5386,8 +5604,6 @@ static void handleMSP430InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
- // FIXME: Check for decl - it should be void ()(void).
-
Expr *NumParamsExpr = static_cast<Expr *>(AL.getArgAsExpr(0));
llvm::APSInt NumParams(32);
if (!NumParamsExpr->isIntegerConstantExpr(NumParams, S.Context)) {
@@ -5396,9 +5612,9 @@ static void handleMSP430InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
<< NumParamsExpr->getSourceRange();
return;
}
-
+ // The argument should be in range 0..63.
unsigned Num = NumParams.getLimitedValue(255);
- if ((Num & 1) || Num > 30) {
+ if (Num > 63) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
<< AL << (int)NumParams.getSExtValue()
<< NumParamsExpr->getSourceRange();
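// [Editorial sketch, not part of the upstream diff] With the relaxed bounds
// check above, any vector number in 0..63 is accepted (previously odd values
// and values above 30 were rejected). Assuming an msp430 target, a handler
// would look like:
__attribute__((interrupt(11)))
void watchdog_isr(void) {}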
@@ -5442,14 +5658,14 @@ static void handleMipsInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
if (hasFunctionProto(D) && getFunctionOrMethodNumParams(D) != 0) {
- S.Diag(D->getLocation(), diag::warn_mips_interrupt_attribute)
- << 0;
+ S.Diag(D->getLocation(), diag::warn_interrupt_attribute_invalid)
+ << /*MIPS*/ 0 << 0;
return;
}
if (!getFunctionOrMethodResultType(D)->isVoidType()) {
- S.Diag(D->getLocation(), diag::warn_mips_interrupt_attribute)
- << 1;
+ S.Diag(D->getLocation(), diag::warn_interrupt_attribute_invalid)
+ << /*MIPS*/ 0 << 1;
return;
}
@@ -5558,6 +5774,51 @@ static void handleAVRSignalAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
handleSimpleAttribute<AVRSignalAttr>(S, D, AL);
}
+static void handleWebAssemblyImportModuleAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!isFunctionOrMethod(D)) {
+ S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
+ << "'import_module'" << ExpectedFunction;
+ return;
+ }
+
+ auto *FD = cast<FunctionDecl>(D);
+ if (FD->isThisDeclarationADefinition()) {
+ S.Diag(D->getLocation(), diag::err_alias_is_definition) << FD << 0;
+ return;
+ }
+
+ StringRef Str;
+ SourceLocation ArgLoc;
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
+ return;
+
+ FD->addAttr(::new (S.Context) WebAssemblyImportModuleAttr(
+ AL.getRange(), S.Context, Str,
+ AL.getAttributeSpellingListIndex()));
+}
+
+static void handleWebAssemblyImportNameAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!isFunctionOrMethod(D)) {
+ S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
+ << "'import_name'" << ExpectedFunction;
+ return;
+ }
+
+ auto *FD = cast<FunctionDecl>(D);
+ if (FD->isThisDeclarationADefinition()) {
+ S.Diag(D->getLocation(), diag::err_alias_is_definition) << FD << 0;
+ return;
+ }
+
+ StringRef Str;
+ SourceLocation ArgLoc;
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
+ return;
+
+ FD->addAttr(::new (S.Context) WebAssemblyImportNameAttr(
+ AL.getRange(), S.Context, Str,
+ AL.getAttributeSpellingListIndex()));
+}
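// [Editorial sketch, not part of the upstream diff] Both WebAssembly handlers
// above accept a single string literal and reject definitions. Assuming a
// wasm32 target, the intended usage is an import declaration such as:
__attribute__((import_module("env"), import_name("host_log")))
void host_log(const char *msg);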
static void handleRISCVInterruptAttr(Sema &S, Decl *D,
const ParsedAttr &AL) {
@@ -5596,12 +5857,14 @@ static void handleRISCVInterruptAttr(Sema &S, Decl *D,
}
if (hasFunctionProto(D) && getFunctionOrMethodNumParams(D) != 0) {
- S.Diag(D->getLocation(), diag::warn_riscv_interrupt_attribute) << 0;
+ S.Diag(D->getLocation(), diag::warn_interrupt_attribute_invalid)
+ << /*RISC-V*/ 2 << 0;
return;
}
if (!getFunctionOrMethodResultType(D)->isVoidType()) {
- S.Diag(D->getLocation(), diag::warn_riscv_interrupt_attribute) << 1;
+ S.Diag(D->getLocation(), diag::warn_interrupt_attribute_invalid)
+ << /*RISC-V*/ 2 << 1;
return;
}
@@ -5643,57 +5906,115 @@ static void handleInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
}
-static void handleAMDGPUFlatWorkGroupSizeAttr(Sema &S, Decl *D,
- const ParsedAttr &AL) {
+static bool
+checkAMDGPUFlatWorkGroupSizeArguments(Sema &S, Expr *MinExpr, Expr *MaxExpr,
+ const AMDGPUFlatWorkGroupSizeAttr &Attr) {
+ // Accept template arguments for now as they depend on something else.
+ // We'll get to check them when they eventually get instantiated.
+ if (MinExpr->isValueDependent() || MaxExpr->isValueDependent())
+ return false;
+
uint32_t Min = 0;
- Expr *MinExpr = AL.getArgAsExpr(0);
- if (!checkUInt32Argument(S, AL, MinExpr, Min))
- return;
+ if (!checkUInt32Argument(S, Attr, MinExpr, Min, 0))
+ return true;
uint32_t Max = 0;
- Expr *MaxExpr = AL.getArgAsExpr(1);
- if (!checkUInt32Argument(S, AL, MaxExpr, Max))
- return;
+ if (!checkUInt32Argument(S, Attr, MaxExpr, Max, 1))
+ return true;
if (Min == 0 && Max != 0) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_invalid) << AL << 0;
- return;
+ S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
+ << &Attr << 0;
+ return true;
}
if (Min > Max) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_invalid) << AL << 1;
- return;
+ S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
+ << &Attr << 1;
+ return true;
}
- D->addAttr(::new (S.Context)
- AMDGPUFlatWorkGroupSizeAttr(AL.getLoc(), S.Context, Min, Max,
- AL.getAttributeSpellingListIndex()));
+ return false;
}
-static void handleAMDGPUWavesPerEUAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- uint32_t Min = 0;
- Expr *MinExpr = AL.getArgAsExpr(0);
- if (!checkUInt32Argument(S, AL, MinExpr, Min))
+void Sema::addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D,
+ Expr *MinExpr, Expr *MaxExpr,
+ unsigned SpellingListIndex) {
+ AMDGPUFlatWorkGroupSizeAttr TmpAttr(AttrRange, Context, MinExpr, MaxExpr,
+ SpellingListIndex);
+
+ if (checkAMDGPUFlatWorkGroupSizeArguments(*this, MinExpr, MaxExpr, TmpAttr))
return;
+ D->addAttr(::new (Context) AMDGPUFlatWorkGroupSizeAttr(
+ AttrRange, Context, MinExpr, MaxExpr, SpellingListIndex));
+}
+
+static void handleAMDGPUFlatWorkGroupSizeAttr(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
+ Expr *MinExpr = AL.getArgAsExpr(0);
+ Expr *MaxExpr = AL.getArgAsExpr(1);
+
+ S.addAMDGPUFlatWorkGroupSizeAttr(AL.getRange(), D, MinExpr, MaxExpr,
+ AL.getAttributeSpellingListIndex());
+}
+
+static bool checkAMDGPUWavesPerEUArguments(Sema &S, Expr *MinExpr,
+ Expr *MaxExpr,
+ const AMDGPUWavesPerEUAttr &Attr) {
+ if (S.DiagnoseUnexpandedParameterPack(MinExpr) ||
+ (MaxExpr && S.DiagnoseUnexpandedParameterPack(MaxExpr)))
+ return true;
+
+ // Accept template arguments for now as they depend on something else.
+ // We'll get to check them when they eventually get instantiated.
+ if (MinExpr->isValueDependent() || (MaxExpr && MaxExpr->isValueDependent()))
+ return false;
+
+ uint32_t Min = 0;
+ if (!checkUInt32Argument(S, Attr, MinExpr, Min, 0))
+ return true;
+
uint32_t Max = 0;
- if (AL.getNumArgs() == 2) {
- Expr *MaxExpr = AL.getArgAsExpr(1);
- if (!checkUInt32Argument(S, AL, MaxExpr, Max))
- return;
- }
+ if (MaxExpr && !checkUInt32Argument(S, Attr, MaxExpr, Max, 1))
+ return true;
if (Min == 0 && Max != 0) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_invalid) << AL << 0;
- return;
+ S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
+ << &Attr << 0;
+ return true;
}
if (Max != 0 && Min > Max) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_invalid) << AL << 1;
- return;
+ S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
+ << &Attr << 1;
+ return true;
}
- D->addAttr(::new (S.Context)
- AMDGPUWavesPerEUAttr(AL.getLoc(), S.Context, Min, Max,
- AL.getAttributeSpellingListIndex()));
+ return false;
+}
+
+void Sema::addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D,
+ Expr *MinExpr, Expr *MaxExpr,
+ unsigned SpellingListIndex) {
+ AMDGPUWavesPerEUAttr TmpAttr(AttrRange, Context, MinExpr, MaxExpr,
+ SpellingListIndex);
+
+ if (checkAMDGPUWavesPerEUArguments(*this, MinExpr, MaxExpr, TmpAttr))
+ return;
+
+ D->addAttr(::new (Context) AMDGPUWavesPerEUAttr(AttrRange, Context, MinExpr,
+ MaxExpr, SpellingListIndex));
+}
+
+static void handleAMDGPUWavesPerEUAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!checkAttributeAtLeastNumArgs(S, AL, 1) ||
+ !checkAttributeAtMostNumArgs(S, AL, 2))
+ return;
+
+ Expr *MinExpr = AL.getArgAsExpr(0);
+ Expr *MaxExpr = (AL.getNumArgs() > 1) ? AL.getArgAsExpr(1) : nullptr;
+
+ S.addAMDGPUWavesPerEUAttr(AL.getRange(), D, MinExpr, MaxExpr,
+ AL.getAttributeSpellingListIndex());
}
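// [Editorial sketch, not part of the upstream diff] The new Sema::addAMDGPU*
// entry points above defer value-dependent arguments, so the attributes can
// sit on templates and be validated at instantiation. A rough sketch,
// assuming an amdgcn target and the GNU spellings shown:
template <unsigned Min, unsigned Max>
__attribute__((amdgpu_flat_work_group_size(Min, Max), amdgpu_waves_per_eu(2)))
void kernel_body() {}

template void kernel_body<64, 256>(); // arguments are checked here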
static void handleAMDGPUNumSGPRAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
@@ -6001,7 +6322,8 @@ static void handleNoSanitizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!S.checkStringLiteralArgumentAttr(AL, I, SanitizerName, &LiteralLoc))
return;
- if (parseSanitizerValue(SanitizerName, /*AllowGroups=*/true) == 0)
+ if (parseSanitizerValue(SanitizerName, /*AllowGroups=*/true) ==
+ SanitizerMask())
S.Diag(LiteralLoc, diag::warn_unknown_sanitizer_ignored) << SanitizerName;
else if (isGlobalVar(D) && SanitizerName != "address")
S.Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
@@ -6026,9 +6348,21 @@ static void handleNoSanitizeSpecificAttr(Sema &S, Decl *D,
if (isGlobalVar(D) && SanitizerName != "address")
S.Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
<< AL << ExpectedFunction;
- D->addAttr(::new (S.Context)
- NoSanitizeAttr(AL.getRange(), S.Context, &SanitizerName, 1,
- AL.getAttributeSpellingListIndex()));
+
+ // FIXME: Rather than create a NoSanitizeSpecificAttr, this creates a
+ // NoSanitizeAttr object; but we need to calculate the correct spelling list
+ // index rather than incorrectly assume the index for NoSanitizeSpecificAttr
+ // has the same spellings as the index for NoSanitizeAttr. We don't have a
+ // general way to "translate" between the two, so this hack attempts to work
+ // around the issue with hard-coded indices. This is critical for calling
+ // getSpelling() or prettyPrint() on the resulting semantic attribute object
+ // without failing assertions.
+ unsigned TranslatedSpellingIndex = 0;
+ if (AL.isC2xAttribute() || AL.isCXX11Attribute())
+ TranslatedSpellingIndex = 1;
+
+ D->addAttr(::new (S.Context) NoSanitizeAttr(
+ AL.getRange(), S.Context, &SanitizerName, 1, TranslatedSpellingIndex));
}
static void handleInternalLinkageAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
@@ -6114,7 +6448,9 @@ static void handleOpenCLAccessAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (const auto *PDecl = dyn_cast<ParmVarDecl>(D)) {
const Type *DeclTy = PDecl->getType().getCanonicalType().getTypePtr();
if (AL.getName()->getName().find("read_write") != StringRef::npos) {
- if (S.getLangOpts().OpenCLVersion < 200 || DeclTy->isPipeType()) {
+ if ((!S.getLangOpts().OpenCLCPlusPlus &&
+ S.getLangOpts().OpenCLVersion < 200) ||
+ DeclTy->isPipeType()) {
S.Diag(AL.getLoc(), diag::err_opencl_invalid_read_write)
<< AL << PDecl->getType() << DeclTy->isImageType();
D->setInvalidDecl(true);
@@ -6221,6 +6557,42 @@ static void handleObjCExternallyRetainedAttr(Sema &S, Decl *D,
handleSimpleAttribute<ObjCExternallyRetainedAttr>(S, D, AL);
}
+static void handleMIGServerRoutineAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ // Check that the return type is a `typedef int kern_return_t` or a typedef
+ // around it, because otherwise MIG convention checks make no sense.
+ // BlockDecl doesn't store a return type, so it's annoying to check,
+ // so let's skip it for now.
+ if (!isa<BlockDecl>(D)) {
+ QualType T = getFunctionOrMethodResultType(D);
+ bool IsKernReturnT = false;
+ while (const auto *TT = T->getAs<TypedefType>()) {
+ IsKernReturnT = (TT->getDecl()->getName() == "kern_return_t");
+ T = TT->desugar();
+ }
+ if (!IsKernReturnT || T.getCanonicalType() != S.getASTContext().IntTy) {
+ S.Diag(D->getBeginLoc(),
+ diag::warn_mig_server_routine_does_not_return_kern_return_t);
+ return;
+ }
+ }
+
+ handleSimpleAttribute<MIGServerRoutineAttr>(S, D, AL);
+}
+
+static void handleMSAllocatorAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ // Warn if the return type is not a pointer or reference type.
+ if (auto *FD = dyn_cast<FunctionDecl>(D)) {
+ QualType RetTy = FD->getReturnType();
+ if (!RetTy->isPointerType() && !RetTy->isReferenceType()) {
+ S.Diag(AL.getLoc(), diag::warn_declspec_allocator_nonpointer)
+ << AL.getRange() << RetTy;
+ return;
+ }
+ }
+
+ handleSimpleAttribute<MSAllocatorAttr>(S, D, AL);
+}
+
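// [Editorial sketch, not part of the upstream diff] The two handlers above key
// on the return type: mig_server_routine wants a kern_return_t (a typedef of
// int), and __declspec(allocator) warns on non-pointer returns. Plausible
// usages, assuming the Clang spelling and -fms-extensions respectively:
typedef int kern_return_t;

__attribute__((mig_server_routine))
kern_return_t server_demux(void *request, void *reply);

__declspec(allocator) void *pool_alloc(unsigned long size);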
//===----------------------------------------------------------------------===//
// Top Level Sema Entry Points
//===----------------------------------------------------------------------===//
@@ -6311,6 +6683,12 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_AVRSignal:
handleAVRSignalAttr(S, D, AL);
break;
+ case ParsedAttr::AT_WebAssemblyImportModule:
+ handleWebAssemblyImportModuleAttr(S, D, AL);
+ break;
+ case ParsedAttr::AT_WebAssemblyImportName:
+ handleWebAssemblyImportNameAttr(S, D, AL);
+ break;
case ParsedAttr::AT_IBAction:
handleSimpleAttribute<IBActionAttr>(S, D, AL);
break;
@@ -6414,6 +6792,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_FormatArg:
handleFormatArgAttr(S, D, AL);
break;
+ case ParsedAttr::AT_Callback:
+ handleCallbackAttr(S, D, AL);
+ break;
case ParsedAttr::AT_CUDAGlobal:
handleGlobalAttr(S, D, AL);
break;
@@ -6424,6 +6805,10 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_CUDAHost:
handleSimpleAttributeWithExclusions<CUDAHostAttr, CUDAGlobalAttr>(S, D, AL);
break;
+ case ParsedAttr::AT_HIPPinnedShadow:
+ handleSimpleAttributeWithExclusions<HIPPinnedShadowAttr, CUDADeviceAttr,
+ CUDAConstantAttr>(S, D, AL);
+ break;
case ParsedAttr::AT_GNUInline:
handleGNUInlineAttr(S, D, AL);
break;
@@ -6451,6 +6836,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_NoSplitStack:
handleSimpleAttribute<NoSplitStackAttr>(S, D, AL);
break;
+ case ParsedAttr::AT_NoUniqueAddress:
+ handleSimpleAttribute<NoUniqueAddressAttr>(S, D, AL);
+ break;
case ParsedAttr::AT_NonNull:
if (auto *PVD = dyn_cast<ParmVarDecl>(D))
handleNonNullAttrParameter(S, PVD, AL);
@@ -6491,7 +6879,8 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleNoCfCheckAttr(S, D, AL);
break;
case ParsedAttr::AT_NoThrow:
- handleSimpleAttribute<NoThrowAttr>(S, D, AL);
+ if (!AL.isUsedAsTypeAttr())
+ handleSimpleAttribute<NoThrowAttr>(S, D, AL);
break;
case ParsedAttr::AT_CUDAShared:
handleSharedAttr(S, D, AL);
@@ -6599,7 +6988,13 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleSectionAttr(S, D, AL);
break;
case ParsedAttr::AT_SpeculativeLoadHardening:
- handleSimpleAttribute<SpeculativeLoadHardeningAttr>(S, D, AL);
+ handleSimpleAttributeWithExclusions<SpeculativeLoadHardeningAttr,
+ NoSpeculativeLoadHardeningAttr>(S, D,
+ AL);
+ break;
+ case ParsedAttr::AT_NoSpeculativeLoadHardening:
+ handleSimpleAttributeWithExclusions<NoSpeculativeLoadHardeningAttr,
+ SpeculativeLoadHardeningAttr>(S, D, AL);
break;
case ParsedAttr::AT_CodeSeg:
handleCodeSegAttr(S, D, AL);
@@ -6619,9 +7014,15 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_ObjCRootClass:
handleSimpleAttribute<ObjCRootClassAttr>(S, D, AL);
break;
+ case ParsedAttr::AT_ObjCNonLazyClass:
+ handleSimpleAttribute<ObjCNonLazyClassAttr>(S, D, AL);
+ break;
case ParsedAttr::AT_ObjCSubclassingRestricted:
handleSimpleAttribute<ObjCSubclassingRestrictedAttr>(S, D, AL);
break;
+ case ParsedAttr::AT_ObjCClassStub:
+ handleSimpleAttribute<ObjCClassStubAttr>(S, D, AL);
+ break;
case ParsedAttr::AT_ObjCExplicitProtocolImpl:
handleObjCSuppresProtocolAttr(S, D, AL);
break;
@@ -6932,6 +7333,14 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_ObjCExternallyRetained:
handleObjCExternallyRetainedAttr(S, D, AL);
break;
+
+ case ParsedAttr::AT_MIGServerRoutine:
+ handleMIGServerRoutineAttr(S, D, AL);
+ break;
+
+ case ParsedAttr::AT_MSAllocator:
+ handleMSAllocatorAttr(S, D, AL);
+ break;
}
}
@@ -6997,6 +7406,17 @@ void Sema::ProcessDeclAttributeList(Scope *S, Decl *D,
}
}
}
+
+ // Do this check after processing D's attributes because the attribute
+ // objc_method_family can change whether the given method is in the init
+ // family, and it can be applied after objc_designated_initializer. This is a
+ // bit of a hack, but we need it to be compatible with versions of clang that
+ // processed the attribute list in the wrong order.
+ if (D->hasAttr<ObjCDesignatedInitializerAttr>() &&
+ cast<ObjCMethodDecl>(D)->getMethodFamily() != OMF_init) {
+ Diag(D->getLocation(), diag::err_designated_init_attr_non_init);
+ D->dropAttr<ObjCDesignatedInitializerAttr>();
+ }
}
// Helper for delayed processing TransparentUnion attribute.
@@ -7066,12 +7486,10 @@ NamedDecl * Sema::DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
// FIXME: Mangling?
// FIXME: Is the qualifier info correct?
// FIXME: Is the DeclContext correct?
- NewFD = FunctionDecl::Create(FD->getASTContext(), FD->getDeclContext(),
- Loc, Loc, DeclarationName(II),
- FD->getType(), FD->getTypeSourceInfo(),
- SC_None, false/*isInlineSpecified*/,
- FD->hasPrototype(),
- false/*isConstexprSpecified*/);
+ NewFD = FunctionDecl::Create(
+ FD->getASTContext(), FD->getDeclContext(), Loc, Loc,
+ DeclarationName(II), FD->getType(), FD->getTypeSourceInfo(), SC_None,
+ false /*isInlineSpecified*/, FD->hasPrototype(), CSK_unspecified);
NewD = NewFD;
if (FD->getQualifier())
@@ -7346,13 +7764,11 @@ ShouldDiagnoseAvailabilityInContext(Sema &S, AvailabilityResult K,
return true;
} else if (K == AR_Unavailable) {
// It is perfectly fine to refer to an 'unavailable' Objective-C method
- // when it's actually defined and is referenced from within the
- // @implementation itself. In this context, we interpret unavailable as a
- // form of access control.
+ // when it is referenced from within the @implementation itself. In this
+ // context, we interpret unavailable as a form of access control.
if (const auto *MD = dyn_cast<ObjCMethodDecl>(OffendingDecl)) {
if (const auto *Impl = dyn_cast<ObjCImplDecl>(C)) {
- if (MD->getClassInterface() == Impl->getClassInterface() &&
- MD->isDefined())
+ if (MD->getClassInterface() == Impl->getClassInterface())
return true;
}
}
diff --git a/lib/Sema/SemaDeclCXX.cpp b/lib/Sema/SemaDeclCXX.cpp
index 43b289d8d0de..9a6385f28319 100644
--- a/lib/Sema/SemaDeclCXX.cpp
+++ b/lib/Sema/SemaDeclCXX.cpp
@@ -1,9 +1,8 @@
//===------ SemaDeclCXX.cpp - Semantic Analysis for C++ Declarations ------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -63,7 +62,7 @@ namespace {
public:
CheckDefaultArgumentVisitor(Expr *defarg, Sema *s)
- : DefaultArg(defarg), S(s) {}
+ : DefaultArg(defarg), S(s) {}
bool VisitExpr(Expr *Node);
bool VisitDeclRefExpr(DeclRefExpr *DRE);
@@ -193,6 +192,7 @@ Sema::ImplicitExceptionSpecification::CalledDecl(SourceLocation CallLoc,
// If this function has a basic noexcept, it doesn't affect the outcome.
case EST_BasicNoexcept:
case EST_NoexceptTrue:
+ case EST_NoThrow:
return;
// If we're still at noexcept(true) and there's a throw() callee,
// change to that specification.
@@ -638,9 +638,9 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old,
// C++11 [dcl.constexpr]p1: If any declaration of a function or function
// template has a constexpr specifier then all its declarations shall
// contain the constexpr specifier.
- if (New->isConstexpr() != Old->isConstexpr()) {
+ if (New->getConstexprKind() != Old->getConstexprKind()) {
Diag(New->getLocation(), diag::err_constexpr_redecl_mismatch)
- << New << New->isConstexpr();
+ << New << New->getConstexprKind() << Old->getConstexprKind();
Diag(Old->getLocation(), diag::note_previous_declaration);
Invalid = true;
} else if (!Old->getMostRecentDecl()->isInlined() && New->isInlined() &&
@@ -658,14 +658,13 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old,
Invalid = true;
}
- // FIXME: It's not clear what should happen if multiple declarations of a
- // deduction guide have different explicitness. For now at least we simply
- // reject any case where the explicitness changes.
- auto *NewGuide = dyn_cast<CXXDeductionGuideDecl>(New);
- if (NewGuide && NewGuide->isExplicitSpecified() !=
- cast<CXXDeductionGuideDecl>(Old)->isExplicitSpecified()) {
- Diag(New->getLocation(), diag::err_deduction_guide_explicit_mismatch)
- << NewGuide->isExplicitSpecified();
+ // C++17 [temp.deduct.guide]p3:
+ // Two deduction guide declarations in the same translation unit
+ // for the same class template shall not have equivalent
+ // parameter-declaration-clauses.
+ if (isa<CXXDeductionGuideDecl>(New) &&
+ !New->isFunctionTemplateSpecialization()) {
+ Diag(New->getLocation(), diag::err_deduction_guide_redeclared);
Diag(Old->getLocation(), diag::note_previous_declaration);
}
@@ -717,23 +716,34 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
// The semantic context is always just the current context.
DeclContext *const DC = CurContext;
- // C++1z [dcl.dcl]/8:
+ // C++17 [dcl.dcl]/8:
// The decl-specifier-seq shall contain only the type-specifier auto
// and cv-qualifiers.
+ // C++2a [dcl.dcl]/8:
+ // If decl-specifier-seq contains any decl-specifier other than static,
+ // thread_local, auto, or cv-qualifiers, the program is ill-formed.
auto &DS = D.getDeclSpec();
{
SmallVector<StringRef, 8> BadSpecifiers;
SmallVector<SourceLocation, 8> BadSpecifierLocs;
+ SmallVector<StringRef, 8> CPlusPlus20Specifiers;
+ SmallVector<SourceLocation, 8> CPlusPlus20SpecifierLocs;
if (auto SCS = DS.getStorageClassSpec()) {
- BadSpecifiers.push_back(DeclSpec::getSpecifierName(SCS));
- BadSpecifierLocs.push_back(DS.getStorageClassSpecLoc());
+ if (SCS == DeclSpec::SCS_static) {
+ CPlusPlus20Specifiers.push_back(DeclSpec::getSpecifierName(SCS));
+ CPlusPlus20SpecifierLocs.push_back(DS.getStorageClassSpecLoc());
+ } else {
+ BadSpecifiers.push_back(DeclSpec::getSpecifierName(SCS));
+ BadSpecifierLocs.push_back(DS.getStorageClassSpecLoc());
+ }
}
if (auto TSCS = DS.getThreadStorageClassSpec()) {
- BadSpecifiers.push_back(DeclSpec::getSpecifierName(TSCS));
- BadSpecifierLocs.push_back(DS.getThreadStorageClassSpecLoc());
+ CPlusPlus20Specifiers.push_back(DeclSpec::getSpecifierName(TSCS));
+ CPlusPlus20SpecifierLocs.push_back(DS.getThreadStorageClassSpecLoc());
}
- if (DS.isConstexprSpecified()) {
- BadSpecifiers.push_back("constexpr");
+ if (DS.hasConstexprSpecifier()) {
+ BadSpecifiers.push_back(
+ DeclSpec::getSpecifierName(DS.getConstexprSpecifier()));
BadSpecifierLocs.push_back(DS.getConstexprSpecLoc());
}
if (DS.isInlineSpecified()) {
@@ -748,6 +758,16 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
// them when building the underlying variable.
for (auto Loc : BadSpecifierLocs)
Err << SourceRange(Loc, Loc);
+ } else if (!CPlusPlus20Specifiers.empty()) {
+ auto &&Warn = Diag(CPlusPlus20SpecifierLocs.front(),
+ getLangOpts().CPlusPlus2a
+ ? diag::warn_cxx17_compat_decomp_decl_spec
+ : diag::ext_decomp_decl_spec);
+ Warn << (int)CPlusPlus20Specifiers.size()
+ << llvm::join(CPlusPlus20Specifiers.begin(),
+ CPlusPlus20Specifiers.end(), " ");
+ for (auto Loc : CPlusPlus20SpecifierLocs)
+ Warn << SourceRange(Loc, Loc);
}
// We can't recover from it being declared as a typedef.
if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef)
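// [Editorial sketch, not part of the upstream diff] The change above turns
// 'static' and 'thread_local' on a structured binding into a C++2a extension
// (or compatibility) warning instead of an error, so under -std=c++2a this
// is accepted:
#include <utility>
void tally() {
  static auto [calls, errors] = std::pair{0, 0};
  ++calls;
}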
@@ -1129,7 +1149,6 @@ static bool checkTupleLikeDecomposition(Sema &S,
}
}
}
- S.FilterAcceptableTemplateNames(MemberGet);
}
unsigned I = 0;
@@ -1160,7 +1179,7 @@ static bool checkTupleLikeDecomposition(Sema &S,
if (E.isInvalid())
return true;
- E = S.ActOnCallExpr(nullptr, E.get(), Loc, None, Loc);
+ E = S.BuildCallExpr(nullptr, E.get(), Loc, None, Loc);
} else {
// Otherwise, the initializer is get<i-1>(e), where get is looked up
// in the associated namespaces.
@@ -1170,7 +1189,7 @@ static bool checkTupleLikeDecomposition(Sema &S,
UnresolvedSetIterator(), UnresolvedSetIterator());
Expr *Arg = E.get();
- E = S.ActOnCallExpr(nullptr, Get, Loc, Arg, Loc);
+ E = S.BuildCallExpr(nullptr, Get, Loc, Arg, Loc);
}
if (E.isInvalid())
return true;
@@ -1301,6 +1320,10 @@ static DeclAccessPair findDecomposableBaseClass(Sema &S, SourceLocation Loc,
static bool checkMemberDecomposition(Sema &S, ArrayRef<BindingDecl*> Bindings,
ValueDecl *Src, QualType DecompType,
const CXXRecordDecl *OrigRD) {
+ if (S.RequireCompleteType(Src->getLocation(), DecompType,
+ diag::err_incomplete_type))
+ return true;
+
CXXCastPath BasePath;
DeclAccessPair BasePair =
findDecomposableBaseClass(S, Src->getLocation(), OrigRD, BasePath);
@@ -1559,10 +1582,10 @@ static bool CheckConstexprParameterTypes(Sema &SemaRef,
const ParmVarDecl *PD = FD->getParamDecl(ArgIndex);
SourceLocation ParamLoc = PD->getLocation();
if (!(*i)->isDependentType() &&
- SemaRef.RequireLiteralType(ParamLoc, *i,
- diag::err_constexpr_non_literal_param,
- ArgIndex+1, PD->getSourceRange(),
- isa<CXXConstructorDecl>(FD)))
+ SemaRef.RequireLiteralType(
+ ParamLoc, *i, diag::err_constexpr_non_literal_param, ArgIndex + 1,
+ PD->getSourceRange(), isa<CXXConstructorDecl>(FD),
+ FD->isConsteval()))
return false;
}
return true;
@@ -1595,6 +1618,9 @@ bool Sema::CheckConstexprFunctionDecl(const FunctionDecl *NewFD) {
// The definition of a constexpr constructor shall satisfy the following
// constraints:
// - the class shall not have any virtual base classes;
+ //
+ // FIXME: This only applies to constructors, not arbitrary member
+ // functions.
const CXXRecordDecl *RD = MD->getParent();
if (RD->getNumVBases()) {
Diag(NewFD->getLocation(), diag::err_constexpr_virtual_base)
@@ -1611,28 +1637,33 @@ bool Sema::CheckConstexprFunctionDecl(const FunctionDecl *NewFD) {
// C++11 [dcl.constexpr]p3:
// The definition of a constexpr function shall satisfy the following
// constraints:
- // - it shall not be virtual;
+ // - it shall not be virtual; (removed in C++20)
const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(NewFD);
if (Method && Method->isVirtual()) {
- Method = Method->getCanonicalDecl();
- Diag(Method->getLocation(), diag::err_constexpr_virtual);
-
- // If it's not obvious why this function is virtual, find an overridden
- // function which uses the 'virtual' keyword.
- const CXXMethodDecl *WrittenVirtual = Method;
- while (!WrittenVirtual->isVirtualAsWritten())
- WrittenVirtual = *WrittenVirtual->begin_overridden_methods();
- if (WrittenVirtual != Method)
- Diag(WrittenVirtual->getLocation(),
- diag::note_overridden_virtual_function);
- return false;
+ if (getLangOpts().CPlusPlus2a) {
+ Diag(Method->getLocation(), diag::warn_cxx17_compat_constexpr_virtual);
+ } else {
+ Method = Method->getCanonicalDecl();
+ Diag(Method->getLocation(), diag::err_constexpr_virtual);
+
+ // If it's not obvious why this function is virtual, find an overridden
+ // function which uses the 'virtual' keyword.
+ const CXXMethodDecl *WrittenVirtual = Method;
+ while (!WrittenVirtual->isVirtualAsWritten())
+ WrittenVirtual = *WrittenVirtual->begin_overridden_methods();
+ if (WrittenVirtual != Method)
+ Diag(WrittenVirtual->getLocation(),
+ diag::note_overridden_virtual_function);
+ return false;
+ }
}
// - its return type shall be a literal type;
QualType RT = NewFD->getReturnType();
if (!RT->isDependentType() &&
RequireLiteralType(NewFD->getLocation(), RT,
- diag::err_constexpr_non_literal_return))
+ diag::err_constexpr_non_literal_return,
+ NewFD->isConsteval()))
return false;
}
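// [Editorial sketch, not part of the upstream diff] Under -std=c++2a the check
// above demotes the constexpr-virtual error to a compatibility warning, so a
// member function like this is now accepted (and, with P1064 virtual constant
// evaluation, usable in a constant expression):
struct Shape {
  constexpr virtual int sides() const { return 0; }
};
struct Square : Shape {
  constexpr int sides() const override { return 4; }
};
static_assert(Square().sides() == 4);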
@@ -1746,7 +1777,7 @@ static bool CheckConstexprDeclStmt(Sema &SemaRef, const FunctionDecl *Dcl,
default:
SemaRef.Diag(DS->getBeginLoc(), diag::err_constexpr_body_invalid_stmt)
- << isa<CXXConstructorDecl>(Dcl);
+ << isa<CXXConstructorDecl>(Dcl) << Dcl->isConsteval();
return false;
}
}
@@ -1931,7 +1962,7 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
}
SemaRef.Diag(S->getBeginLoc(), diag::err_constexpr_body_invalid_stmt)
- << isa<CXXConstructorDecl>(Dcl);
+ << isa<CXXConstructorDecl>(Dcl) << Dcl->isConsteval();
return false;
}
@@ -2053,7 +2084,8 @@ bool Sema::CheckConstexprFunctionBody(const FunctionDecl *Dcl, Stmt *Body) {
Dcl->getReturnType()->isDependentType());
Diag(Dcl->getLocation(),
OK ? diag::warn_cxx11_compat_constexpr_body_no_return
- : diag::err_constexpr_body_no_return);
+ : diag::err_constexpr_body_no_return)
+ << Dcl->isConsteval();
if (!OK)
return false;
} else if (ReturnStmts.size() > 1) {
@@ -3023,7 +3055,7 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
DS.getStorageClassSpec() == DeclSpec::SCS_mutable) &&
!isFunc);
- if (DS.isConstexprSpecified() && isInstField) {
+ if (DS.hasConstexprSpecifier() && isInstField) {
SemaDiagnosticBuilder B =
Diag(DS.getConstexprSpecLoc(), diag::err_invalid_constexpr_member);
SourceLocation ConstexprLoc = DS.getConstexprSpecLoc();
@@ -3172,7 +3204,11 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
// declared] with the same access [as the template].
if (auto *DG = dyn_cast<CXXDeductionGuideDecl>(NonTemplateMember)) {
auto *TD = DG->getDeducedTemplate();
- if (AS != TD->getAccess()) {
+ // Access specifiers are only meaningful if both the template and the
+ // deduction guide are from the same scope.
+ if (AS != TD->getAccess() &&
+ TD->getDeclContext()->getRedeclContext()->Equals(
+ DG->getDeclContext()->getRedeclContext())) {
Diag(DG->getBeginLoc(), diag::err_deduction_guide_wrong_access);
Diag(TD->getBeginLoc(), diag::note_deduction_guide_template_access)
<< TD->getAccess();
@@ -3775,7 +3811,7 @@ namespace {
// Callback to only accept typo corrections that can be a valid C++ member
// initializer: either a non-static field member or a base class.
-class MemInitializerValidatorCCC : public CorrectionCandidateCallback {
+class MemInitializerValidatorCCC final : public CorrectionCandidateCallback {
public:
explicit MemInitializerValidatorCCC(CXXRecordDecl *ClassDecl)
: ClassDecl(ClassDecl) {}
@@ -3789,6 +3825,10 @@ public:
return false;
}
+ std::unique_ptr<CorrectionCandidateCallback> clone() override {
+ return llvm::make_unique<MemInitializerValidatorCCC>(*this);
+ }
+
private:
CXXRecordDecl *ClassDecl;
};
@@ -3871,6 +3911,8 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
if (TemplateTypeTy) {
BaseType = GetTypeFromParser(TemplateTypeTy, &TInfo);
+ if (BaseType.isNull())
+ return true;
} else if (DS.getTypeSpecType() == TST_decltype) {
BaseType = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc());
} else if (DS.getTypeSpecType() == TST_decltype_auto) {
@@ -3918,11 +3960,10 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
// If no results were found, try to correct typos.
TypoCorrection Corr;
+ MemInitializerValidatorCCC CCC(ClassDecl);
if (R.empty() && BaseType.isNull() &&
- (Corr = CorrectTypo(
- R.getLookupNameInfo(), R.getLookupKind(), S, &SS,
- llvm::make_unique<MemInitializerValidatorCCC>(ClassDecl),
- CTK_ErrorRecovery, ClassDecl))) {
+ (Corr = CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), S, &SS,
+ CCC, CTK_ErrorRecovery, ClassDecl))) {
if (FieldDecl *Member = Corr.getCorrectionDeclAs<FieldDecl>()) {
// We have found a non-static data member with a similar
// name to what was typed; complain and initialize that
@@ -5688,9 +5729,11 @@ void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) {
TemplateSpecializationKind TSK = Class->getTemplateSpecializationKind();
- // Ignore explicit dllexport on explicit class template instantiation declarations.
+ // Ignore explicit dllexport on explicit class template instantiation
+ // declarations, except in MinGW mode.
if (ClassExported && !ClassAttr->isInherited() &&
- TSK == TSK_ExplicitInstantiationDeclaration) {
+ TSK == TSK_ExplicitInstantiationDeclaration &&
+ !Context.getTargetInfo().getTriple().isWindowsGNUEnvironment()) {
Class->dropAttr<DLLExportAttr>();
return;
}
@@ -5715,9 +5758,12 @@ void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) {
continue;
if (MD->isInlined()) {
- // MinGW does not import or export inline methods.
+ // MinGW does not import or export inline methods. But do it for
+ // template instantiations.
if (!Context.getTargetInfo().getCXXABI().isMicrosoft() &&
- !Context.getTargetInfo().getTriple().isWindowsItaniumEnvironment())
+ !Context.getTargetInfo().getTriple().isWindowsItaniumEnvironment() &&
+ TSK != TSK_ExplicitInstantiationDeclaration &&
+ TSK != TSK_ExplicitInstantiationDefinition)
continue;
// MSVC versions before 2015 don't export the move assignment operators
@@ -5886,9 +5932,6 @@ static bool canPassInRegisters(Sema &S, CXXRecordDecl *D,
if (D->isDependentType() || D->isInvalidDecl())
return false;
- if (D->hasAttr<TrivialABIAttr>())
- return true;
-
// Clang <= 4 used the pre-C++11 rule, which ignores move operations.
// The PS4 platform ABI follows the behavior of Clang 3.2.
if (CCK == TargetInfo::CCK_ClangABI4OrPS4)
@@ -5946,8 +5989,11 @@ static bool canPassInRegisters(Sema &S, CXXRecordDecl *D,
// Note: This permits small classes with nontrivial destructors to be
// passed in registers, which is non-conforming.
+ bool isAArch64 = S.Context.getTargetInfo().getTriple().isAArch64();
+ uint64_t TypeSize = isAArch64 ? 128 : 64;
+
if (CopyCtorIsTrivial &&
- S.getASTContext().getTypeSize(D->getTypeForDecl()) <= 64)
+ S.getASTContext().getTypeSize(D->getTypeForDecl()) <= TypeSize)
return true;
return false;
}
@@ -6082,9 +6128,60 @@ void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
if (HasTrivialABI)
Record->setHasTrivialSpecialMemberForCall();
+ auto CompleteMemberFunction = [&](CXXMethodDecl *M) {
+ // Check whether the explicitly-defaulted special members are valid.
+ if (!M->isInvalidDecl() && M->isExplicitlyDefaulted())
+ CheckExplicitlyDefaultedSpecialMember(M);
+
+ // For an explicitly defaulted or deleted special member, we defer
+ // determining triviality until the class is complete. That time is now!
+ CXXSpecialMember CSM = getSpecialMember(M);
+ if (!M->isImplicit() && !M->isUserProvided()) {
+ if (CSM != CXXInvalid) {
+ M->setTrivial(SpecialMemberIsTrivial(M, CSM));
+ // Inform the class that we've finished declaring this member.
+ Record->finishedDefaultedOrDeletedMember(M);
+ M->setTrivialForCall(
+ HasTrivialABI ||
+ SpecialMemberIsTrivial(M, CSM, TAH_ConsiderTrivialABI));
+ Record->setTrivialForCallFlags(M);
+ }
+ }
+
+ // Set triviality for the purpose of calls if this is a user-provided
+ // copy/move constructor or destructor.
+ if ((CSM == CXXCopyConstructor || CSM == CXXMoveConstructor ||
+ CSM == CXXDestructor) && M->isUserProvided()) {
+ M->setTrivialForCall(HasTrivialABI);
+ Record->setTrivialForCallFlags(M);
+ }
+
+ if (!M->isInvalidDecl() && M->isExplicitlyDefaulted() &&
+ M->hasAttr<DLLExportAttr>()) {
+ if (getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2015) &&
+ M->isTrivial() &&
+ (CSM == CXXDefaultConstructor || CSM == CXXCopyConstructor ||
+ CSM == CXXDestructor))
+ M->dropAttr<DLLExportAttr>();
+
+ if (M->hasAttr<DLLExportAttr>()) {
+ DefineImplicitSpecialMember(*this, M, M->getLocation());
+ ActOnFinishInlineFunctionDef(M);
+ }
+ }
+ };
+
bool HasMethodWithOverrideControl = false,
HasOverridingMethodWithoutOverrideControl = false;
if (!Record->isDependentType()) {
+ // Check the destructor before any other member function. We need to
+ // determine whether it's trivial in order to determine whether the class
+ // type is a literal type, which is a prerequisite for determining whether
+ // other special member functions are valid and whether they're implicitly
+ // 'constexpr'.
+ if (CXXDestructorDecl *Dtor = Record->getDestructor())
+ CompleteMemberFunction(Dtor);
+
for (auto *M : Record->methods()) {
// See if a method overloads virtual methods in a base
// class without overriding any.
@@ -6094,46 +6191,9 @@ void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
HasMethodWithOverrideControl = true;
else if (M->size_overridden_methods() > 0)
HasOverridingMethodWithoutOverrideControl = true;
- // Check whether the explicitly-defaulted special members are valid.
- if (!M->isInvalidDecl() && M->isExplicitlyDefaulted())
- CheckExplicitlyDefaultedSpecialMember(M);
-
- // For an explicitly defaulted or deleted special member, we defer
- // determining triviality until the class is complete. That time is now!
- CXXSpecialMember CSM = getSpecialMember(M);
- if (!M->isImplicit() && !M->isUserProvided()) {
- if (CSM != CXXInvalid) {
- M->setTrivial(SpecialMemberIsTrivial(M, CSM));
- // Inform the class that we've finished declaring this member.
- Record->finishedDefaultedOrDeletedMember(M);
- M->setTrivialForCall(
- HasTrivialABI ||
- SpecialMemberIsTrivial(M, CSM, TAH_ConsiderTrivialABI));
- Record->setTrivialForCallFlags(M);
- }
- }
- // Set triviality for the purpose of calls if this is a user-provided
- // copy/move constructor or destructor.
- if ((CSM == CXXCopyConstructor || CSM == CXXMoveConstructor ||
- CSM == CXXDestructor) && M->isUserProvided()) {
- M->setTrivialForCall(HasTrivialABI);
- Record->setTrivialForCallFlags(M);
- }
-
- if (!M->isInvalidDecl() && M->isExplicitlyDefaulted() &&
- M->hasAttr<DLLExportAttr>()) {
- if (getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2015) &&
- M->isTrivial() &&
- (CSM == CXXDefaultConstructor || CSM == CXXCopyConstructor ||
- CSM == CXXDestructor))
- M->dropAttr<DLLExportAttr>();
-
- if (M->hasAttr<DLLExportAttr>()) {
- DefineImplicitSpecialMember(*this, M, M->getLocation());
- ActOnFinishInlineFunctionDef(M);
- }
- }
+ if (!isa<CXXDestructorDecl>(M))
+ CompleteMemberFunction(M);
}
}
@@ -6553,7 +6613,7 @@ void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) {
ReturnType = Type->getReturnType();
QualType DeclType = Context.getTypeDeclType(RD);
- DeclType = Context.getAddrSpaceQualType(DeclType, MD->getTypeQualifiers().getAddressSpace());
+ DeclType = Context.getAddrSpaceQualType(DeclType, MD->getMethodQualifiers().getAddressSpace());
QualType ExpectedReturnType = Context.getLValueReferenceType(DeclType);
if (!Context.hasSameType(ReturnType, ExpectedReturnType)) {
@@ -6563,7 +6623,7 @@ void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) {
}
// A defaulted special member cannot have cv-qualifiers.
- if (Type->getTypeQuals().hasConst() || Type->getTypeQuals().hasVolatile()) {
+ if (Type->getMethodQuals().hasConst() || Type->getMethodQuals().hasVolatile()) {
if (DeleteOnTypeMismatch)
ShouldDeleteForTypeMismatch = true;
else {
@@ -6623,49 +6683,42 @@ void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) {
// makes such functions always instantiate to constexpr functions. For
// functions which cannot be constexpr (for non-constructors in C++11 and for
// destructors in C++1y), this is checked elsewhere.
+ //
+ // FIXME: This should not apply if the member is deleted.
bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, RD, CSM,
HasConstParam);
if ((getLangOpts().CPlusPlus14 ? !isa<CXXDestructorDecl>(MD)
: isa<CXXConstructorDecl>(MD)) &&
MD->isConstexpr() && !Constexpr &&
MD->getTemplatedKind() == FunctionDecl::TK_NonTemplate) {
- Diag(MD->getBeginLoc(), diag::err_incorrect_defaulted_constexpr) << CSM;
+ Diag(MD->getBeginLoc(), MD->isConsteval()
+ ? diag::err_incorrect_defaulted_consteval
+ : diag::err_incorrect_defaulted_constexpr)
+ << CSM;
// FIXME: Explain why the special member can't be constexpr.
HadError = true;
}
- // and may have an explicit exception-specification only if it is compatible
- // with the exception-specification on the implicit declaration.
- if (Type->hasExceptionSpec()) {
- // Delay the check if this is the first declaration of the special member,
- // since we may not have parsed some necessary in-class initializers yet.
- if (First) {
- // If the exception specification needs to be instantiated, do so now,
- // before we clobber it with an EST_Unevaluated specification below.
- if (Type->getExceptionSpecType() == EST_Uninstantiated) {
- InstantiateExceptionSpec(MD->getBeginLoc(), MD);
- Type = MD->getType()->getAs<FunctionProtoType>();
- }
- DelayedDefaultedMemberExceptionSpecs.push_back(std::make_pair(MD, Type));
- } else
- CheckExplicitlyDefaultedMemberExceptionSpec(MD, Type);
- }
-
- // If a function is explicitly defaulted on its first declaration,
if (First) {
- // -- it is implicitly considered to be constexpr if the implicit
- // definition would be,
- MD->setConstexpr(Constexpr);
-
- // -- it is implicitly considered to have the same exception-specification
- // as if it had been implicitly declared,
- FunctionProtoType::ExtProtoInfo EPI = Type->getExtProtoInfo();
- EPI.ExceptionSpec.Type = EST_Unevaluated;
- EPI.ExceptionSpec.SourceDecl = MD;
- MD->setType(Context.getFunctionType(ReturnType,
- llvm::makeArrayRef(&ArgType,
- ExpectedParams),
- EPI));
+ // C++2a [dcl.fct.def.default]p3:
+ // If a function is explicitly defaulted on its first declaration, it is
+ // implicitly considered to be constexpr if the implicit declaration
+ // would be.
+ MD->setConstexprKind(Constexpr ? CSK_constexpr : CSK_unspecified);
+
+ if (!Type->hasExceptionSpec()) {
+ // C++2a [except.spec]p3:
+ // If a declaration of a function does not have a noexcept-specifier
+ // [and] is defaulted on its first declaration, [...] the exception
+ // specification is as specified below
+ FunctionProtoType::ExtProtoInfo EPI = Type->getExtProtoInfo();
+ EPI.ExceptionSpec.Type = EST_Unevaluated;
+ EPI.ExceptionSpec.SourceDecl = MD;
+ MD->setType(Context.getFunctionType(ReturnType,
+ llvm::makeArrayRef(&ArgType,
+ ExpectedParams),
+ EPI));
+ }
}
if (ShouldDeleteForTypeMismatch || ShouldDeleteSpecialMember(MD, CSM)) {
@@ -6698,43 +6751,12 @@ void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) {
MD->setInvalidDecl();
}
-/// Check whether the exception specification provided for an
-/// explicitly-defaulted special member matches the exception specification
-/// that would have been generated for an implicit special member, per
-/// C++11 [dcl.fct.def.default]p2.
-void Sema::CheckExplicitlyDefaultedMemberExceptionSpec(
- CXXMethodDecl *MD, const FunctionProtoType *SpecifiedType) {
- // If the exception specification was explicitly specified but hadn't been
- // parsed when the method was defaulted, grab it now.
- if (SpecifiedType->getExceptionSpecType() == EST_Unparsed)
- SpecifiedType =
- MD->getTypeSourceInfo()->getType()->castAs<FunctionProtoType>();
-
- // Compute the implicit exception specification.
- CallingConv CC = Context.getDefaultCallingConvention(/*IsVariadic=*/false,
- /*IsCXXMethod=*/true);
- FunctionProtoType::ExtProtoInfo EPI(CC);
- auto IES = computeImplicitExceptionSpec(*this, MD->getLocation(), MD);
- EPI.ExceptionSpec = IES.getExceptionSpec();
- const FunctionProtoType *ImplicitType = cast<FunctionProtoType>(
- Context.getFunctionType(Context.VoidTy, None, EPI));
-
- // Ensure that it matches.
- CheckEquivalentExceptionSpec(
- PDiag(diag::err_incorrect_defaulted_exception_spec)
- << getSpecialMember(MD), PDiag(),
- ImplicitType, SourceLocation(),
- SpecifiedType, MD->getLocation());
-}
-
void Sema::CheckDelayedMemberExceptionSpecs() {
decltype(DelayedOverridingExceptionSpecChecks) Overriding;
decltype(DelayedEquivalentExceptionSpecChecks) Equivalent;
- decltype(DelayedDefaultedMemberExceptionSpecs) Defaulted;
std::swap(Overriding, DelayedOverridingExceptionSpecChecks);
std::swap(Equivalent, DelayedEquivalentExceptionSpecChecks);
- std::swap(Defaulted, DelayedDefaultedMemberExceptionSpecs);
// Perform any deferred checking of exception specifications for virtual
// destructors.
@@ -6745,11 +6767,6 @@ void Sema::CheckDelayedMemberExceptionSpecs() {
// special members.
for (auto &Check : Equivalent)
CheckEquivalentExceptionSpec(Check.second, Check.first);
-
- // Check that any explicitly-defaulted methods have exception specifications
- // compatible with their implicit exception specifications.
- for (auto &Spec : Defaulted)
- CheckExplicitlyDefaultedMemberExceptionSpec(Spec.first, Spec.second);
}
namespace {
@@ -6891,6 +6908,8 @@ struct SpecialMemberDeletionInfo
return ICI ? Sema::CXXInvalid : CSM;
}
+ bool shouldDeleteForVariantObjCPtrMember(FieldDecl *FD, QualType FieldType);
+
bool visitBase(CXXBaseSpecifier *Base) { return shouldDeleteForBase(Base); }
bool visitField(FieldDecl *Field) { return shouldDeleteForField(Field); }
@@ -6962,13 +6981,14 @@ bool SpecialMemberDeletionInfo::shouldDeleteForSubobjectCall(
S.Diag(Field->getLocation(),
diag::note_deleted_special_member_class_subobject)
<< getEffectiveCSM() << MD->getParent() << /*IsField*/true
- << Field << DiagKind << IsDtorCallInCtor;
+ << Field << DiagKind << IsDtorCallInCtor << /*IsObjCPtr*/false;
} else {
CXXBaseSpecifier *Base = Subobj.get<CXXBaseSpecifier*>();
S.Diag(Base->getBeginLoc(),
diag::note_deleted_special_member_class_subobject)
<< getEffectiveCSM() << MD->getParent() << /*IsField*/ false
- << Base->getType() << DiagKind << IsDtorCallInCtor;
+ << Base->getType() << DiagKind << IsDtorCallInCtor
+ << /*IsObjCPtr*/false;
}
if (DiagKind == 1)
@@ -7020,6 +7040,30 @@ bool SpecialMemberDeletionInfo::shouldDeleteForClassSubobject(
return false;
}
+bool SpecialMemberDeletionInfo::shouldDeleteForVariantObjCPtrMember(
+ FieldDecl *FD, QualType FieldType) {
+ // The defaulted special functions are defined as deleted if this is a variant
+ // member with a non-trivial ownership type, e.g., ObjC __strong or __weak
+ // type under ARC.
+ if (!FieldType.hasNonTrivialObjCLifetime())
+ return false;
+
+ // Don't make the defaulted default constructor defined as deleted if the
+ // member has an in-class initializer.
+ if (CSM == Sema::CXXDefaultConstructor && FD->hasInClassInitializer())
+ return false;
+
+ if (Diagnose) {
+ auto *ParentClass = cast<CXXRecordDecl>(FD->getParent());
+ S.Diag(FD->getLocation(),
+ diag::note_deleted_special_member_class_subobject)
+ << getEffectiveCSM() << ParentClass << /*IsField*/true
+ << FD << 4 << /*IsDtorCallInCtor*/false << /*IsObjCPtr*/true;
+ }
+
+ return true;
+}
+
/// Check whether we should delete a special member function due to the class
/// having a particular direct or virtual base class.
bool SpecialMemberDeletionInfo::shouldDeleteForBase(CXXBaseSpecifier *Base) {
@@ -7040,7 +7084,8 @@ bool SpecialMemberDeletionInfo::shouldDeleteForBase(CXXBaseSpecifier *Base) {
S.Diag(Base->getBeginLoc(),
diag::note_deleted_special_member_class_subobject)
<< getEffectiveCSM() << MD->getParent() << /*IsField*/ false
- << Base->getType() << /*Deleted*/ 1 << /*IsDtorCallInCtor*/ false;
+ << Base->getType() << /*Deleted*/ 1 << /*IsDtorCallInCtor*/ false
+ << /*IsObjCPtr*/false;
S.NoteDeletedFunction(BaseCtor);
}
return BaseCtor->isDeleted();
@@ -7054,6 +7099,9 @@ bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) {
QualType FieldType = S.Context.getBaseElementType(FD->getType());
CXXRecordDecl *FieldRecord = FieldType->getAsCXXRecordDecl();
+ if (inUnion() && shouldDeleteForVariantObjCPtrMember(FD, FieldType))
+ return true;
+
if (CSM == Sema::CXXDefaultConstructor) {
// For a default constructor, all references must be initialized in-class
// and, if a union, it must have a non-const member.
@@ -7115,6 +7163,9 @@ bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) {
for (auto *UI : FieldRecord->fields()) {
QualType UnionFieldType = S.Context.getBaseElementType(UI->getType());
+ if (shouldDeleteForVariantObjCPtrMember(&*UI, UnionFieldType))
+ return true;
+
if (!UnionFieldType.isConstQualified())
AllVariantFieldsAreConst = false;
@@ -7187,7 +7238,7 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
// The closure type associated with a lambda-expression has a
// deleted (8.4.3) default constructor and a deleted copy
// assignment operator.
- // C++2a adds back these operators if the lambda has no capture-default.
+ // C++2a adds back these operators if the lambda has no lambda-capture.
if (RD->isLambda() && !RD->lambdaIsDefaultConstructibleAndAssignable() &&
(CSM == CXXDefaultConstructor || CSM == CXXCopyAssignment)) {
if (Diagnose)
@@ -7934,14 +7985,14 @@ void Sema::ActOnFinishCXXMemberSpecification(
/// definition of the class is complete.
void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
if (ClassDecl->needsImplicitDefaultConstructor()) {
- ++ASTContext::NumImplicitDefaultConstructors;
+ ++getASTContext().NumImplicitDefaultConstructors;
if (ClassDecl->hasInheritedConstructor())
DeclareImplicitDefaultConstructor(ClassDecl);
}
if (ClassDecl->needsImplicitCopyConstructor()) {
- ++ASTContext::NumImplicitCopyConstructors;
+ ++getASTContext().NumImplicitCopyConstructors;
// If the properties or semantics of the copy constructor couldn't be
// determined while the class was being declared, force a declaration
@@ -7963,7 +8014,7 @@ void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
}
if (getLangOpts().CPlusPlus11 && ClassDecl->needsImplicitMoveConstructor()) {
- ++ASTContext::NumImplicitMoveConstructors;
+ ++getASTContext().NumImplicitMoveConstructors;
if (ClassDecl->needsOverloadResolutionForMoveConstructor() ||
ClassDecl->hasInheritedConstructor())
@@ -7971,7 +8022,7 @@ void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
}
if (ClassDecl->needsImplicitCopyAssignment()) {
- ++ASTContext::NumImplicitCopyAssignmentOperators;
+ ++getASTContext().NumImplicitCopyAssignmentOperators;
// If we have a dynamic class, then the copy assignment operator may be
// virtual, so we have to declare it immediately. This ensures that, e.g.,
@@ -7984,7 +8035,7 @@ void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
}
if (getLangOpts().CPlusPlus11 && ClassDecl->needsImplicitMoveAssignment()) {
- ++ASTContext::NumImplicitMoveAssignmentOperators;
+ ++getASTContext().NumImplicitMoveAssignmentOperators;
// Likewise for the move assignment operator.
if (ClassDecl->isDynamicClass() ||
@@ -7994,7 +8045,7 @@ void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
}
if (ClassDecl->needsImplicitDestructor()) {
- ++ASTContext::NumImplicitDestructors;
+ ++getASTContext().NumImplicitDestructors;
// If we have a dynamic class, then the destructor may be virtual, so we
// have to declare the destructor immediately. This ensures that, e.g., it
@@ -8139,6 +8190,27 @@ void Sema::ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *MethodD) {
CheckCXXDefaultArguments(Method);
}
+// Emit the given diagnostic for each non-address-space qualifier.
+// Common part of CheckConstructorDeclarator and CheckDestructorDeclarator.
+static void checkMethodTypeQualifiers(Sema &S, Declarator &D, unsigned DiagID) {
+ const DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
+ if (FTI.hasMethodTypeQualifiers() && !D.isInvalidType()) {
+ bool DiagOccured = false;
+ FTI.MethodQualifiers->forEachQualifier(
+ [DiagID, &S, &DiagOccured](DeclSpec::TQ, StringRef QualName,
+ SourceLocation SL) {
+ // This diagnostic should be emitted on any qualifier except an addr
+ // space qualifier. However, forEachQualifier currently doesn't visit
+ // addr space qualifiers, so there's no way to write this condition
+ // right now; we just diagnose on everything.
+ S.Diag(SL, DiagID) << QualName << SourceRange(SL);
+ DiagOccured = true;
+ });
+ if (DiagOccured)
+ D.setInvalidType();
+ }
+}
+
/// CheckConstructorDeclarator - Called by ActOnDeclarator to check
/// the well-formedness of the constructor declarator @p D with type @p
/// R. If there are any errors in the declarator, this routine will
@@ -8179,18 +8251,11 @@ QualType Sema::CheckConstructorDeclarator(Declarator &D, QualType R,
D.setInvalidType();
}
- DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
- if (FTI.hasMethodTypeQualifiers()) {
- FTI.MethodQualifiers->forEachQualifier(
- [&](DeclSpec::TQ TypeQual, StringRef QualName, SourceLocation SL) {
- Diag(SL, diag::err_invalid_qualified_constructor)
- << QualName << SourceRange(SL);
- });
- D.setInvalidType();
- }
+ checkMethodTypeQualifiers(*this, D, diag::err_invalid_qualified_constructor);
// C++0x [class.ctor]p4:
// A constructor shall not be declared with a ref-qualifier.
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
if (FTI.hasRefQualifier()) {
Diag(FTI.getRefQualifierLoc(), diag::err_ref_qualifier_constructor)
<< FTI.RefQualifierIsLValueRef
@@ -8365,18 +8430,11 @@ QualType Sema::CheckDestructorDeclarator(Declarator &D, QualType R,
}
}
- DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
- if (FTI.hasMethodTypeQualifiers() && !D.isInvalidType()) {
- FTI.MethodQualifiers->forEachQualifier(
- [&](DeclSpec::TQ TypeQual, StringRef QualName, SourceLocation SL) {
- Diag(SL, diag::err_invalid_qualified_destructor)
- << QualName << SourceRange(SL);
- });
- D.setInvalidType();
- }
+ checkMethodTypeQualifiers(*this, D, diag::err_invalid_qualified_destructor);
// C++0x [class.dtor]p2:
// A destructor shall not be declared with a ref-qualifier.
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
if (FTI.hasRefQualifier()) {
Diag(FTI.getRefQualifierLoc(), diag::err_ref_qualifier_destructor)
<< FTI.RefQualifierIsLValueRef
@@ -8590,12 +8648,12 @@ void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
R = Context.getFunctionType(ConvType, None, Proto->getExtProtoInfo());
// C++0x explicit conversion operators.
- if (DS.isExplicitSpecified())
+ if (DS.hasExplicitSpecifier() && !getLangOpts().CPlusPlus2a)
Diag(DS.getExplicitSpecLoc(),
getLangOpts().CPlusPlus11
? diag::warn_cxx98_compat_explicit_conversion_functions
: diag::ext_explicit_conversion_functions)
- << SourceRange(DS.getExplicitSpecLoc());
+ << SourceRange(DS.getExplicitSpecRange());
}
/// ActOnConversionDeclarator - Called by ActOnDeclarator to complete
@@ -8699,7 +8757,7 @@ void Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
// We leave 'friend' and 'virtual' to be rejected in the normal way.
if (DS.hasTypeSpecifier() || DS.getTypeQualifiers() ||
DS.getStorageClassSpecLoc().isValid() || DS.isInlineSpecified() ||
- DS.isNoreturnSpecified() || DS.isConstexprSpecified()) {
+ DS.isNoreturnSpecified() || DS.hasConstexprSpecifier()) {
BadSpecifierDiagnoser Diagnoser(
*this, D.getIdentifierLoc(),
diag::err_deduction_guide_invalid_specifier);
@@ -8997,6 +9055,9 @@ void Sema::ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace) {
PopDeclContext();
if (Namespc->hasAttr<VisibilityAttr>())
PopPragmaVisibility(true, RBrace);
+ // If this namespace contains an export-declaration, export it now.
+ if (DeferredExportedNamespaces.erase(Namespc))
+ Dcl->setModuleOwnershipKind(Decl::ModuleOwnershipKind::VisibleWhenImported);
}
CXXRecordDecl *Sema::getStdBadAlloc() const {
@@ -9316,13 +9377,17 @@ static bool IsUsingDirectiveInToplevelContext(DeclContext *CurContext) {
namespace {
// Callback to only accept typo corrections that are namespaces.
-class NamespaceValidatorCCC : public CorrectionCandidateCallback {
+class NamespaceValidatorCCC final : public CorrectionCandidateCallback {
public:
bool ValidateCandidate(const TypoCorrection &candidate) override {
if (NamedDecl *ND = candidate.getCorrectionDecl())
return isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND);
return false;
}
+
+ std::unique_ptr<CorrectionCandidateCallback> clone() override {
+ return llvm::make_unique<NamespaceValidatorCCC>(*this);
+ }
};
}
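
The typo-correction callbacks are now stack-allocated and passed by reference, so every concrete CorrectionCandidateCallback must be clonable. A minimal sketch of the new pattern (the class name MyCCC is hypothetical; the signatures mirror the ones added in this diff):

class MyCCC final : public CorrectionCandidateCallback {
public:
  bool ValidateCandidate(const TypoCorrection &candidate) override {
    return candidate.getCorrectionDecl() != nullptr;  // accept any real decl
  }
  std::unique_ptr<CorrectionCandidateCallback> clone() override {
    return llvm::make_unique<MyCCC>(*this);
  }
};

// MyCCC CCC;
// CorrectTypo(NameInfo, LookupKind, S, &SS, CCC, CTK_ErrorRecovery);
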
@@ -9332,9 +9397,9 @@ static bool TryNamespaceTypoCorrection(Sema &S, LookupResult &R, Scope *Sc,
SourceLocation IdentLoc,
IdentifierInfo *Ident) {
R.clear();
+ NamespaceValidatorCCC CCC{};
if (TypoCorrection Corrected =
- S.CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), Sc, &SS,
- llvm::make_unique<NamespaceValidatorCCC>(),
+ S.CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), Sc, &SS, CCC,
Sema::CTK_ErrorRecovery)) {
if (DeclContext *DC = S.computeDeclContext(SS, false)) {
std::string CorrectedStr(Corrected.getAsString(S.getLangOpts()));
@@ -9737,7 +9802,7 @@ UsingShadowDecl *Sema::BuildUsingShadowDecl(Scope *S,
NonTemplateTarget = TargetTD->getTemplatedDecl();
UsingShadowDecl *Shadow;
- if (isa<CXXConstructorDecl>(NonTemplateTarget)) {
+ if (NonTemplateTarget && isa<CXXConstructorDecl>(NonTemplateTarget)) {
bool IsVirtualBase =
isVirtualDirectBase(cast<CXXRecordDecl>(CurContext),
UD->getQualifier()->getAsRecordDecl());
@@ -9829,7 +9894,7 @@ static CXXBaseSpecifier *findDirectBaseWithType(CXXRecordDecl *Derived,
}
namespace {
-class UsingValidatorCCC : public CorrectionCandidateCallback {
+class UsingValidatorCCC final : public CorrectionCandidateCallback {
public:
UsingValidatorCCC(bool HasTypenameKeyword, bool IsInstantiation,
NestedNameSpecifier *NNS, CXXRecordDecl *RequireMemberOf)
@@ -9899,6 +9964,10 @@ public:
return !HasTypenameKeyword;
}
+ std::unique_ptr<CorrectionCandidateCallback> clone() override {
+ return llvm::make_unique<UsingValidatorCCC>(*this);
+ }
+
private:
bool HasTypenameKeyword;
bool IsInstantiation;
@@ -10055,12 +10124,11 @@ NamedDecl *Sema::BuildUsingDeclaration(
isa<TranslationUnitDecl>(LookupContext) &&
getSourceManager().isInSystemHeader(UsingLoc))
return nullptr;
- if (TypoCorrection Corrected = CorrectTypo(
- R.getLookupNameInfo(), R.getLookupKind(), S, &SS,
- llvm::make_unique<UsingValidatorCCC>(
- HasTypenameKeyword, IsInstantiation, SS.getScopeRep(),
- dyn_cast<CXXRecordDecl>(CurContext)),
- CTK_ErrorRecovery)) {
+ UsingValidatorCCC CCC(HasTypenameKeyword, IsInstantiation, SS.getScopeRep(),
+ dyn_cast<CXXRecordDecl>(CurContext));
+ if (TypoCorrection Corrected =
+ CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), S, &SS, CCC,
+ CTK_ErrorRecovery)) {
// We reject candidates where DroppedSpecifier == true, hence the
// literal '0' below.
diagnoseTypo(Corrected, PDiag(diag::err_no_member_suggest)
@@ -10808,6 +10876,28 @@ struct ComputingExceptionSpec {
};
}
+bool Sema::tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec) {
+ llvm::APSInt Result;
+ ExprResult Converted = CheckConvertedConstantExpression(
+ ExplicitSpec.getExpr(), Context.BoolTy, Result, CCEK_ExplicitBool);
+ ExplicitSpec.setExpr(Converted.get());
+ if (Converted.isUsable() && !Converted.get()->isValueDependent()) {
+ ExplicitSpec.setKind(Result.getBoolValue()
+ ? ExplicitSpecKind::ResolvedTrue
+ : ExplicitSpecKind::ResolvedFalse);
+ return true;
+ }
+ ExplicitSpec.setKind(ExplicitSpecKind::Unresolved);
+ return false;
+}
+
+ExplicitSpecifier Sema::ActOnExplicitBoolSpecifier(Expr *ExplicitExpr) {
+ ExplicitSpecifier ES(ExplicitExpr, ExplicitSpecKind::Unresolved);
+ if (!ExplicitExpr->isTypeDependent())
+ tryResolveExplicitSpecifier(ES);
+ return ES;
+}
+
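
A minimal C++2a sketch of what these two entry points handle; the dependent condition stays Unresolved until instantiation, then resolves to true or false (assumes -std=c++2a and <type_traits>):

#include <type_traits>

template <typename T>
struct Wrapper {
  explicit(!std::is_convertible_v<T, int>) Wrapper(T) {}
};

Wrapper<int>   a = 1;        // explicit(false): copy-initialization allowed
Wrapper<void*> b{nullptr};   // explicit(true): direct-initialization required
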
static Sema::ImplicitExceptionSpecification
ComputeDefaultedSpecialMemberExceptionSpec(
Sema &S, SourceLocation Loc, CXXMethodDecl *MD, Sema::CXXSpecialMember CSM,
@@ -10956,9 +11046,10 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
= Context.DeclarationNames.getCXXConstructorName(ClassType);
DeclarationNameInfo NameInfo(Name, ClassLoc);
CXXConstructorDecl *DefaultCon = CXXConstructorDecl::Create(
- Context, ClassDecl, ClassLoc, NameInfo, /*Type*/QualType(),
- /*TInfo=*/nullptr, /*isExplicit=*/false, /*isInline=*/true,
- /*isImplicitlyDeclared=*/true, Constexpr);
+ Context, ClassDecl, ClassLoc, NameInfo, /*Type*/ QualType(),
+ /*TInfo=*/nullptr, ExplicitSpecifier(),
+ /*isInline=*/true, /*isImplicitlyDeclared=*/true,
+ Constexpr ? CSK_constexpr : CSK_unspecified);
DefaultCon->setAccess(AS_public);
DefaultCon->setDefaulted();
@@ -10976,7 +11067,7 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
DefaultCon->setTrivial(ClassDecl->hasTrivialDefaultConstructor());
// Note that we have declared this constructor.
- ++ASTContext::NumImplicitDefaultConstructorsDeclared;
+ ++getASTContext().NumImplicitDefaultConstructorsDeclared;
Scope *S = getScopeForContext(ClassDecl);
CheckImplicitSpecialMemberDeclaration(S, DefaultCon);
@@ -11077,8 +11168,9 @@ Sema::findInheritingConstructor(SourceLocation Loc,
CXXConstructorDecl *DerivedCtor = CXXConstructorDecl::Create(
Context, Derived, UsingLoc, NameInfo, TInfo->getType(), TInfo,
- BaseCtor->isExplicit(), /*Inline=*/true,
- /*ImplicitlyDeclared=*/true, Constexpr,
+ BaseCtor->getExplicitSpecifier(), /*isInline=*/true,
+ /*isImplicitlyDeclared=*/true,
+ Constexpr ? BaseCtor->getConstexprKind() : CSK_unspecified,
InheritedConstructor(Shadow, BaseCtor));
if (Shadow->isInvalidDecl())
DerivedCtor->setInvalidDecl();
@@ -11098,7 +11190,7 @@ Sema::findInheritingConstructor(SourceLocation Loc,
Context.getTrivialTypeSourceInfo(FPT->getParamType(I), UsingLoc);
ParmVarDecl *PD = ParmVarDecl::Create(
Context, DerivedCtor, UsingLoc, UsingLoc, /*IdentifierInfo=*/nullptr,
- FPT->getParamType(I), TInfo, SC_None, /*DefaultArg=*/nullptr);
+ FPT->getParamType(I), TInfo, SC_None, /*DefArg=*/nullptr);
PD->setScopeInfo(0, I);
PD->setImplicit();
// Ensure attributes are propagated onto parameters (this matters for
@@ -11249,7 +11341,7 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
ClassDecl->hasTrivialDestructorForCall());
// Note that we have declared this destructor.
- ++ASTContext::NumImplicitDestructorsDeclared;
+ ++getASTContext().NumImplicitDestructorsDeclared;
Scope *S = getScopeForContext(ClassDecl);
CheckImplicitSpecialMemberDeclaration(S, Destructor);
@@ -11319,7 +11411,6 @@ void Sema::ActOnFinishCXXMemberDecls() {
if (Record->isInvalidDecl()) {
DelayedOverridingExceptionSpecChecks.clear();
DelayedEquivalentExceptionSpecChecks.clear();
- DelayedDefaultedMemberExceptionSpecs.clear();
return;
}
checkForMultipleExportedDefaultConstructors(*this, Record);
@@ -11399,7 +11490,7 @@ class RefBuilder: public ExprBuilder {
public:
Expr *build(Sema &S, SourceLocation Loc) const override {
- return assertNotNull(S.BuildDeclRefExpr(Var, VarType, VK_LValue, Loc).get());
+ return assertNotNull(S.BuildDeclRefExpr(Var, VarType, VK_LValue, Loc));
}
RefBuilder(VarDecl *Var, QualType VarType)
@@ -11551,7 +11642,7 @@ buildMemcpyForAssignmentOp(Sema &S, SourceLocation Loc, QualType T,
Expr *CallArgs[] = {
To, From, IntegerLiteral::Create(S.Context, Size, SizeType, Loc)
};
- ExprResult Call = S.ActOnCallExpr(/*Scope=*/nullptr, MemCpyRef.get(),
+ ExprResult Call = S.BuildCallExpr(/*Scope=*/nullptr, MemCpyRef.get(),
Loc, CallArgs, Loc);
assert(!Call.isInvalid() && "Call to __builtin_memcpy cannot fail!");
@@ -11660,7 +11751,7 @@ buildSingleCopyAssignRecursively(Sema &S, SourceLocation Loc, QualType T,
// Create the reference to operator=.
ExprResult OpEqualRef
- = S.BuildMemberReferenceExpr(To.build(S, Loc), T, Loc, /*isArrow=*/false,
+ = S.BuildMemberReferenceExpr(To.build(S, Loc), T, Loc, /*IsArrow=*/false,
SS, /*TemplateKWLoc=*/SourceLocation(),
/*FirstQualifierInScope=*/nullptr,
OpLookup,
@@ -11810,14 +11901,13 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
return nullptr;
QualType ArgType = Context.getTypeDeclType(ClassDecl);
+ if (Context.getLangOpts().OpenCLCPlusPlus)
+ ArgType = Context.getAddrSpaceQualType(ArgType, LangAS::opencl_generic);
QualType RetType = Context.getLValueReferenceType(ArgType);
bool Const = ClassDecl->implicitCopyAssignmentHasConstParam();
if (Const)
ArgType = ArgType.withConst();
- if (Context.getLangOpts().OpenCLCPlusPlus)
- ArgType = Context.getAddrSpaceQualType(ArgType, LangAS::opencl_generic);
-
ArgType = Context.getLValueReferenceType(ArgType);
bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl,
@@ -11829,10 +11919,11 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal);
SourceLocation ClassLoc = ClassDecl->getLocation();
DeclarationNameInfo NameInfo(Name, ClassLoc);
- CXXMethodDecl *CopyAssignment =
- CXXMethodDecl::Create(Context, ClassDecl, ClassLoc, NameInfo, QualType(),
- /*TInfo=*/nullptr, /*StorageClass=*/SC_None,
- /*isInline=*/true, Constexpr, SourceLocation());
+ CXXMethodDecl *CopyAssignment = CXXMethodDecl::Create(
+ Context, ClassDecl, ClassLoc, NameInfo, QualType(),
+ /*TInfo=*/nullptr, /*StorageClass=*/SC_None,
+ /*isInline=*/true, Constexpr ? CSK_constexpr : CSK_unspecified,
+ SourceLocation());
CopyAssignment->setAccess(AS_public);
CopyAssignment->setDefaulted();
CopyAssignment->setImplicit();
@@ -11860,7 +11951,7 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
: ClassDecl->hasTrivialCopyAssignment());
// Note that we have added this copy-assignment operator.
- ++ASTContext::NumImplicitCopyAssignmentOperatorsDeclared;
+ ++getASTContext().NumImplicitCopyAssignmentOperatorsDeclared;
Scope *S = getScopeForContext(ClassDecl);
CheckImplicitSpecialMemberDeclaration(S, CopyAssignment);
@@ -12009,7 +12100,7 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
DerefBuilder DerefThis(This);
CastBuilder To(DerefThis,
Context.getQualifiedType(
- BaseType, CopyAssignOperator->getTypeQualifiers()),
+ BaseType, CopyAssignOperator->getMethodQualifiers()),
VK_LValue, BasePath);
// Build the copy.
@@ -12135,6 +12226,8 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
// constructor rules.
QualType ArgType = Context.getTypeDeclType(ClassDecl);
+ if (Context.getLangOpts().OpenCLCPlusPlus)
+ ArgType = Context.getAddrSpaceQualType(ArgType, LangAS::opencl_generic);
QualType RetType = Context.getLValueReferenceType(ArgType);
ArgType = Context.getRValueReferenceType(ArgType);
@@ -12147,10 +12240,11 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal);
SourceLocation ClassLoc = ClassDecl->getLocation();
DeclarationNameInfo NameInfo(Name, ClassLoc);
- CXXMethodDecl *MoveAssignment =
- CXXMethodDecl::Create(Context, ClassDecl, ClassLoc, NameInfo, QualType(),
- /*TInfo=*/nullptr, /*StorageClass=*/SC_None,
- /*isInline=*/true, Constexpr, SourceLocation());
+ CXXMethodDecl *MoveAssignment = CXXMethodDecl::Create(
+ Context, ClassDecl, ClassLoc, NameInfo, QualType(),
+ /*TInfo=*/nullptr, /*StorageClass=*/SC_None,
+ /*isInline=*/true, Constexpr ? CSK_constexpr : CSK_unspecified,
+ SourceLocation());
MoveAssignment->setAccess(AS_public);
MoveAssignment->setDefaulted();
MoveAssignment->setImplicit();
@@ -12181,7 +12275,7 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
: ClassDecl->hasTrivialMoveAssignment());
// Note that we have added this move-assignment operator.
- ++ASTContext::NumImplicitMoveAssignmentOperatorsDeclared;
+ ++getASTContext().NumImplicitMoveAssignmentOperatorsDeclared;
Scope *S = getScopeForContext(ClassDecl);
CheckImplicitSpecialMemberDeclaration(S, MoveAssignment);
@@ -12374,7 +12468,7 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
// Implicitly cast "this" to the appropriately-qualified base type.
CastBuilder To(DerefThis,
Context.getQualifiedType(
- BaseType, MoveAssignOperator->getTypeQualifiers()),
+ BaseType, MoveAssignOperator->getMethodQualifiers()),
VK_LValue, BasePath);
// Build the move.
@@ -12529,8 +12623,10 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
// member of its class.
CXXConstructorDecl *CopyConstructor = CXXConstructorDecl::Create(
Context, ClassDecl, ClassLoc, NameInfo, QualType(), /*TInfo=*/nullptr,
- /*isExplicit=*/false, /*isInline=*/true, /*isImplicitlyDeclared=*/true,
- Constexpr);
+ ExplicitSpecifier(),
+ /*isInline=*/true,
+ /*isImplicitlyDeclared=*/true,
+ Constexpr ? CSK_constexpr : CSK_unspecified);
CopyConstructor->setAccess(AS_public);
CopyConstructor->setDefaulted();
@@ -12564,7 +12660,7 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
: ClassDecl->hasTrivialCopyConstructorForCall()));
// Note that we have declared this constructor.
- ++ASTContext::NumImplicitCopyConstructorsDeclared;
+ ++getASTContext().NumImplicitCopyConstructorsDeclared;
Scope *S = getScopeForContext(ClassDecl);
CheckImplicitSpecialMemberDeclaration(S, CopyConstructor);
@@ -12659,8 +12755,10 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
// member of its class.
CXXConstructorDecl *MoveConstructor = CXXConstructorDecl::Create(
Context, ClassDecl, ClassLoc, NameInfo, QualType(), /*TInfo=*/nullptr,
- /*isExplicit=*/false, /*isInline=*/true, /*isImplicitlyDeclared=*/true,
- Constexpr);
+ ExplicitSpecifier(),
+ /*isInline=*/true,
+ /*isImplicitlyDeclared=*/true,
+ Constexpr ? CSK_constexpr : CSK_unspecified);
MoveConstructor->setAccess(AS_public);
MoveConstructor->setDefaulted();
@@ -12694,7 +12792,7 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
: ClassDecl->hasTrivialMoveConstructorForCall()));
// Note that we have declared this constructor.
- ++ASTContext::NumImplicitMoveConstructorsDeclared;
+ ++getASTContext().NumImplicitMoveConstructorsDeclared;
Scope *S = getScopeForContext(ClassDecl);
CheckImplicitSpecialMemberDeclaration(S, MoveConstructor);
@@ -12798,7 +12896,7 @@ void Sema::DefineImplicitLambdaToFunctionPointerConversion(
// Construct the body of the conversion function { return __invoke; }.
Expr *FunctionRef = BuildDeclRefExpr(Invoker, Invoker->getType(),
- VK_LValue, Conv->getLocation()).get();
+ VK_LValue, Conv->getLocation());
assert(FunctionRef && "Can't refer to __invoke function?");
Stmt *Return = BuildReturnStmt(Conv->getLocation(), FunctionRef).get();
Conv->setBody(CompoundStmt::Create(Context, Return, Conv->getLocation(),
@@ -12981,7 +13079,7 @@ ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
// If we already have the in-class initializer nothing needs to be done.
if (Field->getInClassInitializer())
- return CXXDefaultInitExpr::Create(Context, Loc, Field);
+ return CXXDefaultInitExpr::Create(Context, Loc, Field, CurContext);
// If we might have already tried and failed to instantiate, don't try again.
if (Field->isInvalidDecl())
@@ -13022,7 +13120,7 @@ ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
Field->setInvalidDecl();
return ExprError();
}
- return CXXDefaultInitExpr::Create(Context, Loc, Field);
+ return CXXDefaultInitExpr::Create(Context, Loc, Field, CurContext);
}
// DR1351:
@@ -13061,12 +13159,17 @@ void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
return;
CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
- MarkFunctionReferenced(VD->getLocation(), Destructor);
- CheckDestructorAccess(VD->getLocation(), Destructor,
- PDiag(diag::err_access_dtor_var)
- << VD->getDeclName()
- << VD->getType());
- DiagnoseUseOfDecl(Destructor, VD->getLocation());
+
+ // If this is an array, we'll require the destructor during initialization, so
+ // we can skip over this. We still want to emit exit-time destructor warnings
+ // though.
+ if (!VD->getType()->isArrayType()) {
+ MarkFunctionReferenced(VD->getLocation(), Destructor);
+ CheckDestructorAccess(VD->getLocation(), Destructor,
+ PDiag(diag::err_access_dtor_var)
+ << VD->getDeclName() << VD->getType());
+ DiagnoseUseOfDecl(Destructor, VD->getLocation());
+ }
if (Destructor->isTrivial()) return;
if (!VD->hasGlobalStorage()) return;
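
For example, for a global array of a class with a user-provided destructor, the destructor is already required and access-checked while building the initialization, so only the exit-time-destructor diagnostics remain relevant here (a sketch):

struct T { ~T() {} };
T globals[4];   // dtor referenced via the array-initialization path, not here
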
@@ -13169,7 +13272,7 @@ CheckOperatorNewDeleteTypes(Sema &SemaRef, const FunctionDecl *FnDecl,
diag::err_operator_new_delete_dependent_result_type)
<< FnDecl->getDeclName() << ExpectedResultType;
- // OpenCL C++: the operator is valid on any address space.
+ // The operator is valid on any address space for OpenCL.
if (SemaRef.getLangOpts().OpenCLCPlusPlus) {
if (auto *PtrTy = ResultType->getAs<PointerType>()) {
ResultType = RemoveAddressSpaceFromPtr(SemaRef, PtrTy);
@@ -13202,7 +13305,7 @@ CheckOperatorNewDeleteTypes(Sema &SemaRef, const FunctionDecl *FnDecl,
// Check that the first parameter type is what we expect.
if (SemaRef.getLangOpts().OpenCLCPlusPlus) {
- // OpenCL C++: the operator is valid on any address space.
+ // The operator is valid on any address space for OpenCL.
if (auto *PtrTy =
FnDecl->getParamDecl(0)->getType()->getAs<PointerType>()) {
FirstParamType = RemoveAddressSpaceFromPtr(SemaRef, PtrTy);
@@ -13910,8 +14013,6 @@ Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
ExprResult Converted = PerformContextuallyConvertToBool(AssertExpr);
if (Converted.isInvalid())
Failed = true;
- else
- Converted = ConstantExpr::Create(Context, Converted.get());
llvm::APSInt Cond;
if (!Failed && VerifyIntegerConstantExpression(Converted.get(), &Cond,
@@ -15155,7 +15256,8 @@ void Sema::MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
}
void Sema::MarkVirtualMembersReferenced(SourceLocation Loc,
- const CXXRecordDecl *RD) {
+ const CXXRecordDecl *RD,
+ bool ConstexprOnly) {
// Mark all functions which will appear in RD's vtable as used.
CXXFinalOverriderMap FinalOverriders;
RD->getFinalOverriders(FinalOverriders);
@@ -15170,7 +15272,7 @@ void Sema::MarkVirtualMembersReferenced(SourceLocation Loc,
// C++ [basic.def.odr]p2:
// [...] A virtual member function is used if it is not pure. [...]
- if (!Overrider->isPure())
+ if (!Overrider->isPure() && (!ConstexprOnly || Overrider->isConstexpr()))
MarkFunctionReferenced(Loc, Overrider);
}
}
@@ -15387,6 +15489,7 @@ bool Sema::checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method) {
case EST_Uninstantiated:
case EST_Unevaluated:
case EST_BasicNoexcept:
+ case EST_NoThrow:
case EST_DynamicNone:
case EST_MSAny:
case EST_None:
diff --git a/lib/Sema/SemaDeclObjC.cpp b/lib/Sema/SemaDeclObjC.cpp
index 3746bdad0358..e629837eb71d 100644
--- a/lib/Sema/SemaDeclObjC.cpp
+++ b/lib/Sema/SemaDeclObjC.cpp
@@ -1,9 +1,8 @@
//===--- SemaDeclObjC.cpp - Semantic Analysis for ObjC Declarations -------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -360,6 +359,7 @@ HasExplicitOwnershipAttr(Sema &S, ParmVarDecl *Param) {
/// ActOnStartOfObjCMethodDef - This routine sets up parameters; invisible
/// and user declared, in the method definition's AST.
void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
+ ImplicitlyRetainedSelfLocs.clear();
assert((getCurMethodDecl() == nullptr) && "Method parsing confused");
ObjCMethodDecl *MDecl = dyn_cast_or_null<ObjCMethodDecl>(D);
@@ -500,7 +500,7 @@ namespace {
// Callback to only accept typo corrections that are Objective-C classes.
// If an ObjCInterfaceDecl* is given to the constructor, then the validation
// function will reject corrections to that class.
-class ObjCInterfaceValidatorCCC : public CorrectionCandidateCallback {
+class ObjCInterfaceValidatorCCC final : public CorrectionCandidateCallback {
public:
ObjCInterfaceValidatorCCC() : CurrentIDecl(nullptr) {}
explicit ObjCInterfaceValidatorCCC(ObjCInterfaceDecl *IDecl)
@@ -511,6 +511,10 @@ class ObjCInterfaceValidatorCCC : public CorrectionCandidateCallback {
return ID && !declaresSameEntity(ID, CurrentIDecl);
}
+ std::unique_ptr<CorrectionCandidateCallback> clone() override {
+ return llvm::make_unique<ObjCInterfaceValidatorCCC>(*this);
+ }
+
private:
ObjCInterfaceDecl *CurrentIDecl;
};
@@ -550,11 +554,10 @@ ActOnSuperClassOfClassInterface(Scope *S,
if (!PrevDecl) {
// Try to correct for a typo in the superclass name without correcting
// to the class we're defining.
+ ObjCInterfaceValidatorCCC CCC(IDecl);
if (TypoCorrection Corrected = CorrectTypo(
- DeclarationNameInfo(SuperName, SuperLoc),
- LookupOrdinaryName, TUScope,
- nullptr, llvm::make_unique<ObjCInterfaceValidatorCCC>(IDecl),
- CTK_ErrorRecovery)) {
+ DeclarationNameInfo(SuperName, SuperLoc), LookupOrdinaryName,
+ TUScope, nullptr, CCC, CTK_ErrorRecovery)) {
diagnoseTypo(Corrected, PDiag(diag::err_undef_superclass_suggest)
<< SuperName << ClassName);
PrevDecl = Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>();
@@ -1293,11 +1296,10 @@ Sema::FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
for (const IdentifierLocPair &Pair : ProtocolId) {
ObjCProtocolDecl *PDecl = LookupProtocol(Pair.first, Pair.second);
if (!PDecl) {
+ DeclFilterCCC<ObjCProtocolDecl> CCC{};
TypoCorrection Corrected = CorrectTypo(
- DeclarationNameInfo(Pair.first, Pair.second),
- LookupObjCProtocolName, TUScope, nullptr,
- llvm::make_unique<DeclFilterCCC<ObjCProtocolDecl>>(),
- CTK_ErrorRecovery);
+ DeclarationNameInfo(Pair.first, Pair.second), LookupObjCProtocolName,
+ TUScope, nullptr, CCC, CTK_ErrorRecovery);
if ((PDecl = Corrected.getCorrectionDeclAs<ObjCProtocolDecl>()))
diagnoseTypo(Corrected, PDiag(diag::err_undeclared_protocol_suggest)
<< Pair.first);
@@ -1335,7 +1337,8 @@ Sema::FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
namespace {
// Callback to only accept typo corrections that are either
// Objective-C protocols or valid Objective-C type arguments.
-class ObjCTypeArgOrProtocolValidatorCCC : public CorrectionCandidateCallback {
+class ObjCTypeArgOrProtocolValidatorCCC final
+ : public CorrectionCandidateCallback {
ASTContext &Context;
Sema::LookupNameKind LookupKind;
public:
@@ -1382,6 +1385,10 @@ class ObjCTypeArgOrProtocolValidatorCCC : public CorrectionCandidateCallback {
return false;
}
+
+ std::unique_ptr<CorrectionCandidateCallback> clone() override {
+ return llvm::make_unique<ObjCTypeArgOrProtocolValidatorCCC>(*this);
+ }
};
} // end anonymous namespace
@@ -1580,7 +1587,7 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
// add the '*'.
if (type->getAs<ObjCInterfaceType>()) {
SourceLocation starLoc = getLocForEndOfToken(loc);
- D.AddTypeInfo(DeclaratorChunk::getPointer(/*typeQuals=*/0, starLoc,
+ D.AddTypeInfo(DeclaratorChunk::getPointer(/*TypeQuals=*/0, starLoc,
SourceLocation(),
SourceLocation(),
SourceLocation(),
@@ -1671,12 +1678,10 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
}
// Perform typo correction on the name.
- TypoCorrection corrected = CorrectTypo(
- DeclarationNameInfo(identifiers[i], identifierLocs[i]), lookupKind, S,
- nullptr,
- llvm::make_unique<ObjCTypeArgOrProtocolValidatorCCC>(Context,
- lookupKind),
- CTK_ErrorRecovery);
+ ObjCTypeArgOrProtocolValidatorCCC CCC(Context, lookupKind);
+ TypoCorrection corrected =
+ CorrectTypo(DeclarationNameInfo(identifiers[i], identifierLocs[i]),
+ lookupKind, S, nullptr, CCC, CTK_ErrorRecovery);
if (corrected) {
// Did we find a protocol?
if (auto proto = corrected.getCorrectionDeclAs<ObjCProtocolDecl>()) {
@@ -1888,7 +1893,8 @@ Decl *Sema::ActOnStartCategoryInterface(
Decl *Sema::ActOnStartCategoryImplementation(
SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
- IdentifierInfo *CatName, SourceLocation CatLoc) {
+ IdentifierInfo *CatName, SourceLocation CatLoc,
+ const ParsedAttributesView &Attrs) {
ObjCInterfaceDecl *IDecl = getObjCInterfaceDecl(ClassName, ClassLoc, true);
ObjCCategoryDecl *CatIDecl = nullptr;
if (IDecl && IDecl->hasDefinition()) {
@@ -1916,6 +1922,9 @@ Decl *Sema::ActOnStartCategoryImplementation(
CDecl->setInvalidDecl();
}
+ ProcessDeclAttributeList(TUScope, CDecl, Attrs);
+ AddPragmaAttributes(TUScope, CDecl);
+
// FIXME: PushOnScopeChains?
CurContext->addDecl(CDecl);
@@ -1951,7 +1960,8 @@ Decl *Sema::ActOnStartClassImplementation(
SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
- SourceLocation SuperClassLoc) {
+ SourceLocation SuperClassLoc,
+ const ParsedAttributesView &Attrs) {
ObjCInterfaceDecl *IDecl = nullptr;
// Check for another declaration kind with the same name.
NamedDecl *PrevDecl
@@ -1968,9 +1978,10 @@ Decl *Sema::ActOnStartClassImplementation(
} else {
// We did not find anything with the name ClassName; try to correct for
// typos in the class name.
- TypoCorrection Corrected = CorrectTypo(
- DeclarationNameInfo(ClassName, ClassLoc), LookupOrdinaryName, TUScope,
- nullptr, llvm::make_unique<ObjCInterfaceValidatorCCC>(), CTK_NonError);
+ ObjCInterfaceValidatorCCC CCC{};
+ TypoCorrection Corrected =
+ CorrectTypo(DeclarationNameInfo(ClassName, ClassLoc),
+ LookupOrdinaryName, TUScope, nullptr, CCC, CTK_NonError);
if (Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>()) {
// Suggest the (potentially) correct interface name. Don't provide a
// code-modification hint or use the typo name for recovery, because
@@ -2044,6 +2055,9 @@ Decl *Sema::ActOnStartClassImplementation(
ObjCImplementationDecl::Create(Context, CurContext, IDecl, SDecl,
ClassLoc, AtClassImplLoc, SuperClassLoc);
+ ProcessDeclAttributeList(TUScope, IMPDecl, Attrs);
+ AddPragmaAttributes(TUScope, IMPDecl);
+
if (CheckObjCDeclScope(IMPDecl))
return ActOnObjCContainerStartDefinition(IMPDecl);
@@ -4047,6 +4061,9 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods,
}
}
+ if (IDecl->hasAttr<ObjCClassStubAttr>())
+ Diag(IC->getLocation(), diag::err_implementation_of_class_stub);
+
if (LangOpts.ObjCRuntime.isNonFragile()) {
while (IDecl->getSuperClass()) {
DiagnoseDuplicateIvars(IDecl, IDecl->getSuperClass());
@@ -4075,6 +4092,10 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods,
Diag(Super->getLocation(), diag::note_class_declared);
}
}
+
+ if (IntfDecl->hasAttr<ObjCClassStubAttr>() &&
+ !IntfDecl->hasAttr<ObjCSubclassingRestrictedAttr>())
+ Diag(IntfDecl->getLocation(), diag::err_class_stub_subclassing_mismatch);
}
DiagnoseVariableSizedIvars(*this, OCD);
if (isInterfaceDeclKind) {
@@ -4152,13 +4173,12 @@ namespace {
/// overrides.
class OverrideSearch {
public:
- Sema &S;
- ObjCMethodDecl *Method;
+ const ObjCMethodDecl *Method;
llvm::SmallSetVector<ObjCMethodDecl*, 4> Overridden;
bool Recursive;
public:
- OverrideSearch(Sema &S, ObjCMethodDecl *method) : S(S), Method(method) {
+ OverrideSearch(Sema &S, const ObjCMethodDecl *method) : Method(method) {
Selector selector = method->getSelector();
// Bypass this search if we've never seen an instance/class method
@@ -4172,19 +4192,20 @@ public:
if (it == S.MethodPool.end())
return;
}
- ObjCMethodList &list =
+ const ObjCMethodList &list =
method->isInstanceMethod() ? it->second.first : it->second.second;
if (!list.getMethod()) return;
- ObjCContainerDecl *container
+ const ObjCContainerDecl *container
= cast<ObjCContainerDecl>(method->getDeclContext());
// Prevent the search from reaching this container again. This is
// important with categories, which override methods from the
// interface and each other.
- if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(container)) {
+ if (const ObjCCategoryDecl *Category =
+ dyn_cast<ObjCCategoryDecl>(container)) {
searchFromContainer(container);
- if (ObjCInterfaceDecl *Interface = Category->getClassInterface())
+ if (const ObjCInterfaceDecl *Interface = Category->getClassInterface())
searchFromContainer(Interface);
} else {
searchFromContainer(container);
@@ -4196,7 +4217,7 @@ public:
iterator end() const { return Overridden.end(); }
private:
- void searchFromContainer(ObjCContainerDecl *container) {
+ void searchFromContainer(const ObjCContainerDecl *container) {
if (container->isInvalidDecl()) return;
switch (container->getDeclKind()) {
@@ -4212,7 +4233,7 @@ private:
}
}
- void searchFrom(ObjCProtocolDecl *protocol) {
+ void searchFrom(const ObjCProtocolDecl *protocol) {
if (!protocol->hasDefinition())
return;
@@ -4221,14 +4242,14 @@ private:
search(protocol->getReferencedProtocols());
}
- void searchFrom(ObjCCategoryDecl *category) {
+ void searchFrom(const ObjCCategoryDecl *category) {
// A method in a category declaration overrides declarations from
// the main class and from protocols the category references.
// The main class is handled in the constructor.
search(category->getReferencedProtocols());
}
- void searchFrom(ObjCCategoryImplDecl *impl) {
+ void searchFrom(const ObjCCategoryImplDecl *impl) {
// A method in a category definition that has a category
// declaration overrides declarations from the category
// declaration.
@@ -4238,12 +4259,12 @@ private:
search(Interface);
// Otherwise it overrides declarations from the class.
- } else if (ObjCInterfaceDecl *Interface = impl->getClassInterface()) {
+ } else if (const auto *Interface = impl->getClassInterface()) {
search(Interface);
}
}
- void searchFrom(ObjCInterfaceDecl *iface) {
+ void searchFrom(const ObjCInterfaceDecl *iface) {
// A method in a class declaration overrides declarations from
if (!iface->hasDefinition())
return;
@@ -4260,20 +4281,19 @@ private:
search(iface->getReferencedProtocols());
}
- void searchFrom(ObjCImplementationDecl *impl) {
+ void searchFrom(const ObjCImplementationDecl *impl) {
// A method in a class implementation overrides declarations from
// the class interface.
- if (ObjCInterfaceDecl *Interface = impl->getClassInterface())
+ if (const auto *Interface = impl->getClassInterface())
search(Interface);
}
void search(const ObjCProtocolList &protocols) {
- for (ObjCProtocolList::iterator i = protocols.begin(), e = protocols.end();
- i != e; ++i)
- search(*i);
+ for (const auto *Proto : protocols)
+ search(Proto);
}
- void search(ObjCContainerDecl *container) {
+ void search(const ObjCContainerDecl *container) {
// Check for a method in this container which matches this selector.
ObjCMethodDecl *meth = container->getMethod(Method->getSelector(),
Method->isInstanceMethod(),
@@ -4299,6 +4319,8 @@ private:
void Sema::CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC) {
+ if (!ObjCMethod)
+ return;
// Search for overridden methods and merge information down from them.
OverrideSearch overrides(*this, ObjCMethod);
// Keep track if the method overrides any method in the class's base classes,
@@ -4307,10 +4329,7 @@ void Sema::CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
// For this info, a method in an implementation is not considered as
// overriding the same method in the interface or its categories.
bool hasOverriddenMethodsInBaseOrProtocol = false;
- for (OverrideSearch::iterator
- i = overrides.begin(), e = overrides.end(); i != e; ++i) {
- ObjCMethodDecl *overridden = *i;
-
+ for (ObjCMethodDecl *overridden : overrides) {
if (!hasOverriddenMethodsInBaseOrProtocol) {
if (isa<ObjCProtocolDecl>(overridden->getDeclContext()) ||
CurrentClass != overridden->getClassInterface() ||
@@ -4337,9 +4356,7 @@ void Sema::CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
if (CategCount > 1 ||
!isa<ObjCCategoryImplDecl>(overridden->getDeclContext())) {
OverrideSearch overrides(*this, overridden);
- for (OverrideSearch::iterator
- OI= overrides.begin(), OE= overrides.end(); OI!=OE; ++OI) {
- ObjCMethodDecl *SuperOverridden = *OI;
+ for (ObjCMethodDecl *SuperOverridden : overrides) {
if (isa<ObjCProtocolDecl>(SuperOverridden->getDeclContext()) ||
CurrentClass != SuperOverridden->getClassInterface()) {
hasOverriddenMethodsInBaseOrProtocol = true;
diff --git a/lib/Sema/SemaExceptionSpec.cpp b/lib/Sema/SemaExceptionSpec.cpp
index e0850feaffc6..9fd924a8cad0 100644
--- a/lib/Sema/SemaExceptionSpec.cpp
+++ b/lib/Sema/SemaExceptionSpec.cpp
@@ -1,9 +1,8 @@
//===--- SemaExceptionSpec.cpp - C++ Exception Specifications ---*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -382,6 +381,11 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
// when declaring a replaceable global allocation function.
DiagID = diag::ext_missing_exception_specification;
ReturnValueOnError = false;
+ } else if (ESI.Type == EST_NoThrow) {
+ // Allow missing attribute 'nothrow' in redeclarations, since this is a very
+ // common omission.
+ DiagID = diag::ext_missing_exception_specification;
+ ReturnValueOnError = false;
} else {
DiagID = diag::err_missing_exception_specification;
ReturnValueOnError = true;
@@ -422,8 +426,14 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
OldProto->getNoexceptExpr()->printPretty(OS, nullptr, getPrintingPolicy());
OS << ")";
break;
-
- default:
+ case EST_NoThrow:
+ OS <<"__attribute__((nothrow))";
+ break;
+ case EST_None:
+ case EST_MSAny:
+ case EST_Unevaluated:
+ case EST_Uninstantiated:
+ case EST_Unparsed:
llvm_unreachable("This spec type is compatible with none.");
}
@@ -734,6 +744,7 @@ bool Sema::handlerCanCatch(QualType HandlerType, QualType ExceptionType) {
bool Sema::CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
+ const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
@@ -780,6 +791,16 @@ bool Sema::CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
return CheckParamExceptionSpec(NestedDiagID, NoteID, Superset, SuperLoc,
Subset, SubLoc);
+ // Allow __declspec(nothrow) to be missing on redeclaration as an extension in
+ // some cases.
+ if (NoThrowDiagID.getDiagID() != 0 && SubCanThrow == CT_Can &&
+ SuperCanThrow == CT_Cannot && SuperEST == EST_NoThrow) {
+ Diag(SubLoc, NoThrowDiagID);
+ if (NoteID.getDiagID() != 0)
+ Diag(SuperLoc, NoteID);
+ return true;
+ }
+
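
A sketch of the override pattern this tolerates: the base method carries the nothrow attribute, the override omits it, and the mismatch is reported through the new ext_override_exception_spec path rather than as a hard error:

struct Base {
  __attribute__((nothrow)) virtual void run() {}
};
struct Derived : Base {
  void run() override {}   // 'nothrow' dropped: extension diagnostic, not an error
};
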
// If the subset contains everything or the superset contains nothing, we've
// failed.
if ((SubCanThrow == CT_Can && SubEST != EST_Dynamic) ||
@@ -909,9 +930,9 @@ bool Sema::CheckExceptionSpecCompatibility(Expr *From, QualType ToType) {
// void (*q)(void (*) throw(int)) = p;
// }
// ... because it might be instantiated with T=int.
- return CheckExceptionSpecSubset(PDiag(DiagID), PDiag(NestedDiagID), PDiag(),
- ToFunc, From->getSourceRange().getBegin(),
- FromFunc, SourceLocation()) &&
+ return CheckExceptionSpecSubset(
+ PDiag(DiagID), PDiag(NestedDiagID), PDiag(), PDiag(), ToFunc,
+ From->getSourceRange().getBegin(), FromFunc, SourceLocation()) &&
!getLangOpts().CPlusPlus17;
}
@@ -943,6 +964,7 @@ bool Sema::CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
return CheckExceptionSpecSubset(PDiag(DiagID),
PDiag(diag::err_deep_exception_specs_differ),
PDiag(diag::note_overridden_virtual_function),
+ PDiag(diag::ext_override_exception_spec),
Old->getType()->getAs<FunctionProtoType>(),
Old->getLocation(),
New->getType()->getAs<FunctionProtoType>(),
@@ -1179,6 +1201,7 @@ CanThrowResult Sema::canThrow(const Expr *E) {
case Expr::CoyieldExprClass:
case Expr::CXXConstCastExprClass:
case Expr::CXXReinterpretCastExprClass:
+ case Expr::BuiltinBitCastExprClass:
case Expr::CXXStdInitializerListExprClass:
case Expr::DesignatedInitExprClass:
case Expr::DesignatedInitUpdateExprClass:
@@ -1290,6 +1313,7 @@ CanThrowResult Sema::canThrow(const Expr *E) {
case Expr::PredefinedExprClass:
case Expr::SizeOfPackExprClass:
case Expr::StringLiteralClass:
+ case Expr::SourceLocExprClass:
// These expressions can never throw.
return CT_Cannot;
diff --git a/lib/Sema/SemaExpr.cpp b/lib/Sema/SemaExpr.cpp
index d5416d4d057c..d8869ffe945a 100644
--- a/lib/Sema/SemaExpr.cpp
+++ b/lib/Sema/SemaExpr.cpp
@@ -1,9 +1,8 @@
//===--- SemaExpr.cpp - Semantic Analysis for Expressions -----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -311,6 +310,19 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
return true;
}
+ // [OpenMP 5.0], 2.19.7.3. declare mapper Directive, Restrictions
+ // List-items in map clauses on this construct may only refer to the declared
+ // variable var and entities that could be referenced by a procedure defined
+ // at the same location
+ auto *DMD = dyn_cast<OMPDeclareMapperDecl>(CurContext);
+ if (LangOpts.OpenMP && DMD && !CurContext->containsDecl(D) &&
+ isa<VarDecl>(D)) {
+ Diag(Loc, diag::err_omp_declare_mapper_wrong_var)
+ << DMD->getVarName().getAsString();
+ Diag(D->getLocation(), diag::note_entity_declared_at) << D;
+ return true;
+ }
+
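
A sketch of the restriction being enforced (assumes -fopenmp with OpenMP 5.0 support; the names are illustrative):

struct Vec { int len; double *data; };
int unrelated;

#pragma omp declare mapper(Vec v) map(v.len, v.data[0:v.len])   // OK: refers only to 'v'
// #pragma omp declare mapper(bad : Vec v) map(unrelated)       // rejected: not the declared variable
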
DiagnoseAvailabilityOfDecl(D, Locs, UnknownObjCClass, ObjCPropertyAccess,
AvoidPartialAvailabilityChecks, ClassReceiver);
@@ -321,17 +333,6 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
return false;
}
-/// Retrieve the message suffix that should be added to a
-/// diagnostic complaining about the given function being deleted or
-/// unavailable.
-std::string Sema::getDeletedOrUnavailableSuffix(const FunctionDecl *FD) {
- std::string Message;
- if (FD->getAvailability(&Message))
- return ": " + Message;
-
- return std::string();
-}
-
/// DiagnoseSentinelCalls - This routine checks whether a call or
/// message-send is to a declaration with the sentinel attribute, and
/// if so, it checks that the requirements of the sentinel are
@@ -624,15 +625,20 @@ ExprResult Sema::DefaultLvalueConversion(Expr *E) {
Context.getTargetInfo().getCXXABI().isMicrosoft())
(void)isCompleteType(E->getExprLoc(), T);
- UpdateMarkingForLValueToRValue(E);
+ ExprResult Res = CheckLValueToRValueConversionOperand(E);
+ if (Res.isInvalid())
+ return Res;
+ E = Res.get();
// Loading a __weak object implicitly retains the value, so we need a cleanup to
// balance that.
if (E->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
Cleanup.setExprNeedsCleanups(true);
- ExprResult Res = ImplicitCastExpr::Create(Context, T, CK_LValueToRValue, E,
- nullptr, VK_RValue);
+ // C++ [conv.lval]p3:
+ // If T is cv std::nullptr_t, the result is a null pointer constant.
+ CastKind CK = T->isNullPtrType() ? CK_NullToPointer : CK_LValueToRValue;
+ Res = ImplicitCastExpr::Create(Context, T, CK, E, nullptr, VK_RValue);
// C11 6.3.2.1p2:
// ... if the lvalue has atomic type, the value has the non-atomic version
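
A minimal sketch of the case the new cast kind covers:

#include <cstddef>
void take(int *) {}
void use(std::nullptr_t np) {
  take(np);   // lvalue-to-rvalue on nullptr_t: no load, just a null pointer constant
}
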
@@ -738,33 +744,20 @@ ExprResult Sema::DefaultArgumentPromotion(Expr *E) {
return ExprError();
E = Res.get();
- QualType ScalarTy = Ty;
- unsigned NumElts = 0;
- if (const ExtVectorType *VecTy = Ty->getAs<ExtVectorType>()) {
- NumElts = VecTy->getNumElements();
- ScalarTy = VecTy->getElementType();
- }
-
// If this is a 'float' or '__fp16' (CVR qualified or typedef)
// promote to double.
// Note that default argument promotion applies only to float (and
// half/fp16); it does not apply to _Float16.
- const BuiltinType *BTy = ScalarTy->getAs<BuiltinType>();
+ const BuiltinType *BTy = Ty->getAs<BuiltinType>();
if (BTy && (BTy->getKind() == BuiltinType::Half ||
BTy->getKind() == BuiltinType::Float)) {
if (getLangOpts().OpenCL &&
!getOpenCLOptions().isEnabled("cl_khr_fp64")) {
- if (BTy->getKind() == BuiltinType::Half) {
- QualType Ty = Context.FloatTy;
- if (NumElts != 0)
- Ty = Context.getExtVectorType(Ty, NumElts);
- E = ImpCastExprToType(E, Ty, CK_FloatingCast).get();
- }
+ if (BTy->getKind() == BuiltinType::Half) {
+ E = ImpCastExprToType(E, Context.FloatTy, CK_FloatingCast).get();
+ }
} else {
- QualType Ty = Context.DoubleTy;
- if (NumElts != 0)
- Ty = Context.getExtVectorType(Ty, NumElts);
- E = ImpCastExprToType(E, Ty, CK_FloatingCast).get();
+ E = ImpCastExprToType(E, Context.DoubleTy, CK_FloatingCast).get();
}
}
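
The classic case this path implements (a sketch): a float argument to a variadic call undergoes default argument promotion to double.

#include <cstdio>
void demo(float f) {
  printf("%f\n", f);   // 'f' is passed as double
}
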
@@ -923,12 +916,13 @@ ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
UnqualifiedId Name;
Name.setIdentifier(PP.getIdentifierInfo("__builtin_trap"),
E->getBeginLoc());
- ExprResult TrapFn = ActOnIdExpression(TUScope, SS, TemplateKWLoc,
- Name, true, false);
+ ExprResult TrapFn = ActOnIdExpression(TUScope, SS, TemplateKWLoc, Name,
+ /*HasTrailingLParen=*/true,
+ /*IsAddressOfOperand=*/false);
if (TrapFn.isInvalid())
return ExprError();
- ExprResult Call = ActOnCallExpr(TUScope, TrapFn.get(), E->getBeginLoc(),
+ ExprResult Call = BuildCallExpr(TUScope, TrapFn.get(), E->getBeginLoc(),
None, E->getEndLoc());
if (Call.isInvalid())
return ExprError();
@@ -1089,8 +1083,8 @@ static QualType handleFloatConversion(Sema &S, ExprResult &LHS,
LHSType = S.Context.FloatTy;
return handleIntToFloatConversion(S, LHS, RHS, LHSType, RHSType,
- /*convertFloat=*/!IsCompAssign,
- /*convertInt=*/ true);
+ /*ConvertFloat=*/!IsCompAssign,
+ /*ConvertInt=*/ true);
}
assert(RHSFloat);
return handleIntToFloatConversion(S, RHS, LHS, RHSType, LHSType,
@@ -1250,6 +1244,93 @@ static QualType handleComplexIntConversion(Sema &S, ExprResult &LHS,
return ComplexType;
}
+/// Return the rank of a given fixed point or integer type. The exact values
+/// do not matter, but they must increase with the rank ordering described in
+/// N1169 4.1.1.
+static unsigned GetFixedPointRank(QualType Ty) {
+ const auto *BTy = Ty->getAs<BuiltinType>();
+ assert(BTy && "Expected a builtin type.");
+
+ switch (BTy->getKind()) {
+ case BuiltinType::ShortFract:
+ case BuiltinType::UShortFract:
+ case BuiltinType::SatShortFract:
+ case BuiltinType::SatUShortFract:
+ return 1;
+ case BuiltinType::Fract:
+ case BuiltinType::UFract:
+ case BuiltinType::SatFract:
+ case BuiltinType::SatUFract:
+ return 2;
+ case BuiltinType::LongFract:
+ case BuiltinType::ULongFract:
+ case BuiltinType::SatLongFract:
+ case BuiltinType::SatULongFract:
+ return 3;
+ case BuiltinType::ShortAccum:
+ case BuiltinType::UShortAccum:
+ case BuiltinType::SatShortAccum:
+ case BuiltinType::SatUShortAccum:
+ return 4;
+ case BuiltinType::Accum:
+ case BuiltinType::UAccum:
+ case BuiltinType::SatAccum:
+ case BuiltinType::SatUAccum:
+ return 5;
+ case BuiltinType::LongAccum:
+ case BuiltinType::ULongAccum:
+ case BuiltinType::SatLongAccum:
+ case BuiltinType::SatULongAccum:
+ return 6;
+ default:
+ if (BTy->isInteger())
+ return 0;
+ llvm_unreachable("Unexpected fixed point or integer type");
+ }
+}
+
+/// handleFixedPointConversion - Fixed point operations between fixed
+/// point types and integers or other fixed point types do not fall under
+/// usual arithmetic conversion since these conversions could result in loss
+/// of precision (N1169 4.1.4). These operations should be calculated with
+/// the full precision of their result type (N1169 4.1.6.2.1).
+static QualType handleFixedPointConversion(Sema &S, QualType LHSTy,
+ QualType RHSTy) {
+ assert((LHSTy->isFixedPointType() || RHSTy->isFixedPointType()) &&
+ "Expected at least one of the operands to be a fixed point type");
+ assert((LHSTy->isFixedPointOrIntegerType() ||
+ RHSTy->isFixedPointOrIntegerType()) &&
+ "Special fixed point arithmetic operation conversions are only "
+ "applied to ints or other fixed point types");
+
+ // If one operand has signed fixed-point type and the other operand has
+ // unsigned fixed-point type, then the unsigned fixed-point operand is
+ // converted to its corresponding signed fixed-point type and the resulting
+ // type is the type of the converted operand.
+ if (RHSTy->isSignedFixedPointType() && LHSTy->isUnsignedFixedPointType())
+ LHSTy = S.Context.getCorrespondingSignedFixedPointType(LHSTy);
+ else if (RHSTy->isUnsignedFixedPointType() && LHSTy->isSignedFixedPointType())
+ RHSTy = S.Context.getCorrespondingSignedFixedPointType(RHSTy);
+
+ // The result type is the type with the highest rank, whereby a fixed-point
+ // conversion rank is always greater than an integer conversion rank; if the
+ // type of either of the operands is a saturating fixed-point type, the result
+ // type shall be the saturating fixed-point type corresponding to the type
+ // with the highest rank; the resulting value is converted (taking into
+ // account rounding and overflow) to the precision of the resulting type.
+ // Same ranks between signed and unsigned types are resolved earlier, so both
+ // types are either signed or both unsigned at this point.
+ unsigned LHSTyRank = GetFixedPointRank(LHSTy);
+ unsigned RHSTyRank = GetFixedPointRank(RHSTy);
+
+ QualType ResultTy = LHSTyRank > RHSTyRank ? LHSTy : RHSTy;
+
+ if (LHSTy->isSaturatedFixedPointType() || RHSTy->isSaturatedFixedPointType())
+ ResultTy = S.Context.getCorrespondingSaturatedType(ResultTy);
+
+ return ResultTy;
+}
+
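
As a worked example of the rules above (assuming clang's fixed-point extension is enabled): for unsigned short _Accum + long _Fract, the unsigned operand is first converted to its signed counterpart short _Accum; short _Accum has rank 4 and long _Fract rank 3, so the result type is short _Accum. Had either operand been _Sat-qualified, the result would instead be _Sat short _Accum.
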
/// UsualArithmeticConversions - Performs various conversions that are common to
/// binary operators (C99 6.3.1.8). If both operands aren't arithmetic, this
/// routine returns the first non-arithmetic type found. The client is
@@ -1322,12 +1403,14 @@ QualType Sema::UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
return handleComplexIntConversion(*this, LHS, RHS, LHSType, RHSType,
IsCompAssign);
+ if (LHSType->isFixedPointType() || RHSType->isFixedPointType())
+ return handleFixedPointConversion(*this, LHSType, RHSType);
+
// Finally, we have two differing integer types.
return handleIntegerConversion<doIntegralCast, doIntegralCast>
(*this, LHS, RHS, LHSType, RHSType, IsCompAssign);
}
-
//===----------------------------------------------------------------------===//
// Semantic Analysis for various Expression Types
//===----------------------------------------------------------------------===//
@@ -1446,9 +1529,9 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
// If we determined that the generic selection is result-dependent, don't
// try to compute the result expression.
if (IsResultDependent)
- return new (Context) GenericSelectionExpr(
- Context, KeyLoc, ControllingExpr, Types, Exprs, DefaultLoc, RParenLoc,
- ContainsUnexpandedParameterPack);
+ return GenericSelectionExpr::Create(Context, KeyLoc, ControllingExpr, Types,
+ Exprs, DefaultLoc, RParenLoc,
+ ContainsUnexpandedParameterPack);
SmallVector<unsigned, 1> CompatIndices;
unsigned DefaultIndex = -1U;
@@ -1499,7 +1582,7 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
unsigned ResultIndex =
CompatIndices.size() ? CompatIndices[0] : DefaultIndex;
- return new (Context) GenericSelectionExpr(
+ return GenericSelectionExpr::Create(
Context, KeyLoc, ControllingExpr, Types, Exprs, DefaultLoc, RParenLoc,
ContainsUnexpandedParameterPack, ResultIndex);
}
@@ -1605,20 +1688,8 @@ Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
Diag(RemovalDiagLoc, RemovalDiag);
}
-
- QualType CharTyConst = CharTy;
- // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
- if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
- CharTyConst.addConst();
-
- CharTyConst = Context.adjustStringLiteralBaseType(CharTyConst);
-
- // Get an array type for the string, according to C99 6.4.5. This includes
- // the nul terminator character as well as the string length for pascal
- // strings.
- QualType StrTy = Context.getConstantArrayType(
- CharTyConst, llvm::APInt(32, Literal.GetNumStringChars() + 1),
- ArrayType::Normal, 0);
+ QualType StrTy =
+ Context.getStringLiteralArrayType(CharTy, Literal.GetNumStringChars());
// Pass &StringTokLocs[0], StringTokLocs.size() to factory!
StringLiteral *Lit = StringLiteral::Create(Context, Literal.GetString(),
@@ -1696,7 +1767,7 @@ Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
llvm_unreachable("unexpected literal operator lookup result");
}
-ExprResult
+DeclRefExpr *
Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS) {
@@ -1704,36 +1775,54 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
return BuildDeclRefExpr(D, Ty, VK, NameInfo, SS);
}
+DeclRefExpr *
+Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
+ const DeclarationNameInfo &NameInfo,
+ const CXXScopeSpec *SS, NamedDecl *FoundD,
+ SourceLocation TemplateKWLoc,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ NestedNameSpecifierLoc NNS =
+ SS ? SS->getWithLocInContext(Context) : NestedNameSpecifierLoc();
+ return BuildDeclRefExpr(D, Ty, VK, NameInfo, NNS, FoundD, TemplateKWLoc,
+ TemplateArgs);
+}
+
+NonOdrUseReason Sema::getNonOdrUseReasonInCurrentContext(ValueDecl *D) {
+ // A declaration named in an unevaluated operand never constitutes an odr-use.
+ if (isUnevaluatedContext())
+ return NOUR_Unevaluated;
+
+ // C++2a [basic.def.odr]p4:
+ // A variable x whose name appears as a potentially-evaluated expression e
+ // is odr-used by e unless [...] x is a reference that is usable in
+ // constant expressions.
+ if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->getType()->isReferenceType() &&
+ !(getLangOpts().OpenMP && isOpenMPCapturedDecl(D)) &&
+ VD->isUsableInConstantExpressions(Context))
+ return NOUR_Constant;
+ }
+
+ // All remaining non-variable cases constitute an odr-use. For variables, we
+ // need to wait and see how the expression is used.
+ return NOUR_None;
+}
+
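
A minimal sketch of the NOUR_Constant case under C++2a semantics: naming a reference that is usable in constant expressions does not odr-use it, so the DeclRefExpr records that fact.

constexpr int value = 42;
constexpr const int &ref = value;
int buffer[ref];   // names 'ref' but does not odr-use it
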
/// BuildDeclRefExpr - Build an expression that references a
/// declaration that does not require a closure capture.
-ExprResult
+DeclRefExpr *
Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
- const CXXScopeSpec *SS, NamedDecl *FoundD,
+ NestedNameSpecifierLoc NNS, NamedDecl *FoundD,
+ SourceLocation TemplateKWLoc,
const TemplateArgumentListInfo *TemplateArgs) {
bool RefersToCapturedVariable =
isa<VarDecl>(D) &&
NeedToCaptureVariable(cast<VarDecl>(D), NameInfo.getLoc());
- DeclRefExpr *E;
- if (isa<VarTemplateSpecializationDecl>(D)) {
- VarTemplateSpecializationDecl *VarSpec =
- cast<VarTemplateSpecializationDecl>(D);
-
- E = DeclRefExpr::Create(Context, SS ? SS->getWithLocInContext(Context)
- : NestedNameSpecifierLoc(),
- VarSpec->getTemplateKeywordLoc(), D,
- RefersToCapturedVariable, NameInfo.getLoc(), Ty, VK,
- FoundD, TemplateArgs);
- } else {
- assert(!TemplateArgs && "No template arguments for non-variable"
- " template specialization references");
- E = DeclRefExpr::Create(Context, SS ? SS->getWithLocInContext(Context)
- : NestedNameSpecifierLoc(),
- SourceLocation(), D, RefersToCapturedVariable,
- NameInfo, Ty, VK, FoundD);
- }
-
+ DeclRefExpr *E = DeclRefExpr::Create(
+ Context, NNS, TemplateKWLoc, D, RefersToCapturedVariable, NameInfo, Ty,
+ VK, FoundD, TemplateArgs, getNonOdrUseReasonInCurrentContext(D));
MarkDeclRefReferenced(E);
if (getLangOpts().ObjCWeak && isa<VarDecl>(D) &&
@@ -1828,11 +1917,10 @@ static void emitEmptyLookupTypoDiagnostic(
/// Diagnose an empty lookup.
///
/// \return false if new lookup candidates were found
-bool
-Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
- std::unique_ptr<CorrectionCandidateCallback> CCC,
- TemplateArgumentListInfo *ExplicitTemplateArgs,
- ArrayRef<Expr *> Args, TypoExpr **Out) {
+bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
+ CorrectionCandidateCallback &CCC,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ ArrayRef<Expr *> Args, TypoExpr **Out) {
DeclarationName Name = R.getLookupName();
unsigned diagnostic = diag::err_undeclared_var_use;
@@ -1921,7 +2009,7 @@ Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
assert(!ExplicitTemplateArgs &&
"Diagnosing an empty lookup with explicit template args!");
*Out = CorrectTypoDelayed(
- R.getLookupNameInfo(), R.getLookupKind(), S, &SS, std::move(CCC),
+ R.getLookupNameInfo(), R.getLookupKind(), S, &SS, CCC,
[=](const TypoCorrection &TC) {
emitEmptyLookupTypoDiagnostic(TC, *this, SS, Name, TypoLoc, Args,
diagnostic, diagnostic_suggest);
@@ -1929,9 +2017,9 @@ Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
nullptr, CTK_ErrorRecovery);
if (*Out)
return true;
- } else if (S && (Corrected =
- CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), S,
- &SS, std::move(CCC), CTK_ErrorRecovery))) {
+ } else if (S &&
+ (Corrected = CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(),
+ S, &SS, CCC, CTK_ErrorRecovery))) {
std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
bool DroppedSpecifier =
Corrected.WillReplaceSpecifier() && Name.getAsString() == CorrectedStr;
@@ -1988,8 +2076,9 @@ Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
// is in the wrong place to recover. Suggest the typo
// correction, but don't make it a fix-it since we're not going
// to recover well anyway.
- AcceptableWithoutRecovery =
- isa<TypeDecl>(UnderlyingND) || isa<ObjCInterfaceDecl>(UnderlyingND);
+ AcceptableWithoutRecovery = isa<TypeDecl>(UnderlyingND) ||
+ getAsTypeTemplateDecl(UnderlyingND) ||
+ isa<ObjCInterfaceDecl>(UnderlyingND);
} else {
// FIXME: We found a keyword. Suggest it, but don't provide a fix-it
// because we aren't able to recover.
@@ -2062,7 +2151,7 @@ recoverFromMSUnqualifiedLookup(Sema &S, ASTContext &Context,
return CXXDependentScopeMemberExpr::Create(
Context, /*This=*/nullptr, ThisType, /*IsArrow=*/true,
/*Op=*/SourceLocation(), NestedNameSpecifierLoc(), TemplateKWLoc,
- /*FirstQualifierInScope=*/nullptr, NameInfo, TemplateArgs);
+ /*FirstQualifierFoundInScope=*/nullptr, NameInfo, TemplateArgs);
}
// Synthesize a fake NNS that points to the derived class. This will
@@ -2080,7 +2169,7 @@ ExprResult
Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
SourceLocation TemplateKWLoc, UnqualifiedId &Id,
bool HasTrailingLParen, bool IsAddressOfOperand,
- std::unique_ptr<CorrectionCandidateCallback> CCC,
+ CorrectionCandidateCallback *CCC,
bool IsInlineAsmIdentifier, Token *KeywordReplacement) {
assert(!(IsAddressOfOperand && HasTrailingLParen) &&
"cannot be direct & operand and have a trailing lparen");
@@ -2144,8 +2233,10 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
// this becomes a performance hit, we can work harder to preserve those
// results until we get here but it's likely not worth it.
bool MemberOfUnknownSpecialization;
+ AssumedTemplateKind AssumedTemplate;
if (LookupTemplateName(R, S, SS, QualType(), /*EnteringContext=*/false,
- MemberOfUnknownSpecialization, TemplateKWLoc))
+ MemberOfUnknownSpecialization, TemplateKWLoc,
+ &AssumedTemplate))
return ExprError();
if (MemberOfUnknownSpecialization ||
@@ -2202,9 +2293,9 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
// If this name wasn't predeclared and if this is not a function
// call, diagnose the problem.
TypoExpr *TE = nullptr;
- auto DefaultValidator = llvm::make_unique<CorrectionCandidateCallback>(
- II, SS.isValid() ? SS.getScopeRep() : nullptr);
- DefaultValidator->IsAddressOfOperand = IsAddressOfOperand;
+ DefaultFilterCCC DefaultValidator(II, SS.isValid() ? SS.getScopeRep()
+ : nullptr);
+ DefaultValidator.IsAddressOfOperand = IsAddressOfOperand;
assert((!CCC || CCC->IsAddressOfOperand == IsAddressOfOperand) &&
"Typo correction callback misconfigured");
if (CCC) {
@@ -2216,9 +2307,8 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
// FIXME: DiagnoseEmptyLookup produces bad diagnostics if we're looking for
// a template name, but we happen to have always already looked up the name
// before we get here if it must be a template name.
- if (DiagnoseEmptyLookup(S, SS, R,
- CCC ? std::move(CCC) : std::move(DefaultValidator),
- nullptr, None, &TE)) {
+ if (DiagnoseEmptyLookup(S, SS, R, CCC ? *CCC : DefaultValidator, nullptr,
+ None, &TE)) {
if (TE && KeywordReplacement) {
auto &State = getTypoExprState(TE);
auto BestTC = State.Consumer->getNextCorrection();
@@ -2472,8 +2562,10 @@ Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
SelfName.setKind(UnqualifiedIdKind::IK_ImplicitSelfParam);
CXXScopeSpec SelfScopeSpec;
SourceLocation TemplateKWLoc;
- ExprResult SelfExpr = ActOnIdExpression(S, SelfScopeSpec, TemplateKWLoc,
- SelfName, false, false);
+ ExprResult SelfExpr =
+ ActOnIdExpression(S, SelfScopeSpec, TemplateKWLoc, SelfName,
+ /*HasTrailingLParen=*/false,
+ /*IsAddressOfOperand=*/false);
if (SelfExpr.isInvalid())
return ExprError();
@@ -2497,11 +2589,9 @@ Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc))
getCurFunction()->recordUseOfWeak(Result);
}
- if (getLangOpts().ObjCAutoRefCount) {
- if (CurContext->isClosure())
- Diag(Loc, diag::warn_implicitly_retains_self)
- << FixItHint::CreateInsertion(Loc, "self->");
- }
+ if (getLangOpts().ObjCAutoRefCount)
+ if (const BlockDecl *BD = CurContext->getInnermostBlockDecl())
+ ImplicitlyRetainedSelfLocs.push_back({Loc, BD});
return Result;
}
@@ -2572,10 +2662,15 @@ Sema::PerformObjectMemberConversion(Expr *From,
bool PointerConversions = false;
if (isa<FieldDecl>(Member)) {
DestRecordType = Context.getCanonicalType(Context.getTypeDeclType(RD));
+ auto FromPtrType = FromType->getAs<PointerType>();
+ DestRecordType = Context.getAddrSpaceQualType(
+ DestRecordType, FromPtrType
+ ? FromType->getPointeeType().getAddressSpace()
+ : FromType.getAddressSpace());
- if (FromType->getAs<PointerType>()) {
+ if (FromPtrType) {
DestType = Context.getPointerType(DestRecordType);
- FromRecordType = FromType->getPointeeType();
+ FromRecordType = FromPtrType->getPointeeType();
PointerConversions = true;
} else {
DestType = DestRecordType;
@@ -2905,7 +3000,6 @@ ExprResult Sema::BuildDeclarationNameExpr(
// These shouldn't make it here.
case Decl::ObjCAtDefsField:
- case Decl::ObjCIvar:
llvm_unreachable("forming non-member reference to ivar?");
// Enum constants are always r-values and never references.
@@ -2913,6 +3007,7 @@ ExprResult Sema::BuildDeclarationNameExpr(
case Decl::EnumConstant:
case Decl::UnresolvedUsingValue:
case Decl::OMPDeclareReduction:
+ case Decl::OMPDeclareMapper:
valueKind = VK_RValue;
break;
@@ -2922,6 +3017,7 @@ ExprResult Sema::BuildDeclarationNameExpr(
// exist in the high-level semantics.
case Decl::Field:
case Decl::IndirectField:
+ case Decl::ObjCIvar:
assert(getLangOpts().CPlusPlus &&
"building reference to field in C?");
@@ -2986,9 +3082,11 @@ ExprResult Sema::BuildDeclarationNameExpr(
// FIXME: Support lambda-capture of BindingDecls, once CWG actually
// decides how that's supposed to work.
auto *BD = cast<BindingDecl>(VD);
- if (BD->getDeclContext()->isFunctionOrMethod() &&
- BD->getDeclContext() != CurContext)
- diagnoseUncapturableValueReference(*this, Loc, BD, CurContext);
+ if (BD->getDeclContext() != CurContext) {
+ auto *DD = dyn_cast_or_null<VarDecl>(BD->getDecomposedDecl());
+ if (DD && DD->hasLocalStorage())
+ diagnoseUncapturableValueReference(*this, Loc, BD, CurContext);
+ }
break;
}
@@ -3066,6 +3164,7 @@ ExprResult Sema::BuildDeclarationNameExpr(
}
return BuildDeclRefExpr(VD, type, valueKind, NameInfo, &SS, FoundD,
+ /*FIXME: TemplateKWLoc*/ SourceLocation(),
TemplateArgs);
}
}
@@ -3975,32 +4074,11 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
// Unknown size indication requires no size computation.
// Otherwise, evaluate and record it.
- if (auto Size = VAT->getSizeExpr()) {
- if (!CSI->isVLATypeCaptured(VAT)) {
- RecordDecl *CapRecord = nullptr;
- if (auto LSI = dyn_cast<LambdaScopeInfo>(CSI)) {
- CapRecord = LSI->Lambda;
- } else if (auto CRSI = dyn_cast<CapturedRegionScopeInfo>(CSI)) {
- CapRecord = CRSI->TheRecordDecl;
- }
- if (CapRecord) {
- auto ExprLoc = Size->getExprLoc();
- auto SizeType = Context.getSizeType();
- // Build the non-static data member.
- auto Field =
- FieldDecl::Create(Context, CapRecord, ExprLoc, ExprLoc,
- /*Id*/ nullptr, SizeType, /*TInfo*/ nullptr,
- /*BW*/ nullptr, /*Mutable*/ false,
- /*InitStyle*/ ICIS_NoInit);
- Field->setImplicit(true);
- Field->setAccess(AS_private);
- Field->setCapturedVLAType(VAT);
- CapRecord->addDecl(Field);
-
- CSI->addVLATypeCapture(ExprLoc, SizeType);
- }
- }
- }
+ auto Size = VAT->getSizeExpr();
+ if (Size && !CSI->isVLATypeCaptured(VAT) &&
+ (isa<CapturedRegionScopeInfo>(CSI) || isa<LambdaScopeInfo>(CSI)))
+ CSI->addVLATypeCapture(Size->getExprLoc(), VAT, Context.getSizeType());
+
T = VAT->getElementType();
break;
}
@@ -4014,6 +4092,7 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
case Type::Attributed:
case Type::SubstTemplateTypeParm:
case Type::PackExpansion:
+ case Type::MacroQualified:
// Keep walking after single level desugaring.
T = T.getSingleStepDesugaredType(Context);
break;
@@ -4658,6 +4737,33 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
assert(VK == VK_RValue || LangOpts.CPlusPlus ||
!ResultType.isCForbiddenLValueType());
+ if (LHSExp->IgnoreParenImpCasts()->getType()->isVariablyModifiedType() &&
+ FunctionScopes.size() > 1) {
+ if (auto *TT =
+ LHSExp->IgnoreParenImpCasts()->getType()->getAs<TypedefType>()) {
+ for (auto I = FunctionScopes.rbegin(),
+ E = std::prev(FunctionScopes.rend());
+ I != E; ++I) {
+ auto *CSI = dyn_cast<CapturingScopeInfo>(*I);
+ if (CSI == nullptr)
+ break;
+ DeclContext *DC = nullptr;
+ if (auto *LSI = dyn_cast<LambdaScopeInfo>(CSI))
+ DC = LSI->CallOperator;
+ else if (auto *CRSI = dyn_cast<CapturedRegionScopeInfo>(CSI))
+ DC = CRSI->TheCapturedDecl;
+ else if (auto *BSI = dyn_cast<BlockScopeInfo>(CSI))
+ DC = BSI->TheDecl;
+ if (DC) {
+ if (DC->containsDecl(TT->getDecl()))
+ break;
+ captureVariablyModifiedType(
+ Context, LHSExp->IgnoreParenImpCasts()->getType(), CSI);
+ }
+ }
+ }
+ }
+
return new (Context)
ArraySubscriptExpr(LHSExp, RHSExp, ResultType, VK, OK, RLoc);
}
@@ -4778,6 +4884,8 @@ bool Sema::CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
// We already type-checked the argument, so we know it works.
// Just mark all of the declarations in this potentially-evaluated expression
// as being "referenced".
+ EnterExpressionEvaluationContext EvalContext(
+ *this, ExpressionEvaluationContext::PotentiallyEvaluated, Param);
MarkDeclarationsReferencedInExpr(Param->getDefaultArg(),
/*SkipLocalVariables=*/true);
return false;
@@ -4787,7 +4895,7 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD, ParmVarDecl *Param) {
if (CheckCXXDefaultArgExpr(CallLoc, FD, Param))
return ExprError();
- return CXXDefaultArgExpr::Create(Context, CallLoc, Param);
+ return CXXDefaultArgExpr::Create(Context, CallLoc, Param, CurContext);
}
Sema::VariadicCallType
@@ -4810,7 +4918,7 @@ Sema::getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto,
}
namespace {
-class FunctionCallCCC : public FunctionCallFilterCCC {
+class FunctionCallCCC final : public FunctionCallFilterCCC {
public:
FunctionCallCCC(Sema &SemaRef, const IdentifierInfo *FuncName,
unsigned NumArgs, MemberExpr *ME)
@@ -4826,6 +4934,10 @@ public:
return FunctionCallFilterCCC::ValidateCandidate(candidate);
}
+ std::unique_ptr<CorrectionCandidateCallback> clone() override {
+ return llvm::make_unique<FunctionCallCCC>(*this);
+ }
+
private:
const IdentifierInfo *const FunctionName;
};
@@ -4838,11 +4950,10 @@ static TypoCorrection TryTypoCorrectionForCall(Sema &S, Expr *Fn,
DeclarationName FuncName = FDecl->getDeclName();
SourceLocation NameLoc = ME ? ME->getMemberLoc() : Fn->getBeginLoc();
+ FunctionCallCCC CCC(S, FuncName.getAsIdentifierInfo(), Args.size(), ME);
if (TypoCorrection Corrected = S.CorrectTypo(
DeclarationNameInfo(FuncName, NameLoc), Sema::LookupOrdinaryName,
- S.getScopeForContext(S.CurContext), nullptr,
- llvm::make_unique<FunctionCallCCC>(S, FuncName.getAsIdentifierInfo(),
- Args.size(), ME),
+ S.getScopeForContext(S.CurContext), nullptr, CCC,
Sema::CTK_ErrorRecovery)) {
if (NamedDecl *ND = Corrected.getFoundDecl()) {
if (Corrected.isOverloaded()) {
@@ -5049,8 +5160,7 @@ bool Sema::GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
} else {
assert(Param && "can't use default arguments without a known callee");
- ExprResult ArgExpr =
- BuildCXXDefaultArgExpr(CallLoc, FDecl, Param);
+ ExprResult ArgExpr = BuildCXXDefaultArgExpr(CallLoc, FDecl, Param);
if (ArgExpr.isInvalid())
return true;
@@ -5140,15 +5250,29 @@ Sema::CheckStaticArrayArgument(SourceLocation CallLoc,
return;
const ConstantArrayType *ArgCAT =
- Context.getAsConstantArrayType(ArgExpr->IgnoreParenImpCasts()->getType());
+ Context.getAsConstantArrayType(ArgExpr->IgnoreParenCasts()->getType());
if (!ArgCAT)
return;
- if (ArgCAT->getSize().ult(CAT->getSize())) {
+ if (getASTContext().hasSameUnqualifiedType(CAT->getElementType(),
+ ArgCAT->getElementType())) {
+ if (ArgCAT->getSize().ult(CAT->getSize())) {
+ Diag(CallLoc, diag::warn_static_array_too_small)
+ << ArgExpr->getSourceRange()
+ << (unsigned)ArgCAT->getSize().getZExtValue()
+ << (unsigned)CAT->getSize().getZExtValue() << 0;
+ DiagnoseCalleeStaticArrayParam(*this, Param);
+ }
+ return;
+ }
+
+ Optional<CharUnits> ArgSize =
+ getASTContext().getTypeSizeInCharsIfKnown(ArgCAT);
+ Optional<CharUnits> ParmSize = getASTContext().getTypeSizeInCharsIfKnown(CAT);
+ if (ArgSize && ParmSize && *ArgSize < *ParmSize) {
Diag(CallLoc, diag::warn_static_array_too_small)
- << ArgExpr->getSourceRange()
- << (unsigned) ArgCAT->getSize().getZExtValue()
- << (unsigned) CAT->getSize().getZExtValue();
+ << ArgExpr->getSourceRange() << (unsigned)ArgSize->getQuantity()
+ << (unsigned)ParmSize->getQuantity() << 1;
DiagnoseCalleeStaticArrayParam(*this, Param);
}
}
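For orientation, a sketch (not part of the patch; 'takes4' and 'caller' are invented names, and the byte counts assume a 4-byte int) of both paths the extended static-array check now takes:

    void takes4(int a[static 4]);     /* callee promises at least 4 ints */
    void caller(void) {
      int two[2];
      takes4(two);                    /* same element type: 2 < 4 elements */
      char buf[8];
      takes4((int *)buf);             /* different element type: 8 < 16 bytes */
    }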
@@ -5236,7 +5360,7 @@ static bool checkArgsForPlaceholders(Sema &S, MultiExprArg args) {
/// FunctionDecl is returned.
/// TODO: Handle pointer return types.
static FunctionDecl *rewriteBuiltinFunctionDecl(Sema *Sema, ASTContext &Context,
- const FunctionDecl *FDecl,
+ FunctionDecl *FDecl,
MultiExprArg ArgExprs) {
QualType DeclType = FDecl->getType();
@@ -5284,7 +5408,7 @@ static FunctionDecl *rewriteBuiltinFunctionDecl(Sema *Sema, ASTContext &Context,
FunctionProtoType::ExtProtoInfo EPI;
QualType OverloadTy = Context.getFunctionType(FT->getReturnType(),
OverloadParams, EPI);
- DeclContext *Parent = Context.getTranslationUnitDecl();
+ DeclContext *Parent = FDecl->getParent();
FunctionDecl *OverloadDecl = FunctionDecl::Create(Context, Parent,
FDecl->getLocation(),
FDecl->getLocation(),
@@ -5415,10 +5539,33 @@ tryImplicitlyCaptureThisIfImplicitMemberFunctionAccessWithDependentArgs(
}
}
-/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
+ExprResult Sema::ActOnCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
+ MultiExprArg ArgExprs, SourceLocation RParenLoc,
+ Expr *ExecConfig) {
+ ExprResult Call =
+ BuildCallExpr(Scope, Fn, LParenLoc, ArgExprs, RParenLoc, ExecConfig);
+ if (Call.isInvalid())
+ return Call;
+
+ // Diagnose uses of the C++20 "ADL-only template-id call" feature in earlier
+ // language modes.
+ if (auto *ULE = dyn_cast<UnresolvedLookupExpr>(Fn)) {
+ if (ULE->hasExplicitTemplateArgs() &&
+ ULE->decls_begin() == ULE->decls_end()) {
+ Diag(Fn->getExprLoc(), getLangOpts().CPlusPlus2a
+ ? diag::warn_cxx17_compat_adl_only_template_id
+ : diag::ext_adl_only_template_id)
+ << ULE->getName();
+ }
+ }
+
+ return Call;
+}
+
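As a hedged illustration of the C++2a feature this wrapper diagnoses in earlier language modes (the names are invented for the example):

    namespace N {
      struct X {};
      template <class T> void g(X);   // reachable only through ADL
    }
    void h(N::X x) {
      g<int>(x);   // OK in C++2a; ext_adl_only_template_id extension otherwise
    }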
+/// BuildCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
-ExprResult Sema::ActOnCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
+ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig, bool IsExecConfig) {
// Since this might be a postfix expression, get rid of ParenListExprs.
@@ -5521,8 +5668,8 @@ ExprResult Sema::ActOnCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
}
}
- if (isa<DeclRefExpr>(NakedFn)) {
- NDecl = cast<DeclRefExpr>(NakedFn)->getDecl();
+ if (auto *DRE = dyn_cast<DeclRefExpr>(NakedFn)) {
+ NDecl = DRE->getDecl();
FunctionDecl *FDecl = dyn_cast<FunctionDecl>(NDecl);
if (FDecl && FDecl->getBuiltinID()) {
@@ -5534,7 +5681,8 @@ ExprResult Sema::ActOnCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
NDecl = FDecl;
Fn = DeclRefExpr::Create(
Context, FDecl->getQualifierLoc(), SourceLocation(), FDecl, false,
- SourceLocation(), FDecl->getType(), Fn->getValueKind(), FDecl);
+ SourceLocation(), FDecl->getType(), Fn->getValueKind(), FDecl,
+ nullptr, DRE->isNonOdrUse());
}
}
} else if (isa<MemberExpr>(NakedFn))
@@ -5646,28 +5794,29 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
// CheckBuiltinFunctionCall below just after creation of the call expression.
const FunctionType *FuncT = nullptr;
if (!BuiltinID || !Context.BuiltinInfo.hasCustomTypechecking(BuiltinID)) {
- retry:
+ retry:
if (const PointerType *PT = Fn->getType()->getAs<PointerType>()) {
// C99 6.5.2.2p1 - "The expression that denotes the called function shall
// have type pointer to function".
FuncT = PT->getPointeeType()->getAs<FunctionType>();
if (!FuncT)
return ExprError(Diag(LParenLoc, diag::err_typecheck_call_not_function)
- << Fn->getType() << Fn->getSourceRange());
+ << Fn->getType() << Fn->getSourceRange());
} else if (const BlockPointerType *BPT =
- Fn->getType()->getAs<BlockPointerType>()) {
+ Fn->getType()->getAs<BlockPointerType>()) {
FuncT = BPT->getPointeeType()->castAs<FunctionType>();
} else {
// Handle calls to expressions of unknown-any type.
if (Fn->getType() == Context.UnknownAnyTy) {
ExprResult rewrite = rebuildUnknownAnyFunction(*this, Fn);
- if (rewrite.isInvalid()) return ExprError();
+ if (rewrite.isInvalid())
+ return ExprError();
Fn = rewrite.get();
goto retry;
}
- return ExprError(Diag(LParenLoc, diag::err_typecheck_call_not_function)
- << Fn->getType() << Fn->getSourceRange());
+ return ExprError(Diag(LParenLoc, diag::err_typecheck_call_not_function)
+ << Fn->getType() << Fn->getSourceRange());
}
}
@@ -5690,18 +5839,36 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
}
if (!getLangOpts().CPlusPlus) {
+ // Forget about the nulled arguments since typo correction
+    // does not handle them well.
+ TheCall->shrinkNumArgs(Args.size());
// C cannot always handle TypoExpr nodes in builtin calls and direct
// function calls as their argument checking don't necessarily handle
// dependent types properly, so make sure any TypoExprs have been
// dealt with.
ExprResult Result = CorrectDelayedTyposInExpr(TheCall);
if (!Result.isUsable()) return ExprError();
+ CallExpr *TheOldCall = TheCall;
TheCall = dyn_cast<CallExpr>(Result.get());
+ bool CorrectedTypos = TheCall != TheOldCall;
if (!TheCall) return Result;
- // TheCall at this point has max(Args.size(), NumParams) arguments,
- // with extra arguments nulled. We don't want to introduce nulled
- // arguments in Args and so we only take the first Args.size() arguments.
- Args = llvm::makeArrayRef(TheCall->getArgs(), Args.size());
+ Args = llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs());
+
+ // A new call expression node was created if some typos were corrected.
+ // However it may not have been constructed with enough storage. In this
+ // case, rebuild the node with enough storage. The waste of space is
+ // immaterial since this only happens when some typos were corrected.
+ if (CorrectedTypos && Args.size() < NumParams) {
+ if (Config)
+ TheCall = CUDAKernelCallExpr::Create(
+ Context, Fn, cast<CallExpr>(Config), Args, ResultTy, VK_RValue,
+ RParenLoc, NumParams);
+ else
+ TheCall = CallExpr::Create(Context, Fn, Args, ResultTy, VK_RValue,
+ RParenLoc, NumParams, UsesADL);
+ }
+ // We can now handle the nulled arguments for the default arguments.
+ TheCall->setNumArgsUnsafe(std::max<unsigned>(Args.size(), NumParams));
}
// Bail out early if calling a builtin with custom type checking.
@@ -5805,6 +5972,8 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
if (CheckFunctionCall(FDecl, TheCall, Proto))
return ExprError();
+ checkFortifiedBuiltinMemoryFunction(FDecl, TheCall);
+
if (BuiltinID)
return CheckBuiltinFunctionCall(FDecl, BuiltinID, TheCall);
} else if (NDecl) {
@@ -5897,7 +6066,7 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
ILE->setInit(i, ConstantExpr::Create(Context, Init));
}
- Expr *E = new (Context) CompoundLiteralExpr(LParenLoc, TInfo, literalType,
+ auto *E = new (Context) CompoundLiteralExpr(LParenLoc, TInfo, literalType,
VK, LiteralExpr, isFileScope);
if (isFileScope) {
if (!LiteralExpr->isTypeDependent() &&
@@ -5915,6 +6084,19 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
return ExprError();
}
+ // Compound literals that have automatic storage duration are destroyed at
+  // the end of the scope. Emit diagnostics if the literal is or contains a C
+  // union type that is non-trivial to destruct.
+ if (!isFileScope)
+ if (E->getType().hasNonTrivialToPrimitiveDestructCUnion())
+ checkNonTrivialCUnion(E->getType(), E->getExprLoc(),
+ NTCUC_CompoundLiteral, NTCUK_Destruct);
+
+ if (E->getType().hasNonTrivialToPrimitiveDefaultInitializeCUnion() ||
+ E->getType().hasNonTrivialToPrimitiveCopyCUnion())
+ checkNonTrivialCUnionInInitializer(E->getInitializer(),
+ E->getInitializer()->getExprLoc());
+
return MaybeBindToTemporary(E);
}
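A rough sketch of the pattern the new block-scope check is meant to flag, assuming ARC (where a __strong member makes a C union non-trivial to destruct):

    union U { __strong id obj; int i; };   // non-trivial C union under ARC
    void f(void) {
      union U u = (union U){ .i = 0 };     // automatic compound literal: diagnosed
    }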
@@ -6031,6 +6213,7 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
case Type::STK_Bool:
return CK_FixedPointToBoolean;
case Type::STK_Integral:
+ return CK_FixedPointToIntegral;
case Type::STK_Floating:
case Type::STK_IntegralComplex:
case Type::STK_FloatingComplex:
@@ -6075,10 +6258,7 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
case Type::STK_MemberPointer:
llvm_unreachable("member pointer type in C");
case Type::STK_FixedPoint:
- Diag(Src.get()->getExprLoc(),
- diag::err_unimplemented_conversion_with_fixed_point_type)
- << SrcTy;
- return CK_IntegralCast;
+ return CK_IntegralToFixedPoint;
}
llvm_unreachable("Should have returned before this");
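A minimal sketch, assuming the Embedded-C fixed-point extension (-ffixed-point), of the conversions that now get dedicated cast kinds rather than the old unimplemented-conversion diagnostic:

    _Accum a = 2.5k;        // fixed-point literal
    int    i = (int)a;      // CK_FixedPointToIntegral
    _Accum b = (_Accum)i;   // CK_IntegralToFixedPoint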
@@ -7128,10 +7308,10 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// GCC compatibility: soften pointer/integer mismatch. Note that
// null pointers have been filtered out by this point.
if (checkPointerIntegerMismatch(*this, LHS, RHS.get(), QuestionLoc,
- /*isIntFirstExpr=*/true))
+ /*IsIntFirstExpr=*/true))
return RHSTy;
if (checkPointerIntegerMismatch(*this, RHS, LHS.get(), QuestionLoc,
- /*isIntFirstExpr=*/false))
+ /*IsIntFirstExpr=*/false))
return LHSTy;
// Emit a better diagnostic if one of the expressions is a null pointer
@@ -7600,9 +7780,9 @@ checkPointerTypesForAssignment(Sema &S, QualType LHSType, QualType RHSType) {
}
if (!lhq.compatiblyIncludes(rhq)) {
- // Treat address-space mismatches as fatal. TODO: address subspaces
+ // Treat address-space mismatches as fatal.
if (!lhq.isAddressSpaceSupersetOf(rhq))
- ConvTy = Sema::IncompatiblePointerDiscardsQualifiers;
+ return Sema::IncompatiblePointerDiscardsQualifiers;
// It's okay to add or remove GC or lifetime qualifiers when converting to
// and from void*.
@@ -7675,8 +7855,22 @@ checkPointerTypesForAssignment(Sema &S, QualType LHSType, QualType RHSType) {
// level of indirection, this must be the issue.
if (isa<PointerType>(lhptee) && isa<PointerType>(rhptee)) {
do {
- lhptee = cast<PointerType>(lhptee)->getPointeeType().getTypePtr();
- rhptee = cast<PointerType>(rhptee)->getPointeeType().getTypePtr();
+ std::tie(lhptee, lhq) =
+ cast<PointerType>(lhptee)->getPointeeType().split().asPair();
+ std::tie(rhptee, rhq) =
+ cast<PointerType>(rhptee)->getPointeeType().split().asPair();
+
+ // Inconsistent address spaces at this point is invalid, even if the
+ // address spaces would be compatible.
+ // FIXME: This doesn't catch address space mismatches for pointers of
+ // different nesting levels, like:
+ // __local int *** a;
+ // int ** b = a;
+ // It's not clear how to actually determine when such pointers are
+ // invalidly incompatible.
+ if (lhq.getAddressSpace() != rhq.getAddressSpace())
+ return Sema::IncompatibleNestedPointerAddressSpaceMismatch;
+
} while (isa<PointerType>(lhptee) && isa<PointerType>(rhptee));
if (lhptee == rhptee)
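Roughly the OpenCL situation that now produces the new error code (a sketch, not taken from the patch's tests):

    __kernel void k(void) {
      __global int **gp;
      __local  int **lp;
      gp = lp;   // nested pointees disagree: __global int * vs __local int *
    }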
@@ -8911,7 +9105,7 @@ static void DiagnoseBadDivideOrRemainderValues(Sema& S, ExprResult &LHS,
QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
bool IsCompAssign, bool IsDiv) {
- checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*IsCompare=*/false);
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType())
@@ -8935,7 +9129,7 @@ QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS,
QualType Sema::CheckRemainderOperands(
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign) {
- checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*IsCompare=*/false);
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType()) {
@@ -9224,7 +9418,7 @@ static void diagnosePointerIncompatibility(Sema &S, SourceLocation Loc,
QualType Sema::CheckAdditionOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, BinaryOperatorKind Opc,
QualType* CompLHSTy) {
- checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*IsCompare=*/false);
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType()) {
@@ -9318,7 +9512,7 @@ QualType Sema::CheckAdditionOperands(ExprResult &LHS, ExprResult &RHS,
QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
QualType* CompLHSTy) {
- checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*IsCompare=*/false);
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType()) {
@@ -9464,9 +9658,11 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
return;
// When left shifting an ICE which is signed, we can check for overflow which
- // according to C++ has undefined behavior ([expr.shift] 5.8/2). Unsigned
- // integers have defined behavior modulo one more than the maximum value
- // representable in the result type, so never warn for those.
+ // according to C++ standards prior to C++2a has undefined behavior
+ // ([expr.shift] 5.8/2). Unsigned integers have defined behavior modulo one
+ // more than the maximum value representable in the result type, so never
+ // warn for those. (FIXME: Unsigned left-shift overflow in a constant
+ // expression is still probably a bug.)
Expr::EvalResult LHSResult;
if (LHS.get()->isValueDependent() ||
LHSType->hasUnsignedIntegerRepresentation() ||
@@ -9475,8 +9671,9 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
llvm::APSInt Left = LHSResult.Val.getInt();
// If LHS does not have a signed type and non-negative value
- // then, the behavior is undefined. Warn about it.
- if (Left.isNegative() && !S.getLangOpts().isSignedOverflowDefined()) {
+  // then the behavior is undefined before C++2a. Warn about it.
+ if (Left.isNegative() && !S.getLangOpts().isSignedOverflowDefined() &&
+ !S.getLangOpts().CPlusPlus2a) {
S.DiagRuntimeBehavior(Loc, LHS.get(),
S.PDiag(diag::warn_shift_lhs_negative)
<< LHS.get()->getSourceRange());
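For context, a one-line example of what remains warned about only before C++2a (left-shifting a negative value is well-defined from C++2a on):

    int v = -1 << 3;   // warn_shift_lhs_negative in C++17 and earlier only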
@@ -9603,7 +9800,7 @@ static QualType checkVectorShift(Sema &S, ExprResult &LHS, ExprResult &RHS,
QualType Sema::CheckShiftOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, BinaryOperatorKind Opc,
bool IsCompAssign) {
- checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*IsCompare=*/false);
// Vector shifts promote their scalar inputs to vector type.
if (LHS.get()->getType()->isVectorType() ||
@@ -9772,7 +9969,7 @@ static bool hasIsEqualMethod(Sema &S, const Expr *LHS, const Expr *RHS) {
Selector IsEqualSel = S.NSAPIObj->getIsEqualSelector();
ObjCMethodDecl *Method = S.LookupMethodInObjectType(IsEqualSel,
InterfaceType,
- /*instance=*/true);
+ /*IsInstance=*/true);
if (!Method) {
if (Type->isObjCIdType()) {
// For 'id', just check the global pool.
@@ -9781,7 +9978,7 @@ static bool hasIsEqualMethod(Sema &S, const Expr *LHS, const Expr *RHS) {
} else {
// Check protocols.
Method = S.LookupMethodInQualifiedType(IsEqualSel, Type,
- /*instance=*/true);
+ /*IsInstance=*/true);
}
}
@@ -10281,7 +10478,7 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
return QualType();
}
- checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/true);
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*IsCompare=*/true);
// Handle vector comparisons separately.
if (LHS.get()->getType()->isVectorType() ||
@@ -10663,7 +10860,7 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
return computeResultTy();
}
- if (getLangOpts().OpenCLVersion >= 200) {
+ if (getLangOpts().OpenCLVersion >= 200 || getLangOpts().OpenCLCPlusPlus) {
if (LHSType->isClkEventT() && RHSType->isClkEventT()) {
return computeResultTy();
}
@@ -10776,7 +10973,7 @@ QualType Sema::CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
if (vType.isNull())
return InvalidOperands(Loc, LHS, RHS);
if (getLangOpts().OpenCL && getLangOpts().OpenCLVersion < 120 &&
- vType->hasFloatingRepresentation())
+ !getLangOpts().OpenCLCPlusPlus && vType->hasFloatingRepresentation())
return InvalidOperands(Loc, LHS, RHS);
// FIXME: The check for C++ here is for GCC compatibility. GCC rejects the
// usage of the logical operators && and || with vectors in C. This
@@ -10791,7 +10988,7 @@ QualType Sema::CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
inline QualType Sema::CheckBitwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc) {
- checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*IsCompare=*/false);
bool IsCompAssign =
Opc == BO_AndAssign || Opc == BO_OrAssign || Opc == BO_XorAssign;
@@ -12311,6 +12508,14 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
}
}
+  // Diagnose operations on types unsupported for OpenMP device compilation.
+ if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) {
+ if (Opc != BO_Assign && Opc != BO_Comma) {
+ checkOpenMPDeviceExpr(LHSExpr);
+ checkOpenMPDeviceExpr(RHSExpr);
+ }
+ }
+
switch (Opc) {
case BO_Assign:
ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, QualType());
@@ -12322,6 +12527,29 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
if (!ResultTy.isNull()) {
DiagnoseSelfAssignment(*this, LHS.get(), RHS.get(), OpLoc, true);
DiagnoseSelfMove(LHS.get(), RHS.get(), OpLoc);
+
+ // Avoid copying a block to the heap if the block is assigned to a local
+ // auto variable that is declared in the same scope as the block. This
+ // optimization is unsafe if the local variable is declared in an outer
+ // scope. For example:
+ //
+ // BlockTy b;
+ // {
+ // b = ^{...};
+ // }
+ // // It is unsafe to invoke the block here if it wasn't copied to the
+ // // heap.
+ // b();
+
+ if (auto *BE = dyn_cast<BlockExpr>(RHS.get()->IgnoreParens()))
+ if (auto *DRE = dyn_cast<DeclRefExpr>(LHS.get()->IgnoreParens()))
+ if (auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (VD->hasLocalStorage() && getCurScope()->isDeclScope(VD))
+ BE->getBlockDecl()->setCanAvoidCopyToHeap();
+
+ if (LHS.get()->getType().hasNonTrivialToPrimitiveCopyCUnion())
+ checkNonTrivialCUnion(LHS.get()->getType(), LHS.get()->getExprLoc(),
+ NTCUC_Assignment, NTCUK_Copy);
}
RecordModifiableNonNullParam(*this, LHS.get());
break;
@@ -12887,6 +13115,13 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
<< Input.get()->getSourceRange());
}
}
+  // Diagnose operations on types unsupported for OpenMP device compilation.
+ if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) {
+ if (UnaryOperator::isIncrementDecrementOp(Opc) ||
+ UnaryOperator::isArithmeticOp(Opc))
+ checkOpenMPDeviceExpr(InputExpr);
+ }
+
switch (Opc) {
case UO_PreInc:
case UO_PreDec:
@@ -13005,7 +13240,8 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
}
} else if (resultType->isExtVectorType()) {
if (Context.getLangOpts().OpenCL &&
- Context.getLangOpts().OpenCLVersion < 120) {
+ Context.getLangOpts().OpenCLVersion < 120 &&
+ !Context.getLangOpts().OpenCLCPlusPlus) {
// OpenCL v1.1 6.3.h: The logical operator not (!) does not
// operate on vector float types.
QualType T = resultType->getAs<ExtVectorType>()->getElementType();
@@ -13180,29 +13416,6 @@ ExprResult Sema::ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
Context.getPointerType(Context.VoidTy));
}
-/// Given the last statement in a statement-expression, check whether
-/// the result is a producing expression (like a call to an
-/// ns_returns_retained function) and, if so, rebuild it to hoist the
-/// release out of the full-expression. Otherwise, return null.
-/// Cannot fail.
-static Expr *maybeRebuildARCConsumingStmt(Stmt *Statement) {
- // Should always be wrapped with one of these.
- ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(Statement);
- if (!cleanups) return nullptr;
-
- ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(cleanups->getSubExpr());
- if (!cast || cast->getCastKind() != CK_ARCConsumeObject)
- return nullptr;
-
- // Splice out the cast. This shouldn't modify any interesting
- // features of the statement.
- Expr *producer = cast->getSubExpr();
- assert(producer->getType() == cast->getType());
- assert(producer->getValueKind() == cast->getValueKind());
- cleanups->setSubExpr(producer);
- return cleanups;
-}
-
void Sema::ActOnStartStmtExpr() {
PushExpressionEvaluationContext(ExprEvalContexts.back().Context);
}
@@ -13236,47 +13449,12 @@ Sema::ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
QualType Ty = Context.VoidTy;
bool StmtExprMayBindToTemp = false;
if (!Compound->body_empty()) {
- Stmt *LastStmt = Compound->body_back();
- LabelStmt *LastLabelStmt = nullptr;
- // If LastStmt is a label, skip down through into the body.
- while (LabelStmt *Label = dyn_cast<LabelStmt>(LastStmt)) {
- LastLabelStmt = Label;
- LastStmt = Label->getSubStmt();
- }
-
- if (Expr *LastE = dyn_cast<Expr>(LastStmt)) {
- // Do function/array conversion on the last expression, but not
- // lvalue-to-rvalue. However, initialize an unqualified type.
- ExprResult LastExpr = DefaultFunctionArrayConversion(LastE);
- if (LastExpr.isInvalid())
- return ExprError();
- Ty = LastExpr.get()->getType().getUnqualifiedType();
-
- if (!Ty->isDependentType() && !LastExpr.get()->isTypeDependent()) {
- // In ARC, if the final expression ends in a consume, splice
- // the consume out and bind it later. In the alternate case
- // (when dealing with a retainable type), the result
- // initialization will create a produce. In both cases the
- // result will be +1, and we'll need to balance that out with
- // a bind.
- if (Expr *rebuiltLastStmt
- = maybeRebuildARCConsumingStmt(LastExpr.get())) {
- LastExpr = rebuiltLastStmt;
- } else {
- LastExpr = PerformCopyInitialization(
- InitializedEntity::InitializeStmtExprResult(LPLoc, Ty),
- SourceLocation(), LastExpr);
- }
-
- if (LastExpr.isInvalid())
- return ExprError();
- if (LastExpr.get() != nullptr) {
- if (!LastLabelStmt)
- Compound->setLastStmt(LastExpr.get());
- else
- LastLabelStmt->setSubStmt(LastExpr.get());
- StmtExprMayBindToTemp = true;
- }
+ // For GCC compatibility we get the last Stmt excluding trailing NullStmts.
+ if (const auto *LastStmt =
+ dyn_cast<ValueStmt>(Compound->getStmtExprResult())) {
+ if (const Expr *Value = LastStmt->getExprStmt()) {
+ StmtExprMayBindToTemp = true;
+ Ty = Value->getType();
}
}
}
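A brief GNU statement-expression example of the trailing-null-statement behavior now matched for GCC compatibility:

    int x = ({ int t = 41; t + 1; ;; });   // x == 42; the trailing ';;' is ignored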
@@ -13289,6 +13467,37 @@ Sema::ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
return ResStmtExpr;
}
+ExprResult Sema::ActOnStmtExprResult(ExprResult ER) {
+ if (ER.isInvalid())
+ return ExprError();
+
+ // Do function/array conversion on the last expression, but not
+ // lvalue-to-rvalue. However, initialize an unqualified type.
+ ER = DefaultFunctionArrayConversion(ER.get());
+ if (ER.isInvalid())
+ return ExprError();
+ Expr *E = ER.get();
+
+ if (E->isTypeDependent())
+ return E;
+
+ // In ARC, if the final expression ends in a consume, splice
+ // the consume out and bind it later. In the alternate case
+ // (when dealing with a retainable type), the result
+ // initialization will create a produce. In both cases the
+ // result will be +1, and we'll need to balance that out with
+ // a bind.
+ auto *Cast = dyn_cast<ImplicitCastExpr>(E);
+ if (Cast && Cast->getCastKind() == CK_ARCConsumeObject)
+ return Cast->getSubExpr();
+
+ // FIXME: Provide a better location for the initialization.
+ return PerformCopyInitialization(
+ InitializedEntity::InitializeStmtExprResult(
+ E->getBeginLoc(), E->getType().getUnqualifiedType()),
+ SourceLocation(), E);
+}
+
ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
@@ -13573,8 +13782,8 @@ void Sema::ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
// Look for an explicit signature in that function type.
FunctionProtoTypeLoc ExplicitSignature;
- if ((ExplicitSignature =
- Sig->getTypeLoc().getAsAdjusted<FunctionProtoTypeLoc>())) {
+ if ((ExplicitSignature = Sig->getTypeLoc()
+ .getAsAdjusted<FunctionProtoTypeLoc>())) {
// Check whether that explicit signature was synthesized by
// GetTypeForDeclarator. If so, don't save that as part of the
@@ -13691,8 +13900,6 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
if (BSI->HasImplicitReturnType)
deduceClosureReturnType(*BSI);
- PopDeclContext();
-
QualType RetTy = Context.VoidTy;
if (!BSI->ReturnType.isNull())
RetTy = BSI->ReturnType;
@@ -13700,18 +13907,6 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
bool NoReturn = BD->hasAttr<NoReturnAttr>();
QualType BlockTy;
- // Set the captured variables on the block.
- // FIXME: Share capture structure between BlockDecl and CapturingScopeInfo!
- SmallVector<BlockDecl::Capture, 4> Captures;
- for (Capture &Cap : BSI->Captures) {
- if (Cap.isThisCapture())
- continue;
- BlockDecl::Capture NewCap(Cap.getVariable(), Cap.isBlockCapture(),
- Cap.isNested(), Cap.getInitExpr());
- Captures.push_back(NewCap);
- }
- BD->setCaptures(Context, Captures, BSI->CXXThisCaptureIndex != 0);
-
// If the user wrote a function type in some form, try to use that.
if (!BSI->FunctionType.isNull()) {
const FunctionType *FTy = BSI->FunctionType->getAs<FunctionType>();
@@ -13767,9 +13962,85 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
!BD->isDependentContext())
computeNRVO(Body, BSI);
- BlockExpr *Result = new (Context) BlockExpr(BD, BlockTy);
+ if (RetTy.hasNonTrivialToPrimitiveDestructCUnion() ||
+ RetTy.hasNonTrivialToPrimitiveCopyCUnion())
+ checkNonTrivialCUnion(RetTy, BD->getCaretLocation(), NTCUC_FunctionReturn,
+ NTCUK_Destruct|NTCUK_Copy);
+
+ PopDeclContext();
+
+  // Pop the block scope now but keep it alive until the end of this function.
AnalysisBasedWarnings::Policy WP = AnalysisWarnings.getDefaultPolicy();
- PopFunctionScopeInfo(&WP, Result->getBlockDecl(), Result);
+ PoppedFunctionScopePtr ScopeRAII = PopFunctionScopeInfo(&WP, BD, BlockTy);
+
+ // Set the captured variables on the block.
+ SmallVector<BlockDecl::Capture, 4> Captures;
+ for (Capture &Cap : BSI->Captures) {
+ if (Cap.isInvalid() || Cap.isThisCapture())
+ continue;
+
+ VarDecl *Var = Cap.getVariable();
+ Expr *CopyExpr = nullptr;
+ if (getLangOpts().CPlusPlus && Cap.isCopyCapture()) {
+ if (const RecordType *Record =
+ Cap.getCaptureType()->getAs<RecordType>()) {
+ // The capture logic needs the destructor, so make sure we mark it.
+ // Usually this is unnecessary because most local variables have
+ // their destructors marked at declaration time, but parameters are
+ // an exception because it's technically only the call site that
+ // actually requires the destructor.
+ if (isa<ParmVarDecl>(Var))
+ FinalizeVarWithDestructor(Var, Record);
+
+ // Enter a separate potentially-evaluated context while building block
+ // initializers to isolate their cleanups from those of the block
+ // itself.
+ // FIXME: Is this appropriate even when the block itself occurs in an
+ // unevaluated operand?
+ EnterExpressionEvaluationContext EvalContext(
+ *this, ExpressionEvaluationContext::PotentiallyEvaluated);
+
+ SourceLocation Loc = Cap.getLocation();
+
+ ExprResult Result = BuildDeclarationNameExpr(
+ CXXScopeSpec(), DeclarationNameInfo(Var->getDeclName(), Loc), Var);
+
+ // According to the blocks spec, the capture of a variable from
+ // the stack requires a const copy constructor. This is not true
+ // of the copy/move done to move a __block variable to the heap.
+ if (!Result.isInvalid() &&
+ !Result.get()->getType().isConstQualified()) {
+ Result = ImpCastExprToType(Result.get(),
+ Result.get()->getType().withConst(),
+ CK_NoOp, VK_LValue);
+ }
+
+ if (!Result.isInvalid()) {
+ Result = PerformCopyInitialization(
+ InitializedEntity::InitializeBlock(Var->getLocation(),
+ Cap.getCaptureType(), false),
+ Loc, Result.get());
+ }
+
+ // Build a full-expression copy expression if initialization
+ // succeeded and used a non-trivial constructor. Recover from
+ // errors by pretending that the copy isn't necessary.
+ if (!Result.isInvalid() &&
+ !cast<CXXConstructExpr>(Result.get())->getConstructor()
+ ->isTrivial()) {
+ Result = MaybeCreateExprWithCleanups(Result);
+ CopyExpr = Result.get();
+ }
+ }
+ }
+
+ BlockDecl::Capture NewCap(Var, Cap.isBlockCapture(), Cap.isNested(),
+ CopyExpr);
+ Captures.push_back(NewCap);
+ }
+ BD->setCaptures(Context, Captures, BSI->CXXThisCaptureIndex != 0);
+
+ BlockExpr *Result = new (Context) BlockExpr(BD, BlockTy);
// If the block isn't obviously global, i.e. it captures anything at
// all, then we need to do a few things in the surrounding context:
@@ -13817,6 +14088,11 @@ ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc,
}
}
+ // NVPTX does not support va_arg expression.
+ if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
+ Context.getTargetInfo().getTriple().isNVPTX())
+ targetDiag(E->getBeginLoc(), diag::err_va_arg_in_device);
+
// It might be a __builtin_ms_va_list. (But don't ever mark a va_arg()
// as Microsoft ABI on an actual Microsoft platform, where
// __builtin_ms_va_list and __builtin_va_list are the same.)
@@ -13929,6 +14205,20 @@ ExprResult Sema::ActOnGNUNullExpr(SourceLocation TokenLoc) {
return new (Context) GNUNullExpr(Ty, TokenLoc);
}
+ExprResult Sema::ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
+ SourceLocation BuiltinLoc,
+ SourceLocation RPLoc) {
+ return BuildSourceLocExpr(Kind, BuiltinLoc, RPLoc, CurContext);
+}
+
+ExprResult Sema::BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
+ SourceLocation BuiltinLoc,
+ SourceLocation RPLoc,
+ DeclContext *ParentContext) {
+ return new (Context)
+ SourceLocExpr(Context, Kind, BuiltinLoc, RPLoc, ParentContext);
+}
+
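A sketch of the source-location builtins these entry points build nodes for; the default-argument use is the interesting case, since the location must describe the caller ('log_at' and 'g' are invented names):

    void log_at(const char *file = __builtin_FILE(),
                unsigned line = __builtin_LINE());
    void g() {
      log_at();   // file/line reflect this call site, not log_at's declaration
    }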
bool Sema::ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&Exp,
bool Diagnose) {
if (!getLangOpts().ObjC)
@@ -14079,6 +14369,9 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
case IncompatibleNestedPointerQualifiers:
DiagKind = diag::ext_nested_pointer_qualifier_mismatch;
break;
+ case IncompatibleNestedPointerAddressSpaceMismatch:
+ DiagKind = diag::err_typecheck_incompatible_nested_address_space;
+ break;
case IntToBlockPointer:
DiagKind = diag::err_int_to_block_pointer;
break;
@@ -14300,14 +14593,13 @@ Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
return ExprError();
}
- if (!isa<ConstantExpr>(E))
- E = ConstantExpr::Create(Context, E);
-
// Circumvent ICE checking in C++11 to avoid evaluating the expression twice
// in the non-ICE case.
if (!getLangOpts().CPlusPlus11 && E->isIntegerConstantExpr(Context)) {
if (Result)
*Result = E->EvaluateKnownConstIntCheckOverflow(Context);
+ if (!isa<ConstantExpr>(E))
+ E = ConstantExpr::Create(Context, E);
return E;
}
@@ -14317,8 +14609,12 @@ Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
// Try to evaluate the expression, and produce diagnostics explaining why it's
// not a constant expression as a side-effect.
- bool Folded = E->EvaluateAsRValue(EvalResult, Context) &&
- EvalResult.Val.isInt() && !EvalResult.HasSideEffects;
+ bool Folded =
+ E->EvaluateAsRValue(EvalResult, Context, /*isConstantContext*/ true) &&
+ EvalResult.Val.isInt() && !EvalResult.HasSideEffects;
+
+ if (!isa<ConstantExpr>(E))
+ E = ConstantExpr::Create(Context, E, EvalResult.Val);
// In C++11, we can rely on diagnostics being produced for any expression
// which is not a constant expression. If no diagnostics were produced, then
@@ -14368,14 +14664,7 @@ namespace {
// Make sure we redo semantic analysis
bool AlwaysRebuild() { return true; }
-
- // Make sure we handle LabelStmts correctly.
- // FIXME: This does the right thing, but maybe we need a more general
- // fix to TreeTransform?
- StmtResult TransformLabelStmt(LabelStmt *S) {
- S->getDecl()->setStmt(nullptr);
- return BaseTransform::TransformLabelStmt(S);
- }
+ bool ReplacingOriginal() { return true; }
// We need to special-case DeclRefExprs referring to FieldDecls which
// are not part of a member pointer formation; normal TreeTransforming
@@ -14402,9 +14691,12 @@ namespace {
return BaseTransform::TransformUnaryOperator(E);
}
- ExprResult TransformLambdaExpr(LambdaExpr *E) {
- // Lambdas never need to be transformed.
- return E;
+ // The body of a lambda-expression is in a separate expression evaluation
+ // context so never needs to be transformed.
+ // FIXME: Ideally we wouldn't transform the closure type either, and would
+ // just recreate the capture expressions and lambda expression.
+ StmtResult TransformLambdaBody(LambdaExpr *E, Stmt *Body) {
+ return SkipLambdaBody(E, Body);
}
};
}
@@ -14512,13 +14804,6 @@ void Sema::PopExpressionEvaluationContext() {
for (const auto *L : Rec.Lambdas)
Diag(L->getBeginLoc(), D);
- } else {
- // Mark the capture expressions odr-used. This was deferred
- // during lambda expression creation.
- for (auto *Lambda : Rec.Lambdas) {
- for (auto *C : Lambda->capture_inits())
- MarkDeclarationsReferencedInExpr(C);
- }
}
}
@@ -14566,55 +14851,155 @@ ExprResult Sema::HandleExprEvaluationContextForTypeof(Expr *E) {
return TransformToPotentiallyEvaluated(E);
}
-/// Are we within a context in which some evaluation could be performed (be it
-/// constant evaluation or runtime evaluation)? Sadly, this notion is not quite
-/// captured by C++'s idea of an "unevaluated context".
-static bool isEvaluatableContext(Sema &SemaRef) {
+/// Are we in a context that is potentially constant evaluated per C++20
+/// [expr.const]p12?
+static bool isPotentiallyConstantEvaluatedContext(Sema &SemaRef) {
+ /// C++2a [expr.const]p12:
+ // An expression or conversion is potentially constant evaluated if it is
switch (SemaRef.ExprEvalContexts.back().Context) {
- case Sema::ExpressionEvaluationContext::Unevaluated:
- case Sema::ExpressionEvaluationContext::UnevaluatedAbstract:
- // Expressions in this context are never evaluated.
- return false;
-
- case Sema::ExpressionEvaluationContext::UnevaluatedList:
case Sema::ExpressionEvaluationContext::ConstantEvaluated:
+ // -- a manifestly constant-evaluated expression,
case Sema::ExpressionEvaluationContext::PotentiallyEvaluated:
+ case Sema::ExpressionEvaluationContext::PotentiallyEvaluatedIfUsed:
case Sema::ExpressionEvaluationContext::DiscardedStatement:
- // Expressions in this context could be evaluated.
+ // -- a potentially-evaluated expression,
+ case Sema::ExpressionEvaluationContext::UnevaluatedList:
+ // -- an immediate subexpression of a braced-init-list,
+
+ // -- [FIXME] an expression of the form & cast-expression that occurs
+ // within a templated entity
+ // -- a subexpression of one of the above that is not a subexpression of
+ // a nested unevaluated operand.
return true;
- case Sema::ExpressionEvaluationContext::PotentiallyEvaluatedIfUsed:
- // Referenced declarations will only be used if the construct in the
- // containing expression is used, at which point we'll be given another
- // turn to mark them.
+ case Sema::ExpressionEvaluationContext::Unevaluated:
+ case Sema::ExpressionEvaluationContext::UnevaluatedAbstract:
+ // Expressions in this context are never evaluated.
return false;
}
llvm_unreachable("Invalid context");
}
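A minimal sketch of the sort of case this notion feeds into (names invented): a constexpr function template named where constant evaluation can happen must have its definition instantiated.

    template <class T> constexpr int size_of() { return sizeof(T); }
    int buffer[size_of<long>()];   // size_of<long> is needed for constant evaluation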
+/// Return true if this function has a calling convention that requires the
+/// size of its parameter list to be mangled into the symbol name.
+static bool funcHasParameterSizeMangling(Sema &S, FunctionDecl *FD) {
+ // These manglings don't do anything on non-Windows or non-x86 platforms, so
+ // we don't need parameter type sizes.
+ const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
+ if (!TT.isOSWindows() || (TT.getArch() != llvm::Triple::x86 &&
+ TT.getArch() != llvm::Triple::x86_64))
+ return false;
+
+ // If this is C++ and this isn't an extern "C" function, parameters do not
+ // need to be complete. In this case, C++ mangling will apply, which doesn't
+ // use the size of the parameters.
+ if (S.getLangOpts().CPlusPlus && !FD->isExternC())
+ return false;
+
+ // Stdcall, fastcall, and vectorcall need this special treatment.
+ CallingConv CC = FD->getType()->castAs<FunctionType>()->getCallConv();
+ switch (CC) {
+ case CC_X86StdCall:
+ case CC_X86FastCall:
+ case CC_X86VectorCall:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+/// Require that all of the parameter types of the function be complete. Normally,
+/// parameter types are only required to be complete when a function is called
+/// or defined, but to mangle functions with certain calling conventions, the
+/// mangler needs to know the size of the parameter list. In this situation,
+/// MSVC doesn't emit an error or instantiate templates. Instead, MSVC mangles
+/// the function as _foo@0, i.e. zero bytes of parameters, which will usually
+/// result in a linker error. Clang doesn't implement this behavior, and instead
+/// attempts to error at compile time.
+static void CheckCompleteParameterTypesForMangler(Sema &S, FunctionDecl *FD,
+ SourceLocation Loc) {
+ class ParamIncompleteTypeDiagnoser : public Sema::TypeDiagnoser {
+ FunctionDecl *FD;
+ ParmVarDecl *Param;
+
+ public:
+ ParamIncompleteTypeDiagnoser(FunctionDecl *FD, ParmVarDecl *Param)
+ : FD(FD), Param(Param) {}
+
+ void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
+ CallingConv CC = FD->getType()->castAs<FunctionType>()->getCallConv();
+ StringRef CCName;
+ switch (CC) {
+ case CC_X86StdCall:
+ CCName = "stdcall";
+ break;
+ case CC_X86FastCall:
+ CCName = "fastcall";
+ break;
+ case CC_X86VectorCall:
+ CCName = "vectorcall";
+ break;
+ default:
+ llvm_unreachable("CC does not need mangling");
+ }
+
+ S.Diag(Loc, diag::err_cconv_incomplete_param_type)
+ << Param->getDeclName() << FD->getDeclName() << CCName;
+ }
+ };
+
+ for (ParmVarDecl *Param : FD->parameters()) {
+ ParamIncompleteTypeDiagnoser Diagnoser(FD, Param);
+ S.RequireCompleteType(Loc, Param->getType(), Diagnoser);
+ }
+}
+
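An illustrative case, assuming a 32-bit Windows target where stdcall names encode the parameter byte count (e.g. _callee@4); the names here are invented:

    struct S;                                 /* still incomplete */
    void __stdcall callee(struct S s);
    void (__stdcall *fp)(struct S) = callee;  /* needs sizeof(S) for the mangled
                                                 name; err_cconv_incomplete_param_type */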
+namespace {
+enum class OdrUseContext {
+ /// Declarations in this context are not odr-used.
+ None,
+ /// Declarations in this context are formally odr-used, but this is a
+ /// dependent context.
+ Dependent,
+ /// Declarations in this context are odr-used but not actually used (yet).
+ FormallyOdrUsed,
+ /// Declarations in this context are used.
+ Used
+};
+}
+
/// Are we within a context in which references to resolved functions or to
/// variables result in odr-use?
-static bool isOdrUseContext(Sema &SemaRef, bool SkipDependentUses = true) {
- // An expression in a template is not really an expression until it's been
- // instantiated, so it doesn't trigger odr-use.
- if (SkipDependentUses && SemaRef.CurContext->isDependentContext())
- return false;
+static OdrUseContext isOdrUseContext(Sema &SemaRef) {
+ OdrUseContext Result;
switch (SemaRef.ExprEvalContexts.back().Context) {
case Sema::ExpressionEvaluationContext::Unevaluated:
case Sema::ExpressionEvaluationContext::UnevaluatedList:
case Sema::ExpressionEvaluationContext::UnevaluatedAbstract:
- case Sema::ExpressionEvaluationContext::DiscardedStatement:
- return false;
+ return OdrUseContext::None;
case Sema::ExpressionEvaluationContext::ConstantEvaluated:
case Sema::ExpressionEvaluationContext::PotentiallyEvaluated:
- return true;
+ Result = OdrUseContext::Used;
+ break;
+
+ case Sema::ExpressionEvaluationContext::DiscardedStatement:
+ Result = OdrUseContext::FormallyOdrUsed;
+ break;
case Sema::ExpressionEvaluationContext::PotentiallyEvaluatedIfUsed:
- return false;
+ // A default argument formally results in odr-use, but doesn't actually
+ // result in a use in any real sense until it itself is used.
+ Result = OdrUseContext::FormallyOdrUsed;
+ break;
}
- llvm_unreachable("Invalid context");
+
+ if (SemaRef.CurContext->isDependentContext())
+ return OdrUseContext::Dependent;
+
+ return Result;
}
static bool isImplicitlyDefinableConstexprFunction(FunctionDecl *Func) {
@@ -14631,6 +15016,10 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
Func->setReferenced();
+ // Recursive functions aren't really used until they're used from some other
+ // context.
+ bool IsRecursiveCall = CurContext == Func;
+
// C++11 [basic.def.odr]p3:
// A function whose name appears as a potentially-evaluated expression is
// odr-used if it is the unique lookup result or the selected member of a
@@ -14638,7 +15027,18 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
//
// We (incorrectly) mark overload resolution as an unevaluated context, so we
// can just check that here.
- bool OdrUse = MightBeOdrUse && isOdrUseContext(*this);
+ OdrUseContext OdrUse =
+ MightBeOdrUse ? isOdrUseContext(*this) : OdrUseContext::None;
+ if (IsRecursiveCall && OdrUse == OdrUseContext::Used)
+ OdrUse = OdrUseContext::FormallyOdrUsed;
+
+ // C++20 [expr.const]p12:
+ // A function [...] is needed for constant evaluation if it is [...] a
+ // constexpr function that is named by an expression that is potentially
+ // constant evaluated
+ bool NeededForConstantEvaluation =
+ isPotentiallyConstantEvaluatedContext(*this) &&
+ isImplicitlyDefinableConstexprFunction(Func);
// Determine whether we require a function definition to exist, per
// C++11 [temp.inst]p3:
@@ -14646,12 +15046,23 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
// instantiated or explicitly specialized, the function template
// specialization is implicitly instantiated when the specialization is
// referenced in a context that requires a function definition to exist.
+ // C++20 [temp.inst]p7:
+ // The existence of a definition of a [...] function is considered to
+ // affect the semantics of the program if the [...] function is needed for
+ // constant evaluation by an expression
+ // C++20 [basic.def.odr]p10:
+ // Every program shall contain exactly one definition of every non-inline
+ // function or variable that is odr-used in that program outside of a
+ // discarded statement
+ // C++20 [special]p1:
+ // The implementation will implicitly define [defaulted special members]
+ // if they are odr-used or needed for constant evaluation.
//
- // That is either when this is an odr-use, or when a usage of a constexpr
- // function occurs within an evaluatable context.
- bool NeedDefinition =
- OdrUse || (isEvaluatableContext(*this) &&
- isImplicitlyDefinableConstexprFunction(Func));
+ // Note that we skip the implicit instantiation of templates that are only
+ // used in unused default arguments or by recursive calls to themselves.
+ // This is formally non-conforming, but seems reasonable in practice.
+ bool NeedDefinition = !IsRecursiveCall && (OdrUse == OdrUseContext::Used ||
+ NeededForConstantEvaluation);
// C++14 [temp.expl.spec]p6:
// If a template [...] is explicitly specialized then that specialization
@@ -14676,123 +15087,168 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
if (FPT && isUnresolvedExceptionSpec(FPT->getExceptionSpecType()))
ResolveExceptionSpec(Loc, FPT);
- // If we don't need to mark the function as used, and we don't need to
- // try to provide a definition, there's nothing more to do.
- if ((Func->isUsed(/*CheckUsedAttr=*/false) || !OdrUse) &&
- (!NeedDefinition || Func->getBody()))
- return;
-
- // Note that this declaration has been used.
- if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Func)) {
- Constructor = cast<CXXConstructorDecl>(Constructor->getFirstDecl());
- if (Constructor->isDefaulted() && !Constructor->isDeleted()) {
- if (Constructor->isDefaultConstructor()) {
- if (Constructor->isTrivial() && !Constructor->hasAttr<DLLExportAttr>())
+ if (getLangOpts().CUDA)
+ CheckCUDACall(Loc, Func);
+
+ // If we need a definition, try to create one.
+ if (NeedDefinition && !Func->getBody()) {
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Func)) {
+ Constructor = cast<CXXConstructorDecl>(Constructor->getFirstDecl());
+ if (Constructor->isDefaulted() && !Constructor->isDeleted()) {
+ if (Constructor->isDefaultConstructor()) {
+ if (Constructor->isTrivial() &&
+ !Constructor->hasAttr<DLLExportAttr>())
+ return;
+ DefineImplicitDefaultConstructor(Loc, Constructor);
+ } else if (Constructor->isCopyConstructor()) {
+ DefineImplicitCopyConstructor(Loc, Constructor);
+ } else if (Constructor->isMoveConstructor()) {
+ DefineImplicitMoveConstructor(Loc, Constructor);
+ }
+ } else if (Constructor->getInheritedConstructor()) {
+ DefineInheritingConstructor(Loc, Constructor);
+ }
+ } else if (CXXDestructorDecl *Destructor =
+ dyn_cast<CXXDestructorDecl>(Func)) {
+ Destructor = cast<CXXDestructorDecl>(Destructor->getFirstDecl());
+ if (Destructor->isDefaulted() && !Destructor->isDeleted()) {
+ if (Destructor->isTrivial() && !Destructor->hasAttr<DLLExportAttr>())
return;
- DefineImplicitDefaultConstructor(Loc, Constructor);
- } else if (Constructor->isCopyConstructor()) {
- DefineImplicitCopyConstructor(Loc, Constructor);
- } else if (Constructor->isMoveConstructor()) {
- DefineImplicitMoveConstructor(Loc, Constructor);
+ DefineImplicitDestructor(Loc, Destructor);
}
- } else if (Constructor->getInheritedConstructor()) {
- DefineInheritingConstructor(Loc, Constructor);
- }
- } else if (CXXDestructorDecl *Destructor =
- dyn_cast<CXXDestructorDecl>(Func)) {
- Destructor = cast<CXXDestructorDecl>(Destructor->getFirstDecl());
- if (Destructor->isDefaulted() && !Destructor->isDeleted()) {
- if (Destructor->isTrivial() && !Destructor->hasAttr<DLLExportAttr>())
- return;
- DefineImplicitDestructor(Loc, Destructor);
- }
- if (Destructor->isVirtual() && getLangOpts().AppleKext)
- MarkVTableUsed(Loc, Destructor->getParent());
- } else if (CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(Func)) {
- if (MethodDecl->isOverloadedOperator() &&
- MethodDecl->getOverloadedOperator() == OO_Equal) {
- MethodDecl = cast<CXXMethodDecl>(MethodDecl->getFirstDecl());
- if (MethodDecl->isDefaulted() && !MethodDecl->isDeleted()) {
- if (MethodDecl->isCopyAssignmentOperator())
- DefineImplicitCopyAssignment(Loc, MethodDecl);
- else if (MethodDecl->isMoveAssignmentOperator())
- DefineImplicitMoveAssignment(Loc, MethodDecl);
+ if (Destructor->isVirtual() && getLangOpts().AppleKext)
+ MarkVTableUsed(Loc, Destructor->getParent());
+ } else if (CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(Func)) {
+ if (MethodDecl->isOverloadedOperator() &&
+ MethodDecl->getOverloadedOperator() == OO_Equal) {
+ MethodDecl = cast<CXXMethodDecl>(MethodDecl->getFirstDecl());
+ if (MethodDecl->isDefaulted() && !MethodDecl->isDeleted()) {
+ if (MethodDecl->isCopyAssignmentOperator())
+ DefineImplicitCopyAssignment(Loc, MethodDecl);
+ else if (MethodDecl->isMoveAssignmentOperator())
+ DefineImplicitMoveAssignment(Loc, MethodDecl);
+ }
+ } else if (isa<CXXConversionDecl>(MethodDecl) &&
+ MethodDecl->getParent()->isLambda()) {
+ CXXConversionDecl *Conversion =
+ cast<CXXConversionDecl>(MethodDecl->getFirstDecl());
+ if (Conversion->isLambdaToBlockPointerConversion())
+ DefineImplicitLambdaToBlockPointerConversion(Loc, Conversion);
+ else
+ DefineImplicitLambdaToFunctionPointerConversion(Loc, Conversion);
+ } else if (MethodDecl->isVirtual() && getLangOpts().AppleKext)
+ MarkVTableUsed(Loc, MethodDecl->getParent());
+ }
+
+ // Implicit instantiation of function templates and member functions of
+ // class templates.
+ if (Func->isImplicitlyInstantiable()) {
+ TemplateSpecializationKind TSK =
+ Func->getTemplateSpecializationKindForInstantiation();
+ SourceLocation PointOfInstantiation = Func->getPointOfInstantiation();
+ bool FirstInstantiation = PointOfInstantiation.isInvalid();
+ if (FirstInstantiation) {
+ PointOfInstantiation = Loc;
+ Func->setTemplateSpecializationKind(TSK, PointOfInstantiation);
+ } else if (TSK != TSK_ImplicitInstantiation) {
+ // Use the point of use as the point of instantiation, instead of the
+ // point of explicit instantiation (which we track as the actual point
+ // of instantiation). This gives better backtraces in diagnostics.
+ PointOfInstantiation = Loc;
}
- } else if (isa<CXXConversionDecl>(MethodDecl) &&
- MethodDecl->getParent()->isLambda()) {
- CXXConversionDecl *Conversion =
- cast<CXXConversionDecl>(MethodDecl->getFirstDecl());
- if (Conversion->isLambdaToBlockPointerConversion())
- DefineImplicitLambdaToBlockPointerConversion(Loc, Conversion);
- else
- DefineImplicitLambdaToFunctionPointerConversion(Loc, Conversion);
- } else if (MethodDecl->isVirtual() && getLangOpts().AppleKext)
- MarkVTableUsed(Loc, MethodDecl->getParent());
- }
-
- // Recursive functions should be marked when used from another function.
- // FIXME: Is this really right?
- if (CurContext == Func) return;
-
- // Implicit instantiation of function templates and member functions of
- // class templates.
- if (Func->isImplicitlyInstantiable()) {
- TemplateSpecializationKind TSK = Func->getTemplateSpecializationKind();
- SourceLocation PointOfInstantiation = Func->getPointOfInstantiation();
- bool FirstInstantiation = PointOfInstantiation.isInvalid();
- if (FirstInstantiation) {
- PointOfInstantiation = Loc;
- Func->setTemplateSpecializationKind(TSK, PointOfInstantiation);
- } else if (TSK != TSK_ImplicitInstantiation) {
- // Use the point of use as the point of instantiation, instead of the
- // point of explicit instantiation (which we track as the actual point of
- // instantiation). This gives better backtraces in diagnostics.
- PointOfInstantiation = Loc;
- }
-
- if (FirstInstantiation || TSK != TSK_ImplicitInstantiation ||
- Func->isConstexpr()) {
- if (isa<CXXRecordDecl>(Func->getDeclContext()) &&
- cast<CXXRecordDecl>(Func->getDeclContext())->isLocalClass() &&
- CodeSynthesisContexts.size())
- PendingLocalImplicitInstantiations.push_back(
- std::make_pair(Func, PointOfInstantiation));
- else if (Func->isConstexpr())
- // Do not defer instantiations of constexpr functions, to avoid the
- // expression evaluator needing to call back into Sema if it sees a
- // call to such a function.
- InstantiateFunctionDefinition(PointOfInstantiation, Func);
- else {
- Func->setInstantiationIsPending(true);
- PendingInstantiations.push_back(std::make_pair(Func,
- PointOfInstantiation));
- // Notify the consumer that a function was implicitly instantiated.
- Consumer.HandleCXXImplicitFunctionInstantiation(Func);
+
+ if (FirstInstantiation || TSK != TSK_ImplicitInstantiation ||
+ Func->isConstexpr()) {
+ if (isa<CXXRecordDecl>(Func->getDeclContext()) &&
+ cast<CXXRecordDecl>(Func->getDeclContext())->isLocalClass() &&
+ CodeSynthesisContexts.size())
+ PendingLocalImplicitInstantiations.push_back(
+ std::make_pair(Func, PointOfInstantiation));
+ else if (Func->isConstexpr())
+ // Do not defer instantiations of constexpr functions, to avoid the
+ // expression evaluator needing to call back into Sema if it sees a
+ // call to such a function.
+ InstantiateFunctionDefinition(PointOfInstantiation, Func);
+ else {
+ Func->setInstantiationIsPending(true);
+ PendingInstantiations.push_back(
+ std::make_pair(Func, PointOfInstantiation));
+ // Notify the consumer that a function was implicitly instantiated.
+ Consumer.HandleCXXImplicitFunctionInstantiation(Func);
+ }
+ }
+ } else {
+ // Walk redefinitions, as some of them may be instantiable.
+ for (auto i : Func->redecls()) {
+ if (!i->isUsed(false) && i->isImplicitlyInstantiable())
+ MarkFunctionReferenced(Loc, i, MightBeOdrUse);
}
- }
- } else {
- // Walk redefinitions, as some of them may be instantiable.
- for (auto i : Func->redecls()) {
- if (!i->isUsed(false) && i->isImplicitlyInstantiable())
- MarkFunctionReferenced(Loc, i, OdrUse);
}
}
- if (!OdrUse) return;
+ // If this is the first "real" use, act on that.
+ if (OdrUse == OdrUseContext::Used && !Func->isUsed(/*CheckUsedAttr=*/false)) {
+ // Keep track of used but undefined functions.
+ if (!Func->isDefined()) {
+ if (mightHaveNonExternalLinkage(Func))
+ UndefinedButUsed.insert(std::make_pair(Func->getCanonicalDecl(), Loc));
+ else if (Func->getMostRecentDecl()->isInlined() &&
+ !LangOpts.GNUInline &&
+ !Func->getMostRecentDecl()->hasAttr<GNUInlineAttr>())
+ UndefinedButUsed.insert(std::make_pair(Func->getCanonicalDecl(), Loc));
+ else if (isExternalWithNoLinkageType(Func))
+ UndefinedButUsed.insert(std::make_pair(Func->getCanonicalDecl(), Loc));
+ }
+
+ // Some x86 Windows calling conventions mangle the size of the parameter
+ // pack into the name. Computing the size of the parameters requires the
+ // parameter types to be complete. Check that now.
+ if (funcHasParameterSizeMangling(*this, Func))
+ CheckCompleteParameterTypesForMangler(*this, Func, Loc);
- // Keep track of used but undefined functions.
- if (!Func->isDefined()) {
- if (mightHaveNonExternalLinkage(Func))
- UndefinedButUsed.insert(std::make_pair(Func->getCanonicalDecl(), Loc));
- else if (Func->getMostRecentDecl()->isInlined() &&
- !LangOpts.GNUInline &&
- !Func->getMostRecentDecl()->hasAttr<GNUInlineAttr>())
- UndefinedButUsed.insert(std::make_pair(Func->getCanonicalDecl(), Loc));
- else if (isExternalWithNoLinkageType(Func))
- UndefinedButUsed.insert(std::make_pair(Func->getCanonicalDecl(), Loc));
+ Func->markUsed(Context);
+
+ if (LangOpts.OpenMP && LangOpts.OpenMPIsDevice)
+ checkOpenMPDeviceFunction(Loc, Func);
}
+}
- Func->markUsed(Context);
+/// Directly mark a variable odr-used. Given a choice, prefer to use
+/// MarkVariableReferenced since it does additional checks and then
+/// calls MarkVarDeclODRUsed.
+/// If the variable must be captured:
+/// - if FunctionScopeIndexToStopAt is null, capture it in the CurContext
+/// - else capture it in the DeclContext that maps to the
+/// *FunctionScopeIndexToStopAt on the FunctionScopeInfo stack.
+static void
+MarkVarDeclODRUsed(VarDecl *Var, SourceLocation Loc, Sema &SemaRef,
+ const unsigned *const FunctionScopeIndexToStopAt = nullptr) {
+ // Keep track of used but undefined variables.
+ // FIXME: We shouldn't suppress this warning for static data members.
+ if (Var->hasDefinition(SemaRef.Context) == VarDecl::DeclarationOnly &&
+ (!Var->isExternallyVisible() || Var->isInline() ||
+ SemaRef.isExternalWithNoLinkageType(Var)) &&
+ !(Var->isStaticDataMember() && Var->hasInit())) {
+ SourceLocation &old = SemaRef.UndefinedButUsed[Var->getCanonicalDecl()];
+ if (old.isInvalid())
+ old = Loc;
+ }
+ QualType CaptureType, DeclRefType;
+ if (SemaRef.LangOpts.OpenMP)
+ SemaRef.tryCaptureOpenMPLambdas(Var);
+ SemaRef.tryCaptureVariable(Var, Loc, Sema::TryCapture_Implicit,
+ /*EllipsisLoc*/ SourceLocation(),
+ /*BuildAndDiagnose*/ true,
+ CaptureType, DeclRefType,
+ FunctionScopeIndexToStopAt);
+
+ Var->markUsed(SemaRef.Context);
+}
+
+void Sema::MarkCaptureUsedInEnclosingContext(VarDecl *Capture,
+ SourceLocation Loc,
+ unsigned CapturingScopeIndex) {
+ MarkVarDeclODRUsed(Capture, Loc, *this, &CapturingScopeIndex);
}
static void
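The UndefinedButUsed bookkeeping above is what later drives the "used but never defined" diagnostics; a hypothetical case that ends up in that set:

    static int helper();                // internal linkage, no definition

    int caller() { return helper(); }   // odr-use records helper in
                                        // UndefinedButUsed; clang reports it at
                                        // the end of the translation unit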
@@ -14958,31 +15414,35 @@ static bool captureInBlock(BlockScopeInfo *BSI, VarDecl *Var,
QualType &CaptureType,
QualType &DeclRefType,
const bool Nested,
- Sema &S) {
- Expr *CopyExpr = nullptr;
+ Sema &S, bool Invalid) {
bool ByRef = false;
// Blocks are not allowed to capture arrays, excepting OpenCL.
// OpenCL v2.0 s1.12.5 (revision 40): arrays are captured by reference
// (decayed to pointers).
- if (!S.getLangOpts().OpenCL && CaptureType->isArrayType()) {
+ if (!Invalid && !S.getLangOpts().OpenCL && CaptureType->isArrayType()) {
if (BuildAndDiagnose) {
S.Diag(Loc, diag::err_ref_array_type);
S.Diag(Var->getLocation(), diag::note_previous_decl)
<< Var->getDeclName();
+ Invalid = true;
+ } else {
+ return false;
}
- return false;
}
// Forbid the block-capture of autoreleasing variables.
- if (CaptureType.getObjCLifetime() == Qualifiers::OCL_Autoreleasing) {
+ if (!Invalid &&
+ CaptureType.getObjCLifetime() == Qualifiers::OCL_Autoreleasing) {
if (BuildAndDiagnose) {
S.Diag(Loc, diag::err_arc_autoreleasing_capture)
<< /*block*/ 0;
S.Diag(Var->getLocation(), diag::note_previous_decl)
<< Var->getDeclName();
+ Invalid = true;
+ } else {
+ return false;
}
- return false;
}
// Warn about implicitly autoreleasing indirect parameters captured by blocks.
@@ -15005,7 +15465,7 @@ static bool captureInBlock(BlockScopeInfo *BSI, VarDecl *Var,
QualType PointeeTy = PT->getPointeeType();
- if (PointeeTy->getAs<ObjCObjectPointerType>() &&
+ if (!Invalid && PointeeTy->getAs<ObjCObjectPointerType>() &&
PointeeTy.getObjCLifetime() == Qualifiers::OCL_Autoreleasing &&
!IsObjCOwnershipAttributedType(PointeeTy)) {
if (BuildAndDiagnose) {
@@ -15026,54 +15486,14 @@ static bool captureInBlock(BlockScopeInfo *BSI, VarDecl *Var,
// Block capture by copy introduces 'const'.
CaptureType = CaptureType.getNonReferenceType().withConst();
DeclRefType = CaptureType;
-
- if (S.getLangOpts().CPlusPlus && BuildAndDiagnose) {
- if (const RecordType *Record = DeclRefType->getAs<RecordType>()) {
- // The capture logic needs the destructor, so make sure we mark it.
- // Usually this is unnecessary because most local variables have
- // their destructors marked at declaration time, but parameters are
- // an exception because it's technically only the call site that
- // actually requires the destructor.
- if (isa<ParmVarDecl>(Var))
- S.FinalizeVarWithDestructor(Var, Record);
-
- // Enter a new evaluation context to insulate the copy
- // full-expression.
- EnterExpressionEvaluationContext scope(
- S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
-
- // According to the blocks spec, the capture of a variable from
- // the stack requires a const copy constructor. This is not true
- // of the copy/move done to move a __block variable to the heap.
- Expr *DeclRef = new (S.Context) DeclRefExpr(
- S.Context, Var, Nested, DeclRefType.withConst(), VK_LValue, Loc);
-
- ExprResult Result
- = S.PerformCopyInitialization(
- InitializedEntity::InitializeBlock(Var->getLocation(),
- CaptureType, false),
- Loc, DeclRef);
-
- // Build a full-expression copy expression if initialization
- // succeeded and used a non-trivial constructor. Recover from
- // errors by pretending that the copy isn't necessary.
- if (!Result.isInvalid() &&
- !cast<CXXConstructExpr>(Result.get())->getConstructor()
- ->isTrivial()) {
- Result = S.MaybeCreateExprWithCleanups(Result);
- CopyExpr = Result.get();
- }
- }
- }
}
// Actually capture the variable.
if (BuildAndDiagnose)
- BSI->addCapture(Var, HasBlocksAttr, ByRef, Nested, Loc,
- SourceLocation(), CaptureType, CopyExpr);
-
- return true;
+ BSI->addCapture(Var, HasBlocksAttr, ByRef, Nested, Loc, SourceLocation(),
+ CaptureType, Invalid);
+ return !Invalid;
}
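The array restriction handled above corresponds to a capture like the following hypothetical blocks snippet (compiled with -fblocks, outside OpenCL); with Invalid threaded through, the capture is still recorded after the error so enclosing scopes do not repeat the diagnostic:

    void f() {
      int values[8] = {};
      void (^b)(void) = ^{ (void)values[0]; };  // error: a block cannot capture
                                                // a variable of array type here
      b();
    }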
@@ -15085,7 +15505,7 @@ static bool captureInCapturedRegion(CapturedRegionScopeInfo *RSI,
QualType &CaptureType,
QualType &DeclRefType,
const bool RefersToCapturedVariable,
- Sema &S) {
+ Sema &S, bool Invalid) {
// By default, capture variables by reference.
bool ByRef = true;
// Using an LValue reference type is consistent with Lambdas (see below).
@@ -15105,69 +15525,12 @@ static bool captureInCapturedRegion(CapturedRegionScopeInfo *RSI,
else
CaptureType = DeclRefType;
- Expr *CopyExpr = nullptr;
- if (BuildAndDiagnose) {
- // The current implementation assumes that all variables are captured
- // by references. Since there is no capture by copy, no expression
- // evaluation will be needed.
- RecordDecl *RD = RSI->TheRecordDecl;
-
- FieldDecl *Field
- = FieldDecl::Create(S.Context, RD, Loc, Loc, nullptr, CaptureType,
- S.Context.getTrivialTypeSourceInfo(CaptureType, Loc),
- nullptr, false, ICIS_NoInit);
- Field->setImplicit(true);
- Field->setAccess(AS_private);
- RD->addDecl(Field);
- if (S.getLangOpts().OpenMP && RSI->CapRegionKind == CR_OpenMP)
- S.setOpenMPCaptureKind(Field, Var, RSI->OpenMPLevel);
-
- CopyExpr = new (S.Context) DeclRefExpr(
- S.Context, Var, RefersToCapturedVariable, DeclRefType, VK_LValue, Loc);
- Var->setReferenced(true);
- Var->markUsed(S.Context);
- }
-
// Actually capture the variable.
if (BuildAndDiagnose)
- RSI->addCapture(Var, /*isBlock*/false, ByRef, RefersToCapturedVariable, Loc,
- SourceLocation(), CaptureType, CopyExpr);
-
+ RSI->addCapture(Var, /*isBlock*/ false, ByRef, RefersToCapturedVariable,
+ Loc, SourceLocation(), CaptureType, Invalid);
- return true;
-}
-
-/// Create a field within the lambda class for the variable
-/// being captured.
-static void addAsFieldToClosureType(Sema &S, LambdaScopeInfo *LSI,
- QualType FieldType, QualType DeclRefType,
- SourceLocation Loc,
- bool RefersToCapturedVariable) {
- CXXRecordDecl *Lambda = LSI->Lambda;
-
- // Build the non-static data member.
- FieldDecl *Field
- = FieldDecl::Create(S.Context, Lambda, Loc, Loc, nullptr, FieldType,
- S.Context.getTrivialTypeSourceInfo(FieldType, Loc),
- nullptr, false, ICIS_NoInit);
- // If the variable being captured has an invalid type, mark the lambda class
- // as invalid as well.
- if (!FieldType->isDependentType()) {
- if (S.RequireCompleteType(Loc, FieldType, diag::err_field_incomplete)) {
- Lambda->setInvalidDecl();
- Field->setInvalidDecl();
- } else {
- NamedDecl *Def;
- FieldType->isIncompleteType(&Def);
- if (Def && Def->isInvalidDecl()) {
- Lambda->setInvalidDecl();
- Field->setInvalidDecl();
- }
- }
- }
- Field->setImplicit(true);
- Field->setAccess(AS_private);
- Lambda->addDecl(Field);
+ return !Invalid;
}
/// Capture the given variable in the lambda.
@@ -15181,8 +15544,7 @@ static bool captureInLambda(LambdaScopeInfo *LSI,
const Sema::TryCaptureKind Kind,
SourceLocation EllipsisLoc,
const bool IsTopScope,
- Sema &S) {
-
+ Sema &S, bool Invalid) {
// Determine whether we are capturing by reference or by value.
bool ByRef = false;
if (IsTopScope && Kind != Sema::TryCapture_Implicit) {
@@ -15223,34 +15585,31 @@ static bool captureInLambda(LambdaScopeInfo *LSI,
}
// Forbid the lambda copy-capture of autoreleasing variables.
- if (CaptureType.getObjCLifetime() == Qualifiers::OCL_Autoreleasing) {
+ if (!Invalid &&
+ CaptureType.getObjCLifetime() == Qualifiers::OCL_Autoreleasing) {
if (BuildAndDiagnose) {
S.Diag(Loc, diag::err_arc_autoreleasing_capture) << /*lambda*/ 1;
S.Diag(Var->getLocation(), diag::note_previous_decl)
<< Var->getDeclName();
+ Invalid = true;
+ } else {
+ return false;
}
- return false;
}
// Make sure that by-copy captures are of a complete and non-abstract type.
- if (BuildAndDiagnose) {
+ if (!Invalid && BuildAndDiagnose) {
if (!CaptureType->isDependentType() &&
S.RequireCompleteType(Loc, CaptureType,
diag::err_capture_of_incomplete_type,
Var->getDeclName()))
- return false;
-
- if (S.RequireNonAbstractType(Loc, CaptureType,
- diag::err_capture_of_abstract_type))
- return false;
+ Invalid = true;
+ else if (S.RequireNonAbstractType(Loc, CaptureType,
+ diag::err_capture_of_abstract_type))
+ Invalid = true;
}
}
- // Capture this variable in the lambda.
- if (BuildAndDiagnose)
- addAsFieldToClosureType(S, LSI, CaptureType, DeclRefType, Loc,
- RefersToCapturedVariable);
-
// Compute the type of a reference to this captured variable.
if (ByRef)
DeclRefType = CaptureType.getNonReferenceType();
@@ -15267,10 +15626,10 @@ static bool captureInLambda(LambdaScopeInfo *LSI,
// Add the capture.
if (BuildAndDiagnose)
- LSI->addCapture(Var, /*IsBlock=*/false, ByRef, RefersToCapturedVariable,
- Loc, EllipsisLoc, CaptureType, /*CopyExpr=*/nullptr);
+ LSI->addCapture(Var, /*isBlock=*/false, ByRef, RefersToCapturedVariable,
+ Loc, EllipsisLoc, CaptureType, Invalid);
- return true;
+ return !Invalid;
}
bool Sema::tryCaptureVariable(
@@ -15304,7 +15663,9 @@ bool Sema::tryCaptureVariable(
// Capture global variables if it is required to use private copy of this
// variable.
bool IsGlobal = !Var->hasLocalStorage();
- if (IsGlobal && !(LangOpts.OpenMP && isOpenMPCapturedDecl(Var)))
+ if (IsGlobal &&
+ !(LangOpts.OpenMP && isOpenMPCapturedDecl(Var, /*CheckScopeInfo=*/true,
+ MaxFunctionScopesIndex)))
return true;
Var = Var->getCanonicalDecl();
@@ -15366,11 +15727,6 @@ bool Sema::tryCaptureVariable(
}
return true;
}
- // Certain capturing entities (lambdas, blocks etc.) are not allowed to capture
- // certain types of variables (unnamed, variably modified types etc.)
- // so check for eligibility.
- if (!isVariableCapturable(CSI, Var, ExprLoc, BuildAndDiagnose, *this))
- return true;
// Try to capture variable-length arrays types.
if (Var->getType()->isVariablyModifiedType()) {
@@ -15441,33 +15797,45 @@ bool Sema::tryCaptureVariable(
// requirements, and adding captures if requested.
// If the variable had already been captured previously, we start capturing
// at the lambda nested within that one.
+ bool Invalid = false;
for (unsigned I = ++FunctionScopesIndex, N = MaxFunctionScopesIndex + 1; I != N;
++I) {
CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(FunctionScopes[I]);
+ // Certain capturing entities (lambdas, blocks etc.) are not allowed to capture
+ // certain types of variables (unnamed, variably modified types etc.)
+ // so check for eligibility.
+ if (!Invalid)
+ Invalid =
+ !isVariableCapturable(CSI, Var, ExprLoc, BuildAndDiagnose, *this);
+
+ // After encountering an error, if we're actually supposed to capture, keep
+ // capturing in nested contexts to suppress any follow-on diagnostics.
+ if (Invalid && !BuildAndDiagnose)
+ return true;
+
if (BlockScopeInfo *BSI = dyn_cast<BlockScopeInfo>(CSI)) {
- if (!captureInBlock(BSI, Var, ExprLoc,
- BuildAndDiagnose, CaptureType,
- DeclRefType, Nested, *this))
- return true;
+ Invalid = !captureInBlock(BSI, Var, ExprLoc, BuildAndDiagnose, CaptureType,
+ DeclRefType, Nested, *this, Invalid);
Nested = true;
} else if (CapturedRegionScopeInfo *RSI = dyn_cast<CapturedRegionScopeInfo>(CSI)) {
- if (!captureInCapturedRegion(RSI, Var, ExprLoc,
- BuildAndDiagnose, CaptureType,
- DeclRefType, Nested, *this))
- return true;
+ Invalid = !captureInCapturedRegion(RSI, Var, ExprLoc, BuildAndDiagnose,
+ CaptureType, DeclRefType, Nested,
+ *this, Invalid);
Nested = true;
} else {
LambdaScopeInfo *LSI = cast<LambdaScopeInfo>(CSI);
- if (!captureInLambda(LSI, Var, ExprLoc,
- BuildAndDiagnose, CaptureType,
+ Invalid =
+ !captureInLambda(LSI, Var, ExprLoc, BuildAndDiagnose, CaptureType,
DeclRefType, Nested, Kind, EllipsisLoc,
- /*IsTopScope*/I == N - 1, *this))
- return true;
+ /*IsTopScope*/ I == N - 1, *this, Invalid);
Nested = true;
}
+
+ if (Invalid && !BuildAndDiagnose)
+ return true;
}
- return false;
+ return Invalid;
}
bool Sema::tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
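The loop above visits every capturing scope between the point of use and the scope that declares the variable, so in a hypothetical nested-lambda case each intermediate closure records its own capture:

    void g() {
      int n = 0;
      auto outer = [&] {        // captures n by reference
        auto inner = [&] {      // captures n again, through the enclosing lambda
          return n + 1;
        };
        return inner();
      };
      outer();
    }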
@@ -15500,52 +15868,376 @@ QualType Sema::getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc) {
return DeclRefType;
}
-
-
-// If either the type of the variable or the initializer is dependent,
-// return false. Otherwise, determine whether the variable is a constant
-// expression. Use this if you need to know if a variable that might or
-// might not be dependent is truly a constant expression.
-static inline bool IsVariableNonDependentAndAConstantExpression(VarDecl *Var,
- ASTContext &Context) {
-
- if (Var->getType()->isDependentType())
- return false;
- const VarDecl *DefVD = nullptr;
- Var->getAnyInitializer(DefVD);
- if (!DefVD)
- return false;
- EvaluatedStmt *Eval = DefVD->ensureEvaluatedStmt();
- Expr *Init = cast<Expr>(Eval->Value);
- if (Init->isValueDependent())
- return false;
- return IsVariableAConstantExpression(Var, Context);
+namespace {
+// Helper to copy the template arguments from a DeclRefExpr or MemberExpr.
+// The produced TemplateArgumentListInfo* points to data stored within this
+// object, so should only be used in contexts where the pointer will not be
+// used after the CopiedTemplateArgs object is destroyed.
+class CopiedTemplateArgs {
+ bool HasArgs;
+ TemplateArgumentListInfo TemplateArgStorage;
+public:
+ template<typename RefExpr>
+ CopiedTemplateArgs(RefExpr *E) : HasArgs(E->hasExplicitTemplateArgs()) {
+ if (HasArgs)
+ E->copyTemplateArgumentsInto(TemplateArgStorage);
+ }
+ operator TemplateArgumentListInfo*()
+#ifdef __has_cpp_attribute
+#if __has_cpp_attribute(clang::lifetimebound)
+ [[clang::lifetimebound]]
+#endif
+#endif
+ {
+ return HasArgs ? &TemplateArgStorage : nullptr;
+ }
+};
}
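The guarded [[clang::lifetimebound]] annotation exists because the conversion operator returns a pointer into the object's own storage; a minimal sketch of the hazard it flags, using a hypothetical type of the same shape:

    struct Holder {
      int Storage = 42;
      operator int *() [[clang::lifetimebound]] { return &Storage; }
    };

    void misuse() {
      int *p = Holder{};   // p points into a temporary that dies at the end of
                           // this full-expression; clang can warn about it
      (void)p;
    }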
-
-void Sema::UpdateMarkingForLValueToRValue(Expr *E) {
+/// Walk the set of potential results of an expression and mark them all as
+/// non-odr-uses if they satisfy the side-conditions of the NonOdrUseReason.
+///
+/// \return A new expression if we found any potential results, ExprEmpty() if
+/// not, and ExprError() if we diagnosed an error.
+static ExprResult rebuildPotentialResultsAsNonOdrUsed(Sema &S, Expr *E,
+ NonOdrUseReason NOUR) {
// Per C++11 [basic.def.odr], a variable is odr-used "unless it is
// an object that satisfies the requirements for appearing in a
// constant expression (5.19) and the lvalue-to-rvalue conversion (4.1)
// is immediately applied." This function handles the lvalue-to-rvalue
// conversion part.
- MaybeODRUseExprs.erase(E->IgnoreParens());
+ //
+ // If we encounter a node that claims to be an odr-use but shouldn't be, we
+ // transform it into the relevant kind of non-odr-use node and rebuild the
+ // tree of nodes leading to it.
+ //
+ // This is a mini-TreeTransform that only transforms a restricted subset of
+ // nodes (and only certain operands of them).
- // If we are in a lambda, check if this DeclRefExpr or MemberExpr refers
- // to a variable that is a constant expression, and if so, identify it as
- // a reference to a variable that does not involve an odr-use of that
- // variable.
- if (LambdaScopeInfo *LSI = getCurLambda()) {
- Expr *SansParensExpr = E->IgnoreParens();
- VarDecl *Var = nullptr;
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(SansParensExpr))
- Var = dyn_cast<VarDecl>(DRE->getFoundDecl());
- else if (MemberExpr *ME = dyn_cast<MemberExpr>(SansParensExpr))
- Var = dyn_cast<VarDecl>(ME->getMemberDecl());
+ // Rebuild a subexpression.
+ auto Rebuild = [&](Expr *Sub) {
+ return rebuildPotentialResultsAsNonOdrUsed(S, Sub, NOUR);
+ };
+
+ // Check whether a potential result satisfies the requirements of NOUR.
+ auto IsPotentialResultOdrUsed = [&](NamedDecl *D) {
+ // Any entity other than a VarDecl is always odr-used whenever it's named
+ // in a potentially-evaluated expression.
+ auto *VD = dyn_cast<VarDecl>(D);
+ if (!VD)
+ return true;
+
+ // C++2a [basic.def.odr]p4:
+  //   A variable x whose name appears as a potentially-evaluated expression
+ // e is odr-used by e unless
+ // -- x is a reference that is usable in constant expressions, or
+ // -- x is a variable of non-reference type that is usable in constant
+ // expressions and has no mutable subobjects, and e is an element of
+ // the set of potential results of an expression of
+ // non-volatile-qualified non-class type to which the lvalue-to-rvalue
+ // conversion is applied, or
+ // -- x is a variable of non-reference type, and e is an element of the
+ // set of potential results of a discarded-value expression to which
+ // the lvalue-to-rvalue conversion is not applied
+ //
+ // We check the first bullet and the "potentially-evaluated" condition in
+ // BuildDeclRefExpr. We check the type requirements in the second bullet
+ // in CheckLValueToRValueConversionOperand below.
+ switch (NOUR) {
+ case NOUR_None:
+ case NOUR_Unevaluated:
+ llvm_unreachable("unexpected non-odr-use-reason");
+
+ case NOUR_Constant:
+ // Constant references were handled when they were built.
+ if (VD->getType()->isReferenceType())
+ return true;
+ if (auto *RD = VD->getType()->getAsCXXRecordDecl())
+ if (RD->hasMutableFields())
+ return true;
+ if (!VD->isUsableInConstantExpressions(S.Context))
+ return true;
+ break;
+
+ case NOUR_Discarded:
+ if (VD->getType()->isReferenceType())
+ return true;
+ break;
+ }
+ return false;
+ };
+
+ // Mark that this expression does not constitute an odr-use.
+ auto MarkNotOdrUsed = [&] {
+ S.MaybeODRUseExprs.erase(E);
+ if (LambdaScopeInfo *LSI = S.getCurLambda())
+ LSI->markVariableExprAsNonODRUsed(E);
+ };
+
+ // C++2a [basic.def.odr]p2:
+ // The set of potential results of an expression e is defined as follows:
+ switch (E->getStmtClass()) {
+ // -- If e is an id-expression, ...
+ case Expr::DeclRefExprClass: {
+ auto *DRE = cast<DeclRefExpr>(E);
+ if (DRE->isNonOdrUse() || IsPotentialResultOdrUsed(DRE->getDecl()))
+ break;
+
+ // Rebuild as a non-odr-use DeclRefExpr.
+ MarkNotOdrUsed();
+ return DeclRefExpr::Create(
+ S.Context, DRE->getQualifierLoc(), DRE->getTemplateKeywordLoc(),
+ DRE->getDecl(), DRE->refersToEnclosingVariableOrCapture(),
+ DRE->getNameInfo(), DRE->getType(), DRE->getValueKind(),
+ DRE->getFoundDecl(), CopiedTemplateArgs(DRE), NOUR);
+ }
+
+ case Expr::FunctionParmPackExprClass: {
+ auto *FPPE = cast<FunctionParmPackExpr>(E);
+ // If any of the declarations in the pack is odr-used, then the expression
+ // as a whole constitutes an odr-use.
+ for (VarDecl *D : *FPPE)
+ if (IsPotentialResultOdrUsed(D))
+ return ExprEmpty();
+
+ // FIXME: Rebuild as a non-odr-use FunctionParmPackExpr? In practice,
+ // nothing cares about whether we marked this as an odr-use, but it might
+ // be useful for non-compiler tools.
+ MarkNotOdrUsed();
+ break;
+ }
+
+ // -- If e is a subscripting operation with an array operand...
+ case Expr::ArraySubscriptExprClass: {
+ auto *ASE = cast<ArraySubscriptExpr>(E);
+ Expr *OldBase = ASE->getBase()->IgnoreImplicit();
+ if (!OldBase->getType()->isArrayType())
+ break;
+ ExprResult Base = Rebuild(OldBase);
+ if (!Base.isUsable())
+ return Base;
+ Expr *LHS = ASE->getBase() == ASE->getLHS() ? Base.get() : ASE->getLHS();
+ Expr *RHS = ASE->getBase() == ASE->getRHS() ? Base.get() : ASE->getRHS();
+ SourceLocation LBracketLoc = ASE->getBeginLoc(); // FIXME: Not stored.
+ return S.ActOnArraySubscriptExpr(nullptr, LHS, LBracketLoc, RHS,
+ ASE->getRBracketLoc());
+ }
+
+ case Expr::MemberExprClass: {
+ auto *ME = cast<MemberExpr>(E);
+ // -- If e is a class member access expression [...] naming a non-static
+ // data member...
+ if (isa<FieldDecl>(ME->getMemberDecl())) {
+ ExprResult Base = Rebuild(ME->getBase());
+ if (!Base.isUsable())
+ return Base;
+ return MemberExpr::Create(
+ S.Context, Base.get(), ME->isArrow(), ME->getOperatorLoc(),
+ ME->getQualifierLoc(), ME->getTemplateKeywordLoc(),
+ ME->getMemberDecl(), ME->getFoundDecl(), ME->getMemberNameInfo(),
+ CopiedTemplateArgs(ME), ME->getType(), ME->getValueKind(),
+ ME->getObjectKind(), ME->isNonOdrUse());
+ }
+
+ if (ME->getMemberDecl()->isCXXInstanceMember())
+ break;
+
+ // -- If e is a class member access expression naming a static data member,
+ // ...
+ if (ME->isNonOdrUse() || IsPotentialResultOdrUsed(ME->getMemberDecl()))
+ break;
+
+ // Rebuild as a non-odr-use MemberExpr.
+ MarkNotOdrUsed();
+ return MemberExpr::Create(
+ S.Context, ME->getBase(), ME->isArrow(), ME->getOperatorLoc(),
+ ME->getQualifierLoc(), ME->getTemplateKeywordLoc(), ME->getMemberDecl(),
+ ME->getFoundDecl(), ME->getMemberNameInfo(), CopiedTemplateArgs(ME),
+ ME->getType(), ME->getValueKind(), ME->getObjectKind(), NOUR);
+ return ExprEmpty();
+ }
+
+ case Expr::BinaryOperatorClass: {
+ auto *BO = cast<BinaryOperator>(E);
+ Expr *LHS = BO->getLHS();
+ Expr *RHS = BO->getRHS();
+ // -- If e is a pointer-to-member expression of the form e1 .* e2 ...
+ if (BO->getOpcode() == BO_PtrMemD) {
+ ExprResult Sub = Rebuild(LHS);
+ if (!Sub.isUsable())
+ return Sub;
+ LHS = Sub.get();
+ // -- If e is a comma expression, ...
+ } else if (BO->getOpcode() == BO_Comma) {
+ ExprResult Sub = Rebuild(RHS);
+ if (!Sub.isUsable())
+ return Sub;
+ RHS = Sub.get();
+ } else {
+ break;
+ }
+ return S.BuildBinOp(nullptr, BO->getOperatorLoc(), BO->getOpcode(),
+ LHS, RHS);
+ }
+
+ // -- If e has the form (e1)...
+ case Expr::ParenExprClass: {
+ auto *PE = cast<ParenExpr>(E);
+ ExprResult Sub = Rebuild(PE->getSubExpr());
+ if (!Sub.isUsable())
+ return Sub;
+ return S.ActOnParenExpr(PE->getLParen(), PE->getRParen(), Sub.get());
+ }
+
+ // -- If e is a glvalue conditional expression, ...
+ // We don't apply this to a binary conditional operator. FIXME: Should we?
+ case Expr::ConditionalOperatorClass: {
+ auto *CO = cast<ConditionalOperator>(E);
+ ExprResult LHS = Rebuild(CO->getLHS());
+ if (LHS.isInvalid())
+ return ExprError();
+ ExprResult RHS = Rebuild(CO->getRHS());
+ if (RHS.isInvalid())
+ return ExprError();
+ if (!LHS.isUsable() && !RHS.isUsable())
+ return ExprEmpty();
+ if (!LHS.isUsable())
+ LHS = CO->getLHS();
+ if (!RHS.isUsable())
+ RHS = CO->getRHS();
+ return S.ActOnConditionalOp(CO->getQuestionLoc(), CO->getColonLoc(),
+ CO->getCond(), LHS.get(), RHS.get());
+ }
+
+ // [Clang extension]
+ // -- If e has the form __extension__ e1...
+ case Expr::UnaryOperatorClass: {
+ auto *UO = cast<UnaryOperator>(E);
+ if (UO->getOpcode() != UO_Extension)
+ break;
+ ExprResult Sub = Rebuild(UO->getSubExpr());
+ if (!Sub.isUsable())
+ return Sub;
+ return S.BuildUnaryOp(nullptr, UO->getOperatorLoc(), UO_Extension,
+ Sub.get());
+ }
+
+ // [Clang extension]
+ // -- If e has the form _Generic(...), the set of potential results is the
+ // union of the sets of potential results of the associated expressions.
+ case Expr::GenericSelectionExprClass: {
+ auto *GSE = cast<GenericSelectionExpr>(E);
+
+ SmallVector<Expr *, 4> AssocExprs;
+ bool AnyChanged = false;
+ for (Expr *OrigAssocExpr : GSE->getAssocExprs()) {
+ ExprResult AssocExpr = Rebuild(OrigAssocExpr);
+ if (AssocExpr.isInvalid())
+ return ExprError();
+ if (AssocExpr.isUsable()) {
+ AssocExprs.push_back(AssocExpr.get());
+ AnyChanged = true;
+ } else {
+ AssocExprs.push_back(OrigAssocExpr);
+ }
+ }
- if (Var && IsVariableNonDependentAndAConstantExpression(Var, Context))
- LSI->markVariableExprAsNonODRUsed(SansParensExpr);
+ return AnyChanged ? S.CreateGenericSelectionExpr(
+ GSE->getGenericLoc(), GSE->getDefaultLoc(),
+ GSE->getRParenLoc(), GSE->getControllingExpr(),
+ GSE->getAssocTypeSourceInfos(), AssocExprs)
+ : ExprEmpty();
}
+
+ // [Clang extension]
+ // -- If e has the form __builtin_choose_expr(...), the set of potential
+ // results is the union of the sets of potential results of the
+ // second and third subexpressions.
+ case Expr::ChooseExprClass: {
+ auto *CE = cast<ChooseExpr>(E);
+
+ ExprResult LHS = Rebuild(CE->getLHS());
+ if (LHS.isInvalid())
+ return ExprError();
+
+    ExprResult RHS = Rebuild(CE->getRHS());
+ if (RHS.isInvalid())
+ return ExprError();
+
+ if (!LHS.get() && !RHS.get())
+ return ExprEmpty();
+ if (!LHS.isUsable())
+ LHS = CE->getLHS();
+ if (!RHS.isUsable())
+ RHS = CE->getRHS();
+
+ return S.ActOnChooseExpr(CE->getBuiltinLoc(), CE->getCond(), LHS.get(),
+ RHS.get(), CE->getRParenLoc());
+ }
+
+ // Step through non-syntactic nodes.
+ case Expr::ConstantExprClass: {
+ auto *CE = cast<ConstantExpr>(E);
+ ExprResult Sub = Rebuild(CE->getSubExpr());
+ if (!Sub.isUsable())
+ return Sub;
+ return ConstantExpr::Create(S.Context, Sub.get());
+ }
+
+ // We could mostly rely on the recursive rebuilding to rebuild implicit
+ // casts, but not at the top level, so rebuild them here.
+ case Expr::ImplicitCastExprClass: {
+ auto *ICE = cast<ImplicitCastExpr>(E);
+ // Only step through the narrow set of cast kinds we expect to encounter.
+ // Anything else suggests we've left the region in which potential results
+ // can be found.
+ switch (ICE->getCastKind()) {
+ case CK_NoOp:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase: {
+ ExprResult Sub = Rebuild(ICE->getSubExpr());
+ if (!Sub.isUsable())
+ return Sub;
+ CXXCastPath Path(ICE->path());
+ return S.ImpCastExprToType(Sub.get(), ICE->getType(), ICE->getCastKind(),
+ ICE->getValueKind(), &Path);
+ }
+
+ default:
+ break;
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ // Can't traverse through this node. Nothing to do.
+ return ExprEmpty();
+}
+
+ExprResult Sema::CheckLValueToRValueConversionOperand(Expr *E) {
+ // Check whether the operand is or contains an object of non-trivial C union
+ // type.
+ if (E->getType().isVolatileQualified() &&
+ (E->getType().hasNonTrivialToPrimitiveDestructCUnion() ||
+ E->getType().hasNonTrivialToPrimitiveCopyCUnion()))
+ checkNonTrivialCUnion(E->getType(), E->getExprLoc(),
+ Sema::NTCUC_LValueToRValueVolatile,
+ NTCUK_Destruct|NTCUK_Copy);
+
+ // C++2a [basic.def.odr]p4:
+ // [...] an expression of non-volatile-qualified non-class type to which
+ // the lvalue-to-rvalue conversion is applied [...]
+ if (E->getType().isVolatileQualified() || E->getType()->getAs<RecordType>())
+ return E;
+
+ ExprResult Result =
+ rebuildPotentialResultsAsNonOdrUsed(*this, E, NOUR_Constant);
+ if (Result.isInvalid())
+ return ExprError();
+ return Result.get() ? Result : E;
}
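Rewriting such references as non-odr-uses is what lets a constant be read inside a lambda without being captured, as in this hypothetical snippet:

    void h() {
      constexpr int limit = 16;
      auto f = [] {           // empty capture list is fine
        return limit * 2;     // lvalue-to-rvalue on a constant: not an odr-use,
                              // so no capture of limit is required
      };
      (void)f();
    }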
ExprResult Sema::ActOnConstantExpression(ExprResult Res) {
@@ -15558,45 +16250,62 @@ ExprResult Sema::ActOnConstantExpression(ExprResult Res) {
// deciding whether it is an odr-use, just assume we will apply the
// lvalue-to-rvalue conversion. In the one case where this doesn't happen
// (a non-type template argument), we have special handling anyway.
- UpdateMarkingForLValueToRValue(Res.get());
- return Res;
+ return CheckLValueToRValueConversionOperand(Res.get());
}
void Sema::CleanupVarDeclMarking() {
- for (Expr *E : MaybeODRUseExprs) {
- VarDecl *Var;
- SourceLocation Loc;
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
- Var = cast<VarDecl>(DRE->getDecl());
- Loc = DRE->getLocation();
- } else if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
- Var = cast<VarDecl>(ME->getMemberDecl());
- Loc = ME->getMemberLoc();
+ // Iterate through a local copy in case MarkVarDeclODRUsed makes a recursive
+ // call.
+ MaybeODRUseExprSet LocalMaybeODRUseExprs;
+ std::swap(LocalMaybeODRUseExprs, MaybeODRUseExprs);
+
+ for (Expr *E : LocalMaybeODRUseExprs) {
+ if (auto *DRE = dyn_cast<DeclRefExpr>(E)) {
+ MarkVarDeclODRUsed(cast<VarDecl>(DRE->getDecl()),
+ DRE->getLocation(), *this);
+ } else if (auto *ME = dyn_cast<MemberExpr>(E)) {
+ MarkVarDeclODRUsed(cast<VarDecl>(ME->getMemberDecl()), ME->getMemberLoc(),
+ *this);
+ } else if (auto *FP = dyn_cast<FunctionParmPackExpr>(E)) {
+ for (VarDecl *VD : *FP)
+ MarkVarDeclODRUsed(VD, FP->getParameterPackLocation(), *this);
} else {
llvm_unreachable("Unexpected expression");
}
-
- MarkVarDeclODRUsed(Var, Loc, *this,
- /*MaxFunctionScopeIndex Pointer*/ nullptr);
}
- MaybeODRUseExprs.clear();
+ assert(MaybeODRUseExprs.empty() &&
+ "MarkVarDeclODRUsed failed to cleanup MaybeODRUseExprs?");
}
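The swap at the top of CleanupVarDeclMarking is the usual guard for draining a work list that a recursive call might drain again; sketched on a generic container (hypothetical helper, not Sema code):

    #include <utility>
    #include <vector>

    void process(int Item);           // may call drain() again, recursively

    void drain(std::vector<int> &Pending) {
      std::vector<int> Local;
      std::swap(Local, Pending);      // detach before iterating, so a recursive
                                      // drain() never walks the same storage
      for (int Item : Local)
        process(Item);
    }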
-
static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
VarDecl *Var, Expr *E) {
- assert((!E || isa<DeclRefExpr>(E) || isa<MemberExpr>(E)) &&
+ assert((!E || isa<DeclRefExpr>(E) || isa<MemberExpr>(E) ||
+ isa<FunctionParmPackExpr>(E)) &&
"Invalid Expr argument to DoMarkVarDeclReferenced");
Var->setReferenced();
- TemplateSpecializationKind TSK = Var->getTemplateSpecializationKind();
+ if (Var->isInvalidDecl())
+ return;
+
+ auto *MSI = Var->getMemberSpecializationInfo();
+ TemplateSpecializationKind TSK = MSI ? MSI->getTemplateSpecializationKind()
+ : Var->getTemplateSpecializationKind();
- bool OdrUseContext = isOdrUseContext(SemaRef);
+ OdrUseContext OdrUse = isOdrUseContext(SemaRef);
bool UsableInConstantExpr =
- Var->isUsableInConstantExpressions(SemaRef.Context);
+ Var->mightBeUsableInConstantExpressions(SemaRef.Context);
+
+ // C++20 [expr.const]p12:
+ // A variable [...] is needed for constant evaluation if it is [...] a
+ // variable whose name appears as a potentially constant evaluated
+  //   expression that is either a constexpr variable or is of non-volatile
+ // const-qualified integral type or of reference type
+ bool NeededForConstantEvaluation =
+ isPotentiallyConstantEvaluatedContext(SemaRef) && UsableInConstantExpr;
+
bool NeedDefinition =
- OdrUseContext || (isEvaluatableContext(SemaRef) && UsableInConstantExpr);
+ OdrUse == OdrUseContext::Used || NeededForConstantEvaluation;
VarTemplateSpecializationDecl *VarSpec =
dyn_cast<VarTemplateSpecializationDecl>(Var);
@@ -15623,11 +16332,15 @@ static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
(TSK == TSK_ExplicitInstantiationDeclaration && UsableInConstantExpr);
if (TryInstantiating) {
- SourceLocation PointOfInstantiation = Var->getPointOfInstantiation();
+ SourceLocation PointOfInstantiation =
+ MSI ? MSI->getPointOfInstantiation() : Var->getPointOfInstantiation();
bool FirstInstantiation = PointOfInstantiation.isInvalid();
if (FirstInstantiation) {
PointOfInstantiation = Loc;
- Var->setTemplateSpecializationKind(TSK, PointOfInstantiation);
+ if (MSI)
+ MSI->setPointOfInstantiation(PointOfInstantiation);
+ else
+ Var->setTemplateSpecializationKind(TSK, PointOfInstantiation);
}
bool InstantiationDependent = false;
@@ -15656,25 +16369,53 @@ static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
}
}
- // Per C++11 [basic.def.odr], a variable is odr-used "unless it satisfies
- // the requirements for appearing in a constant expression (5.19) and, if
- // it is an object, the lvalue-to-rvalue conversion (4.1)
- // is immediately applied." We check the first part here, and
- // Sema::UpdateMarkingForLValueToRValue deals with the second part.
- // Note that we use the C++11 definition everywhere because nothing in
- // C++03 depends on whether we get the C++03 version correct. The second
- // part does not apply to references, since they are not objects.
- if (OdrUseContext && E &&
- IsVariableAConstantExpression(Var, SemaRef.Context)) {
- // A reference initialized by a constant expression can never be
- // odr-used, so simply ignore it.
- if (!Var->getType()->isReferenceType() ||
- (SemaRef.LangOpts.OpenMP && SemaRef.isOpenMPCapturedDecl(Var)))
+ // C++2a [basic.def.odr]p4:
+ // A variable x whose name appears as a potentially-evaluated expression e
+ // is odr-used by e unless
+ // -- x is a reference that is usable in constant expressions
+ // -- x is a variable of non-reference type that is usable in constant
+ // expressions and has no mutable subobjects [FIXME], and e is an
+ // element of the set of potential results of an expression of
+ // non-volatile-qualified non-class type to which the lvalue-to-rvalue
+ // conversion is applied
+ // -- x is a variable of non-reference type, and e is an element of the set
+ // of potential results of a discarded-value expression to which the
+ // lvalue-to-rvalue conversion is not applied [FIXME]
+ //
+ // We check the first part of the second bullet here, and
+ // Sema::CheckLValueToRValueConversionOperand deals with the second part.
+ // FIXME: To get the third bullet right, we need to delay this even for
+ // variables that are not usable in constant expressions.
+
+ // If we already know this isn't an odr-use, there's nothing more to do.
+ if (DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(E))
+ if (DRE->isNonOdrUse())
+ return;
+ if (MemberExpr *ME = dyn_cast_or_null<MemberExpr>(E))
+ if (ME->isNonOdrUse())
+ return;
+
+ switch (OdrUse) {
+ case OdrUseContext::None:
+ assert((!E || isa<FunctionParmPackExpr>(E)) &&
+ "missing non-odr-use marking for unevaluated decl ref");
+ break;
+
+ case OdrUseContext::FormallyOdrUsed:
+ // FIXME: Ignoring formal odr-uses results in incorrect lambda capture
+ // behavior.
+ break;
+
+ case OdrUseContext::Used:
+ // If we might later find that this expression isn't actually an odr-use,
+ // delay the marking.
+ if (E && Var->isUsableInConstantExpressions(SemaRef.Context))
SemaRef.MaybeODRUseExprs.insert(E);
- } else if (OdrUseContext) {
- MarkVarDeclODRUsed(Var, Loc, SemaRef,
- /*MaxFunctionScopeIndex ptr*/ nullptr);
- } else if (isOdrUseContext(SemaRef, /*SkipDependentUses*/false)) {
+ else
+ MarkVarDeclODRUsed(Var, Loc, SemaRef);
+ break;
+
+ case OdrUseContext::Dependent:
// If this is a dependent context, we don't need to mark variables as
// odr-used, but we may still need to track them for lambda capture.
// FIXME: Do we also need to do this inside dependent typeid expressions
@@ -15695,12 +16436,15 @@ static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
// later (ActOnFinishFullExpr) for eventual capture and odr-use marking
// unless the variable is a reference that was initialized by a constant
// expression (this will never need to be captured or odr-used).
+ //
+ // FIXME: We can simplify this a lot after implementing P0588R1.
assert(E && "Capture variable should be used in an expression.");
if (!Var->getType()->isReferenceType() ||
- !IsVariableNonDependentAndAConstantExpression(Var, SemaRef.Context))
+ !Var->isUsableInConstantExpressions(SemaRef.Context))
LSI->addPotentialCapture(E->IgnoreParens());
}
}
+ break;
}
}
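The dependent-context branch above is what defers capture decisions in templates; in a hypothetical generic factory the uses are only recorded as potential captures until instantiation:

    template <typename T>
    auto scaled(T factor) {
      const int base = 10;
      // Uses of base and factor below are tracked as potential captures; their
      // odr-use status is decided when scaled<T> is instantiated.
      return [=](T x) { return x * factor + base; };
    }

    int use() { return scaled(3)(4); }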
@@ -15777,6 +16521,12 @@ void Sema::MarkMemberReferenced(MemberExpr *E) {
MarkExprReferenced(*this, Loc, E->getMemberDecl(), E, MightBeOdrUse);
}
+/// Perform reference-marking and odr-use handling for a FunctionParmPackExpr.
+void Sema::MarkFunctionParmPackReferenced(FunctionParmPackExpr *E) {
+ for (VarDecl *VD : *E)
+ MarkExprReferenced(*this, E->getParameterPackLocation(), VD, E, true);
+}
+
/// Perform marking for a reference to an arbitrary declaration. It
/// marks the declaration referenced, and performs odr-use checking for
/// functions and variables. This method should not be used when building a
@@ -15903,13 +16653,6 @@ namespace {
void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
Visit(E->getExpr());
}
-
- void VisitImplicitCastExpr(ImplicitCastExpr *E) {
- Inherited::VisitImplicitCastExpr(E);
-
- if (E->getCastKind() == CK_LValueToRValue)
- S.UpdateMarkingForLValueToRValue(E->getSubExpr());
- }
};
}
@@ -15939,7 +16682,7 @@ void Sema::MarkDeclarationsReferencedInExpr(Expr *E,
/// behavior of a program, such as passing a non-POD value through an ellipsis.
/// Failure to do so will likely result in spurious diagnostics or failures
/// during overload resolution or within sizeof/alignof/typeof/typeid.
-bool Sema::DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
+bool Sema::DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD) {
switch (ExprEvalContexts.back().Context) {
case ExpressionEvaluationContext::Unevaluated:
@@ -15955,9 +16698,9 @@ bool Sema::DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
case ExpressionEvaluationContext::PotentiallyEvaluated:
case ExpressionEvaluationContext::PotentiallyEvaluatedIfUsed:
- if (Statement && getCurFunctionOrMethodDecl()) {
+ if (!Stmts.empty() && getCurFunctionOrMethodDecl()) {
FunctionScopes.back()->PossiblyUnreachableDiags.
- push_back(sema::PossiblyUnreachableDiag(PD, Loc, Statement));
+ push_back(sema::PossiblyUnreachableDiag(PD, Loc, Stmts));
return true;
}
@@ -15982,6 +16725,12 @@ bool Sema::DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
return false;
}
+bool Sema::DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
+ const PartialDiagnostic &PD) {
+ return DiagRuntimeBehavior(
+ Loc, Statement ? llvm::makeArrayRef(Statement) : llvm::None, PD);
+}
+
bool Sema::CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD) {
if (ReturnType->isVoidType() || !ReturnType->isIncompleteType())
@@ -16548,13 +17297,11 @@ ExprResult RebuildUnknownAnyExpr::resolveDecl(Expr *E, ValueDecl *VD) {
DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E);
if (DRE && Proto && Proto->getParamTypes().empty() && Proto->isVariadic()) {
SourceLocation Loc = FD->getLocation();
- FunctionDecl *NewFD = FunctionDecl::Create(S.Context,
- FD->getDeclContext(),
- Loc, Loc, FD->getNameInfo().getName(),
- DestType, FD->getTypeSourceInfo(),
- SC_None, false/*isInlineSpecified*/,
- FD->hasPrototype(),
- false/*isConstexprSpecified*/);
+ FunctionDecl *NewFD = FunctionDecl::Create(
+ S.Context, FD->getDeclContext(), Loc, Loc,
+ FD->getNameInfo().getName(), DestType, FD->getTypeSourceInfo(),
+ SC_None, false /*isInlineSpecified*/, FD->hasPrototype(),
+ /*ConstexprKind*/ CSK_unspecified);
if (FD->getQualifier())
NewFD->setQualifierInfo(FD->getQualifierLoc());
@@ -16843,10 +17590,9 @@ ExprResult Sema::ActOnObjCAvailabilityCheckExpr(
StringRef Platform = getASTContext().getTargetInfo().getPlatformName();
- auto Spec = std::find_if(AvailSpecs.begin(), AvailSpecs.end(),
- [&](const AvailabilitySpec &Spec) {
- return Spec.getPlatform() == Platform;
- });
+ auto Spec = llvm::find_if(AvailSpecs, [&](const AvailabilitySpec &Spec) {
+ return Spec.getPlatform() == Platform;
+ });
VersionTuple Version;
if (Spec != AvailSpecs.end())
diff --git a/lib/Sema/SemaExprCXX.cpp b/lib/Sema/SemaExprCXX.cpp
index 8c89a3cee3db..705e3b9bd7fb 100644
--- a/lib/Sema/SemaExprCXX.cpp
+++ b/lib/Sema/SemaExprCXX.cpp
@@ -1,9 +1,8 @@
//===--- SemaExprCXX.cpp - Semantic Analysis for Expressions --------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -91,7 +90,7 @@ ParsedType Sema::getConstructorName(IdentifierInfo &II,
// When naming a constructor as a member of a dependent context (eg, in a
// friend declaration or an inherited constructor declaration), form an
// unresolved "typename" type.
- if (CurClass->isDependentContext() && !EnteringContext) {
+ if (CurClass->isDependentContext() && !EnteringContext && SS.getScopeRep()) {
QualType T = Context.getDependentNameType(ETK_None, SS.getScopeRep(), &II);
return ParsedType::make(T);
}
@@ -530,7 +529,7 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
ExprResult
Sema::ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
- // OpenCL C++ 1.0 s2.9: typeid is not supported.
+ // typeid is not supported in OpenCL.
if (getLangOpts().OpenCLCPlusPlus) {
return ExprError(Diag(OpLoc, diag::err_openclcxx_not_supported)
<< "typeid");
@@ -751,12 +750,10 @@ ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope) {
// Don't report an error if 'throw' is used in system headers.
if (!getLangOpts().CXXExceptions &&
- !getSourceManager().isInSystemHeader(OpLoc) &&
- (!getLangOpts().OpenMPIsDevice ||
- !getLangOpts().OpenMPHostCXXExceptions ||
- isInOpenMPTargetExecutionDirective() ||
- isInOpenMPDeclareTargetContext()))
- Diag(OpLoc, diag::err_exceptions_disabled) << "throw";
+ !getSourceManager().isInSystemHeader(OpLoc) && !getLangOpts().CUDA) {
+ // Delay error emission for the OpenMP device code.
+ targetDiag(OpLoc, diag::err_exceptions_disabled) << "throw";
+ }
// Exceptions aren't allowed in CUDA device code.
if (getLangOpts().CUDA)
@@ -944,6 +941,21 @@ bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc,
}
}
+ // Under the Itanium C++ ABI, memory for the exception object is allocated by
+ // the runtime with no ability for the compiler to request additional
+ // alignment. Warn if the exception type requires alignment beyond the minimum
+ // guaranteed by the target C++ runtime.
+ if (Context.getTargetInfo().getCXXABI().isItaniumFamily()) {
+ CharUnits TypeAlign = Context.getTypeAlignInChars(Ty);
+ CharUnits ExnObjAlign = Context.getExnObjectAlignment();
+ if (ExnObjAlign < TypeAlign) {
+ Diag(ThrowLoc, diag::warn_throw_underaligned_obj);
+ Diag(ThrowLoc, diag::note_throw_underaligned_obj)
+ << Ty << (unsigned)TypeAlign.getQuantity()
+ << (unsigned)ExnObjAlign.getQuantity();
+ }
+ }
+
return false;
}
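The new check warns when a thrown object asks for more alignment than the Itanium runtime guarantees for the exception buffer; a hypothetical trigger:

    struct alignas(64) Wide { char data[64]; };

    void raise() {
      throw Wide();   // on Itanium-family targets this may warn that the
                      // exception object cannot be given 64-byte alignment
    }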
@@ -1122,48 +1134,6 @@ Sema::CXXThisScopeRAII::~CXXThisScopeRAII() {
}
}
-static Expr *captureThis(Sema &S, ASTContext &Context, RecordDecl *RD,
- QualType ThisTy, SourceLocation Loc,
- const bool ByCopy) {
-
- QualType AdjustedThisTy = ThisTy;
- // The type of the corresponding data member (not a 'this' pointer if 'by
- // copy').
- QualType CaptureThisFieldTy = ThisTy;
- if (ByCopy) {
- // If we are capturing the object referred to by '*this' by copy, ignore any
- // cv qualifiers inherited from the type of the member function for the type
- // of the closure-type's corresponding data member and any use of 'this'.
- CaptureThisFieldTy = ThisTy->getPointeeType();
- CaptureThisFieldTy.removeLocalCVRQualifiers(Qualifiers::CVRMask);
- AdjustedThisTy = Context.getPointerType(CaptureThisFieldTy);
- }
-
- FieldDecl *Field = FieldDecl::Create(
- Context, RD, Loc, Loc, nullptr, CaptureThisFieldTy,
- Context.getTrivialTypeSourceInfo(CaptureThisFieldTy, Loc), nullptr, false,
- ICIS_NoInit);
-
- Field->setImplicit(true);
- Field->setAccess(AS_private);
- RD->addDecl(Field);
- Expr *This =
- new (Context) CXXThisExpr(Loc, ThisTy, /*isImplicit*/ true);
- if (ByCopy) {
- Expr *StarThis = S.CreateBuiltinUnaryOp(Loc,
- UO_Deref,
- This).get();
- InitializedEntity Entity = InitializedEntity::InitializeLambdaCapture(
- nullptr, CaptureThisFieldTy, Loc);
- InitializationKind InitKind = InitializationKind::CreateDirect(Loc, Loc, Loc);
- InitializationSequence Init(S, Entity, InitKind, StarThis);
- ExprResult ER = Init.Perform(S, Entity, InitKind, StarThis);
- if (ER.isInvalid()) return nullptr;
- return ER.get();
- }
- return This;
-}
-
bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
bool BuildAndDiagnose, const unsigned *const FunctionScopeIndexToStopAt,
const bool ByCopy) {
@@ -1253,29 +1223,25 @@ bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
dyn_cast<LambdaScopeInfo>(FunctionScopes[MaxFunctionScopesIndex])) &&
"Only a lambda can capture the enclosing object (referred to by "
"*this) by copy");
- // FIXME: We need to delay this marking in PotentiallyPotentiallyEvaluated
- // contexts.
QualType ThisTy = getCurrentThisType();
for (int idx = MaxFunctionScopesIndex; NumCapturingClosures;
--idx, --NumCapturingClosures) {
CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(FunctionScopes[idx]);
- Expr *ThisExpr = nullptr;
-
- if (LambdaScopeInfo *LSI = dyn_cast<LambdaScopeInfo>(CSI)) {
- // For lambda expressions, build a field and an initializing expression,
- // and capture the *enclosing object* by copy only if this is the first
- // iteration.
- ThisExpr = captureThis(*this, Context, LSI->Lambda, ThisTy, Loc,
- ByCopy && idx == MaxFunctionScopesIndex);
- } else if (CapturedRegionScopeInfo *RSI
- = dyn_cast<CapturedRegionScopeInfo>(FunctionScopes[idx]))
- ThisExpr =
- captureThis(*this, Context, RSI->TheRecordDecl, ThisTy, Loc,
- false/*ByCopy*/);
+ // The type of the corresponding data member (not a 'this' pointer if 'by
+ // copy').
+ QualType CaptureType = ThisTy;
+ if (ByCopy) {
+ // If we are capturing the object referred to by '*this' by copy, ignore
+ // any cv qualifiers inherited from the type of the member function for
+ // the type of the closure-type's corresponding data member and any use
+ // of 'this'.
+ CaptureType = ThisTy->getPointeeType();
+ CaptureType.removeLocalCVRQualifiers(Qualifiers::CVRMask);
+ }
bool isNested = NumCapturingClosures > 1;
- CSI->addThisCapture(isNested, Loc, ThisExpr, ByCopy);
+ CSI->addThisCapture(isNested, Loc, CaptureType, ByCopy);
}
return false;
}
@@ -1286,10 +1252,20 @@ ExprResult Sema::ActOnCXXThis(SourceLocation Loc) {
/// which the function is called.
QualType ThisTy = getCurrentThisType();
- if (ThisTy.isNull()) return Diag(Loc, diag::err_invalid_this_use);
+ if (ThisTy.isNull())
+ return Diag(Loc, diag::err_invalid_this_use);
+ return BuildCXXThisExpr(Loc, ThisTy, /*IsImplicit=*/false);
+}
+
+Expr *Sema::BuildCXXThisExpr(SourceLocation Loc, QualType Type,
+ bool IsImplicit) {
+ auto *This = new (Context) CXXThisExpr(Loc, Type, IsImplicit);
+ MarkThisReferenced(This);
+ return This;
+}
- CheckCXXThisCapture(Loc);
- return new (Context) CXXThisExpr(Loc, ThisTy, /*isImplicit=*/false);
+void Sema::MarkThisReferenced(CXXThisExpr *This) {
+ CheckCXXThisCapture(This->getExprLoc());
}
bool Sema::isThisOutsideMemberFunctionBody(QualType BaseType) {
@@ -1666,7 +1642,7 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen, MultiExprArg PlacementArgs,
SourceLocation PlacementRParen, SourceRange TypeIdParens,
Declarator &D, Expr *Initializer) {
- Expr *ArraySize = nullptr;
+ Optional<Expr *> ArraySize;
// If the specified type is an array, unwrap it and save the expression.
if (D.getNumTypeObjects() > 0 &&
D.getTypeObject(0).Kind == DeclaratorChunk::Array) {
@@ -1677,7 +1653,7 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
if (Chunk.Arr.hasStatic)
return ExprError(Diag(Chunk.Loc, diag::err_static_illegal_in_new)
<< D.getSourceRange());
- if (!Chunk.Arr.NumElts)
+ if (!Chunk.Arr.NumElts && !Initializer)
return ExprError(Diag(Chunk.Loc, diag::err_array_new_needs_size)
<< D.getSourceRange());
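Accepting a missing bound when an initializer is present matches C++20 array size deduction in new-expressions; a hypothetical use:

    void make() {
      int *p = new int[]{1, 2, 3};   // array bound deduced from the initializer
      delete[] p;
    }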
@@ -1790,7 +1766,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
- Expr *ArraySize,
+ Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer) {
SourceRange TypeRange = AllocTypeInfo->getTypeLoc().getSourceRange();
@@ -1841,9 +1817,11 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
auto *Deduced = AllocType->getContainedDeducedType();
if (Deduced && isa<DeducedTemplateSpecializationType>(Deduced)) {
if (ArraySize)
- return ExprError(Diag(ArraySize->getExprLoc(),
- diag::err_deduced_class_template_compound_type)
- << /*array*/ 2 << ArraySize->getSourceRange());
+ return ExprError(
+ Diag(ArraySize ? (*ArraySize)->getExprLoc() : TypeRange.getBegin(),
+ diag::err_deduced_class_template_compound_type)
+ << /*array*/ 2
+ << (ArraySize ? (*ArraySize)->getSourceRange() : TypeRange));
InitializedEntity Entity
= InitializedEntity::InitializeNew(StartLoc, AllocType);
@@ -1873,11 +1851,12 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
if (Braced && !getLangOpts().CPlusPlus17)
Diag(Initializer->getBeginLoc(), diag::ext_auto_new_list_init)
<< AllocType << TypeRange;
+ Expr *Deduce = Inits[0];
QualType DeducedType;
- if (DeduceAutoType(AllocTypeInfo, Inits[0], DeducedType) == DAR_Failed)
+ if (DeduceAutoType(AllocTypeInfo, Deduce, DeducedType) == DAR_Failed)
return ExprError(Diag(StartLoc, diag::err_auto_new_deduction_failure)
- << AllocType << Inits[0]->getType()
- << TypeRange << Inits[0]->getSourceRange());
+ << AllocType << Deduce->getType()
+ << TypeRange << Deduce->getSourceRange());
if (DeducedType.isNull())
return ExprError();
AllocType = DeducedType;
@@ -1908,8 +1887,9 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
QualType ResultType = Context.getPointerType(AllocType);
- if (ArraySize && ArraySize->getType()->isNonOverloadPlaceholderType()) {
- ExprResult result = CheckPlaceholderExpr(ArraySize);
+ if (ArraySize && *ArraySize &&
+ (*ArraySize)->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(*ArraySize);
if (result.isInvalid()) return ExprError();
ArraySize = result.get();
}
@@ -1921,19 +1901,19 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// C++1y [expr.new]p6: The expression [...] is implicitly converted to
// std::size_t.
llvm::Optional<uint64_t> KnownArraySize;
- if (ArraySize && !ArraySize->isTypeDependent()) {
+ if (ArraySize && *ArraySize && !(*ArraySize)->isTypeDependent()) {
ExprResult ConvertedSize;
if (getLangOpts().CPlusPlus14) {
assert(Context.getTargetInfo().getIntWidth() && "Builtin type of size 0?");
- ConvertedSize = PerformImplicitConversion(ArraySize, Context.getSizeType(),
+ ConvertedSize = PerformImplicitConversion(*ArraySize, Context.getSizeType(),
AA_Converting);
if (!ConvertedSize.isInvalid() &&
- ArraySize->getType()->getAs<RecordType>())
+ (*ArraySize)->getType()->getAs<RecordType>())
// Diagnose the compatibility of this conversion.
Diag(StartLoc, diag::warn_cxx98_compat_array_size_conversion)
- << ArraySize->getType() << 0 << "'size_t'";
+ << (*ArraySize)->getType() << 0 << "'size_t'";
} else {
class SizeConvertDiagnoser : public ICEConvertDiagnoser {
protected:
@@ -1987,16 +1967,16 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
: diag::ext_array_size_conversion)
<< T << ConvTy->isEnumeralType() << ConvTy;
}
- } SizeDiagnoser(ArraySize);
+ } SizeDiagnoser(*ArraySize);
- ConvertedSize = PerformContextualImplicitConversion(StartLoc, ArraySize,
+ ConvertedSize = PerformContextualImplicitConversion(StartLoc, *ArraySize,
SizeDiagnoser);
}
if (ConvertedSize.isInvalid())
return ExprError();
ArraySize = ConvertedSize.get();
- QualType SizeType = ArraySize->getType();
+ QualType SizeType = (*ArraySize)->getType();
if (!SizeType->isIntegralOrUnscopedEnumerationType())
return ExprError();
@@ -2008,18 +1988,18 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// Let's see if this is a constant < 0. If so, we reject it out of hand,
// per CWG1464. Otherwise, if it's not a constant, we must have an
// unparenthesized array type.
- if (!ArraySize->isValueDependent()) {
+ if (!(*ArraySize)->isValueDependent()) {
llvm::APSInt Value;
// We've already performed any required implicit conversion to integer or
// unscoped enumeration type.
// FIXME: Per CWG1464, we are required to check the value prior to
// converting to size_t. This will never find a negative array size in
// C++14 onwards, because Value is always unsigned here!
- if (ArraySize->isIntegerConstantExpr(Value, Context)) {
+ if ((*ArraySize)->isIntegerConstantExpr(Value, Context)) {
if (Value.isSigned() && Value.isNegative()) {
- return ExprError(Diag(ArraySize->getBeginLoc(),
+ return ExprError(Diag((*ArraySize)->getBeginLoc(),
diag::err_typecheck_negative_array_size)
- << ArraySize->getSourceRange());
+ << (*ArraySize)->getSourceRange());
}
if (!AllocType->isDependentType()) {
@@ -2027,15 +2007,15 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
ConstantArrayType::getNumAddressingBits(Context, AllocType, Value);
if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context))
return ExprError(
- Diag(ArraySize->getBeginLoc(), diag::err_array_too_large)
- << Value.toString(10) << ArraySize->getSourceRange());
+ Diag((*ArraySize)->getBeginLoc(), diag::err_array_too_large)
+ << Value.toString(10) << (*ArraySize)->getSourceRange());
}
KnownArraySize = Value.getZExtValue();
} else if (TypeIdParens.isValid()) {
// Can't have dynamic array size when the type-id is in parentheses.
- Diag(ArraySize->getBeginLoc(), diag::ext_new_paren_array_nonconst)
- << ArraySize->getSourceRange()
+ Diag((*ArraySize)->getBeginLoc(), diag::ext_new_paren_array_nonconst)
+ << (*ArraySize)->getSourceRange()
<< FixItHint::CreateRemoval(TypeIdParens.getBegin())
<< FixItHint::CreateRemoval(TypeIdParens.getEnd());
@@ -2058,10 +2038,10 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
AllocationFunctionScope Scope = UseGlobal ? AFS_Global : AFS_Both;
if (!AllocType->isDependentType() &&
!Expr::hasAnyTypeDependentArguments(PlacementArgs) &&
- FindAllocationFunctions(StartLoc,
- SourceRange(PlacementLParen, PlacementRParen),
- Scope, Scope, AllocType, ArraySize, PassAlignment,
- PlacementArgs, OperatorNew, OperatorDelete))
+ FindAllocationFunctions(
+ StartLoc, SourceRange(PlacementLParen, PlacementRParen), Scope, Scope,
+ AllocType, ArraySize.hasValue(), PassAlignment, PlacementArgs,
+ OperatorNew, OperatorDelete))
return ExprError();
// If this is an array allocation, compute whether the usual array
@@ -2154,6 +2134,22 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
FullInit = Binder->getSubExpr();
Initializer = FullInit.get();
+
+ // FIXME: If we have a KnownArraySize, check that the array bound of the
+ // initializer is no greater than that constant value.
+
+ if (ArraySize && !*ArraySize) {
+ auto *CAT = Context.getAsConstantArrayType(Initializer->getType());
+ if (CAT) {
+ // FIXME: Track that the array size was inferred rather than explicitly
+ // specified.
+ ArraySize = IntegerLiteral::Create(
+ Context, CAT->getSize(), Context.getSizeType(), TypeRange.getEnd());
+ } else {
+ Diag(TypeRange.getEnd(), diag::err_new_array_size_unknown_from_init)
+ << Initializer->getSourceRange();
+ }
+ }
}
// Mark the new and delete operators as referenced.
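
At the source level, the Optional<Expr *> ArraySize plumbing and the inferred IntegerLiteral above let an array new-expression omit its bound when a braced initializer supplies one. A small user-code illustration, not part of the patch:

  // The bound of the new[] is now deduced from the initializer; previously
  // this form was rejected with err_array_new_needs_size.
  int *p = new int[]{1, 2, 3};   // array bound deduced as 3
  // int *q = new int[];         // still an error: no size and no initializer
  delete[] p;
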
@@ -2168,24 +2164,6 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
MarkFunctionReferenced(StartLoc, OperatorDelete);
}
- // C++0x [expr.new]p17:
- // If the new expression creates an array of objects of class type,
- // access and ambiguity control are done for the destructor.
- QualType BaseAllocType = Context.getBaseElementType(AllocType);
- if (ArraySize && !BaseAllocType->isDependentType()) {
- if (const RecordType *BaseRecordType = BaseAllocType->getAs<RecordType>()) {
- if (CXXDestructorDecl *dtor = LookupDestructor(
- cast<CXXRecordDecl>(BaseRecordType->getDecl()))) {
- MarkFunctionReferenced(StartLoc, dtor);
- CheckDestructorAccess(StartLoc, dtor,
- PDiag(diag::err_access_dtor)
- << BaseAllocType);
- if (DiagnoseUseOfDecl(dtor, StartLoc))
- return ExprError();
- }
- }
- }
-
return CXXNewExpr::Create(Context, UseGlobal, OperatorNew, OperatorDelete,
PassAlignment, UsualArrayDeleteWantsSize,
PlacementArgs, TypeIdParens, ArraySize, initStyle,
@@ -2303,8 +2281,8 @@ static bool resolveAllocationOverload(
}
if (Diagnose) {
- S.Diag(R.getNameLoc(), diag::err_ovl_no_viable_function_in_call)
- << R.getLookupName() << Range;
+ PartialDiagnosticAt PD(R.getNameLoc(), S.PDiag(diag::err_ovl_no_viable_function_in_call)
+ << R.getLookupName() << Range);
// If we have aligned candidates, only note the align_val_t candidates
// from AlignedCandidates and the non-align_val_t candidates from
@@ -2319,31 +2297,34 @@ static bool resolveAllocationOverload(
// This was an overaligned allocation, so list the aligned candidates
// first.
Args.insert(Args.begin() + 1, AlignArg);
- AlignedCandidates->NoteCandidates(S, OCD_AllCandidates, Args, "",
+ AlignedCandidates->NoteCandidates(PD, S, OCD_AllCandidates, Args, "",
R.getNameLoc(), IsAligned);
Args.erase(Args.begin() + 1);
- Candidates.NoteCandidates(S, OCD_AllCandidates, Args, "", R.getNameLoc(),
+ Candidates.NoteCandidates(PD, S, OCD_AllCandidates, Args, "", R.getNameLoc(),
IsUnaligned);
} else {
- Candidates.NoteCandidates(S, OCD_AllCandidates, Args);
+ Candidates.NoteCandidates(PD, S, OCD_AllCandidates, Args);
}
}
return true;
case OR_Ambiguous:
if (Diagnose) {
- S.Diag(R.getNameLoc(), diag::err_ovl_ambiguous_call)
- << R.getLookupName() << Range;
- Candidates.NoteCandidates(S, OCD_ViableCandidates, Args);
+ Candidates.NoteCandidates(
+ PartialDiagnosticAt(R.getNameLoc(),
+ S.PDiag(diag::err_ovl_ambiguous_call)
+ << R.getLookupName() << Range),
+ S, OCD_ViableCandidates, Args);
}
return true;
case OR_Deleted: {
if (Diagnose) {
- S.Diag(R.getNameLoc(), diag::err_ovl_deleted_call)
- << Best->Function->isDeleted() << R.getLookupName()
- << S.getDeletedOrUnavailableSuffix(Best->Function) << Range;
- Candidates.NoteCandidates(S, OCD_AllCandidates, Args);
+ Candidates.NoteCandidates(
+ PartialDiagnosticAt(R.getNameLoc(),
+ S.PDiag(diag::err_ovl_deleted_call)
+ << R.getLookupName() << Range),
+ S, OCD_AllCandidates, Args);
}
return true;
}
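
The pattern repeated in this hunk (and in several later ones) replaces an immediate S.Diag(...) followed by NoteCandidates(S, ...) with a NoteCandidates overload that takes the primary diagnostic as a PartialDiagnosticAt. A condensed sketch of the new shape, using the names from the hunk above:

  // Old: emit the error, then separately note the candidates.
  //   S.Diag(R.getNameLoc(), diag::err_ovl_ambiguous_call)
  //       << R.getLookupName() << Range;
  //   Candidates.NoteCandidates(S, OCD_ViableCandidates, Args);
  // New: hand the primary diagnostic to NoteCandidates so it is emitted
  // together with the candidate notes.
  Candidates.NoteCandidates(
      PartialDiagnosticAt(R.getNameLoc(),
                          S.PDiag(diag::err_ovl_ambiguous_call)
                              << R.getLookupName() << Range),
      S, OCD_ViableCandidates, Args);
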
@@ -2432,7 +2413,11 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
}
if (getLangOpts().OpenCLCPlusPlus && R.empty()) {
- Diag(StartLoc, diag::err_openclcxx_not_supported) << "default new";
+ if (PlaceArgs.empty()) {
+ Diag(StartLoc, diag::err_openclcxx_not_supported) << "default new";
+ } else {
+ Diag(StartLoc, diag::err_openclcxx_placement_new);
+ }
return true;
}
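
The split above separates the two failure modes when a new-expression finds no usable operator new in C++ for OpenCL. A rough user-level illustration, assuming an OpenCL C++ compilation mode in which neither form is available (the function and names are hypothetical):

  void f(void *buf) {
    int *a = new int;        // err_openclcxx_not_supported: "default new"
    int *b = new (buf) int;  // err_openclcxx_placement_new
  }
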
@@ -2671,8 +2656,8 @@ void Sema::DeclareGlobalNewDelete() {
if (GlobalNewDeleteDeclared)
return;
- // OpenCL C++ 1.0 s2.9: the implicitly declared new and delete operators
- // are not supported.
+ // The implicitly declared new and delete operators
+ // are not supported in OpenCL.
if (getLangOpts().OpenCLCPlusPlus)
return;
@@ -2798,7 +2783,8 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
}
}
- FunctionProtoType::ExtProtoInfo EPI;
+ FunctionProtoType::ExtProtoInfo EPI(Context.getDefaultCallingConvention(
+ /*IsVariadic=*/false, /*IsCXXMethod=*/false, /*IsBuiltin=*/true));
QualType BadAllocType;
bool HasBadAllocExceptionSpec
@@ -3506,22 +3492,26 @@ static bool resolveBuiltinNewDeleteOverload(Sema &S, CallExpr *TheCall,
}
case OR_No_Viable_Function:
- S.Diag(R.getNameLoc(), diag::err_ovl_no_viable_function_in_call)
- << R.getLookupName() << Range;
- Candidates.NoteCandidates(S, OCD_AllCandidates, Args);
+ Candidates.NoteCandidates(
+ PartialDiagnosticAt(R.getNameLoc(),
+ S.PDiag(diag::err_ovl_no_viable_function_in_call)
+ << R.getLookupName() << Range),
+ S, OCD_AllCandidates, Args);
return true;
case OR_Ambiguous:
- S.Diag(R.getNameLoc(), diag::err_ovl_ambiguous_call)
- << R.getLookupName() << Range;
- Candidates.NoteCandidates(S, OCD_ViableCandidates, Args);
+ Candidates.NoteCandidates(
+ PartialDiagnosticAt(R.getNameLoc(),
+ S.PDiag(diag::err_ovl_ambiguous_call)
+ << R.getLookupName() << Range),
+ S, OCD_ViableCandidates, Args);
return true;
case OR_Deleted: {
- S.Diag(R.getNameLoc(), diag::err_ovl_deleted_call)
- << Best->Function->isDeleted() << R.getLookupName()
- << S.getDeletedOrUnavailableSuffix(Best->Function) << Range;
- Candidates.NoteCandidates(S, OCD_AllCandidates, Args);
+ Candidates.NoteCandidates(
+ PartialDiagnosticAt(R.getNameLoc(), S.PDiag(diag::err_ovl_deleted_call)
+ << R.getLookupName() << Range),
+ S, OCD_AllCandidates, Args);
return true;
}
}
@@ -3647,12 +3637,9 @@ ExprResult Sema::CheckConditionVariable(VarDecl *ConditionVar,
diag::err_invalid_use_of_array_type)
<< ConditionVar->getSourceRange());
- ExprResult Condition = DeclRefExpr::Create(
- Context, NestedNameSpecifierLoc(), SourceLocation(), ConditionVar,
- /*enclosing*/ false, ConditionVar->getLocation(),
- ConditionVar->getType().getNonReferenceType(), VK_LValue);
-
- MarkDeclRefReferenced(cast<DeclRefExpr>(Condition.get()));
+ ExprResult Condition = BuildDeclRefExpr(
+ ConditionVar, ConditionVar->getType().getNonReferenceType(), VK_LValue,
+ ConditionVar->getLocation());
switch (CK) {
case ConditionKind::Boolean:
@@ -4229,7 +4216,15 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
break;
case ICK_Block_Pointer_Conversion: {
- From = ImpCastExprToType(From, ToType.getUnqualifiedType(), CK_BitCast,
+ LangAS AddrSpaceL =
+ ToType->castAs<BlockPointerType>()->getPointeeType().getAddressSpace();
+ LangAS AddrSpaceR =
+ FromType->castAs<BlockPointerType>()->getPointeeType().getAddressSpace();
+ assert(Qualifiers::isAddressSpaceSupersetOf(AddrSpaceL, AddrSpaceR) &&
+ "Invalid cast");
+ CastKind Kind =
+ AddrSpaceL != AddrSpaceR ? CK_AddressSpaceConversion : CK_BitCast;
+ From = ImpCastExprToType(From, ToType.getUnqualifiedType(), Kind,
VK_RValue, /*BasePath=*/nullptr, CCK).get();
break;
}
@@ -5096,8 +5091,15 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT,
assert(Self.Context.hasSameUnqualifiedType(LhsT, RhsT)
== (lhsRecord == rhsRecord));
+ // Unions are never base classes, and never have base classes.
+ // It doesn't matter if they are complete or not. See PR#41843
+ if (lhsRecord && lhsRecord->getDecl()->isUnion())
+ return false;
+ if (rhsRecord && rhsRecord->getDecl()->isUnion())
+ return false;
+
if (lhsRecord == rhsRecord)
- return !lhsRecord->getDecl()->isUnion();
+ return true;
// C++0x [meta.rel]p2:
// If Base and Derived are class types and are different types
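
The early union checks above let __is_base_of be answered without completing the types involved (PR41843, cited in the comment). A small user-level illustration, not part of the patch:

  union U;                                 // declared, never defined
  struct S { };
  static_assert(!__is_base_of(S, U), "");  // U need not be complete:
                                           // unions never have base classes
  static_assert(!__is_base_of(U, S), "");  // unions are never base classes
  static_assert(!__is_base_of(U, U), "");
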
@@ -6019,6 +6021,8 @@ mergeExceptionSpecs(Sema &S, FunctionProtoType::ExceptionSpecInfo ESI1,
if (EST2 == EST_NoexceptFalse) return ESI2;
// If either of them is non-throwing, the result is the other.
+ if (EST1 == EST_NoThrow) return ESI2;
+ if (EST2 == EST_NoThrow) return ESI1;
if (EST1 == EST_DynamicNone) return ESI2;
if (EST2 == EST_DynamicNone) return ESI1;
if (EST1 == EST_BasicNoexcept) return ESI2;
@@ -6047,6 +6051,7 @@ mergeExceptionSpecs(Sema &S, FunctionProtoType::ExceptionSpecInfo ESI1,
case EST_DependentNoexcept:
case EST_NoexceptFalse:
case EST_NoexceptTrue:
+ case EST_NoThrow:
llvm_unreachable("handled above");
case EST_Dynamic: {
@@ -6765,9 +6770,12 @@ ExprResult Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base,
FirstIteration = false;
}
- if (OpKind == tok::arrow &&
- (BaseType->isPointerType() || BaseType->isObjCObjectPointerType()))
- BaseType = BaseType->getPointeeType();
+ if (OpKind == tok::arrow) {
+ if (BaseType->isPointerType())
+ BaseType = BaseType->getPointeeType();
+ else if (auto *AT = Context.getAsArrayType(BaseType))
+ BaseType = AT->getElementType();
+ }
}
// Objective-C properties allow "." access on Objective-C pointer types,
@@ -6855,8 +6863,9 @@ canRecoverDotPseudoDestructorCallsOnPointerObjects(Sema &SemaRef,
QualType DestructedType) {
// If this is a record type, check if its destructor is callable.
if (auto *RD = DestructedType->getAsCXXRecordDecl()) {
- if (CXXDestructorDecl *D = SemaRef.LookupDestructor(RD))
- return SemaRef.CanUseDecl(D, /*TreatUnavailableAsInvalid=*/false);
+ if (RD->hasDefinition())
+ if (CXXDestructorDecl *D = SemaRef.LookupDestructor(RD))
+ return SemaRef.CanUseDecl(D, /*TreatUnavailableAsInvalid=*/false);
return false;
}
@@ -7048,7 +7057,8 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
TemplateIdAnnotation *TemplateId = SecondTypeName.TemplateId;
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
- TypeResult T = ActOnTemplateIdType(TemplateId->SS,
+ TypeResult T = ActOnTemplateIdType(S,
+ TemplateId->SS,
TemplateId->TemplateKWLoc,
TemplateId->Template,
TemplateId->Name,
@@ -7100,7 +7110,8 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
TemplateIdAnnotation *TemplateId = FirstTypeName.TemplateId;
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
- TypeResult T = ActOnTemplateIdType(TemplateId->SS,
+ TypeResult T = ActOnTemplateIdType(S,
+ TemplateId->SS,
TemplateId->TemplateKWLoc,
TemplateId->Template,
TemplateId->Name,
@@ -7190,12 +7201,12 @@ ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
}
}
- MemberExpr *ME = new (Context) MemberExpr(
- Exp.get(), /*IsArrow=*/false, SourceLocation(), Method, SourceLocation(),
- Context.BoundMemberTy, VK_RValue, OK_Ordinary);
- if (HadMultipleCandidates)
- ME->setHadMultipleCandidates(true);
- MarkMemberReferenced(ME);
+ MemberExpr *ME =
+ BuildMemberExpr(Exp.get(), /*IsArrow=*/false, SourceLocation(),
+ NestedNameSpecifierLoc(), SourceLocation(), Method,
+ DeclAccessPair::make(FoundDecl, FoundDecl->getAccess()),
+ HadMultipleCandidates, DeclarationNameInfo(),
+ Context.BoundMemberTy, VK_RValue, OK_Ordinary);
QualType ResultType = Method->getReturnType();
ExprValueKind VK = Expr::getValueKindForType(ResultType);
@@ -7396,7 +7407,7 @@ static inline bool VariableCanNeverBeAConstantExpression(VarDecl *Var,
return false;
}
- return !IsVariableAConstantExpression(Var, Context);
+ return !Var->isUsableInConstantExpressions(Context);
}
/// Check if the current lambda has any potential captures
@@ -7425,12 +7436,7 @@ static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
// All the potentially captureable variables in the current nested
// lambda (within a generic outer lambda), must be captured by an
// outer lambda that is enclosed within a non-dependent context.
- const unsigned NumPotentialCaptures =
- CurrentLSI->getNumPotentialVariableCaptures();
- for (unsigned I = 0; I != NumPotentialCaptures; ++I) {
- Expr *VarExpr = nullptr;
- VarDecl *Var = nullptr;
- CurrentLSI->getPotentialVariableCapture(I, Var, VarExpr);
+ CurrentLSI->visitPotentialCaptures([&] (VarDecl *Var, Expr *VarExpr) {
// If the variable is clearly identified as non-odr-used and the full
// expression is not instantiation dependent, only then do we not
// need to check enclosing lambda's for speculative captures.
@@ -7444,17 +7450,15 @@ static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
// }
if (CurrentLSI->isVariableExprMarkedAsNonODRUsed(VarExpr) &&
!IsFullExprInstantiationDependent)
- continue;
+ return;
// If we have a capture-capable lambda for the variable, go ahead and
// capture the variable in that lambda (and all its enclosing lambdas).
if (const Optional<unsigned> Index =
getStackIndexOfNearestEnclosingCaptureCapableLambda(
- S.FunctionScopes, Var, S)) {
- const unsigned FunctionScopeIndexOfCapturableLambda = Index.getValue();
- MarkVarDeclODRUsed(Var, VarExpr->getExprLoc(), S,
- &FunctionScopeIndexOfCapturableLambda);
- }
+ S.FunctionScopes, Var, S))
+ S.MarkCaptureUsedInEnclosingContext(Var, VarExpr->getExprLoc(),
+ Index.getValue());
const bool IsVarNeverAConstantExpression =
VariableCanNeverBeAConstantExpression(Var, S.Context);
if (!IsFullExprInstantiationDependent || IsVarNeverAConstantExpression) {
@@ -7478,7 +7482,7 @@ static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
DeclRefType, nullptr);
}
}
- }
+ });
// Check if 'this' needs to be captured.
if (CurrentLSI->hasPotentialThisCapture()) {
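
The loop-to-callback rewrite above relies on a visitPotentialCaptures helper on the lambda scope info; a minimal sketch of the iteration shape it introduces, assuming the interface used in this hunk, with the body elided:

  // The explicit index loop over potential captures becomes a visitor;
  // "return" inside the lambda plays the role of the old "continue".
  CurrentLSI->visitPotentialCaptures([&](VarDecl *Var, Expr *VarExpr) {
    if (CurrentLSI->isVariableExprMarkedAsNonODRUsed(VarExpr))
      return; // was: continue;
    // ... decide whether Var must be captured by an enclosing lambda ...
  });
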
diff --git a/lib/Sema/SemaExprMember.cpp b/lib/Sema/SemaExprMember.cpp
index b2b21ba9eefa..c856e37e99e7 100644
--- a/lib/Sema/SemaExprMember.cpp
+++ b/lib/Sema/SemaExprMember.cpp
@@ -1,9 +1,8 @@
//===--- SemaExprMember.cpp - Semantic Analysis for Expressions -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -591,7 +590,7 @@ namespace {
// Callback to only accept typo corrections that are either a ValueDecl or a
// FunctionTemplateDecl and are declared in the current record or, for a C++
// classes, one of its base classes.
-class RecordMemberExprValidatorCCC : public CorrectionCandidateCallback {
+class RecordMemberExprValidatorCCC final : public CorrectionCandidateCallback {
public:
explicit RecordMemberExprValidatorCCC(const RecordType *RTy)
: Record(RTy->getDecl()) {
@@ -629,6 +628,10 @@ public:
return false;
}
+ std::unique_ptr<CorrectionCandidateCallback> clone() override {
+ return llvm::make_unique<RecordMemberExprValidatorCCC>(*this);
+ }
+
private:
const RecordDecl *const Record;
};
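
The clone() override added above is part of a change visible throughout this diff: typo-correction callbacks are now stack-allocated and passed by reference, with the correction machinery cloning them when it needs to keep one alive. A condensed sketch of the new calling convention, with names taken from the Objective-C ivar lookup later in this diff:

  // Old: ownership of a heap-allocated callback was passed in:
  //   S.CorrectTypo(..., llvm::make_unique<DeclFilterCCC<ObjCIvarDecl>>(), ...)
  // New: the callback lives on the stack and is passed by reference; the
  // clone() override lets delayed correction copy it if needed.
  DeclFilterCCC<ObjCIvarDecl> Validator{};
  Validator.IsObjCIvarLookup = IsArrow;
  if (TypoCorrection Corrected = S.CorrectTypo(
          R.getLookupNameInfo(), Sema::LookupMemberName, nullptr, nullptr,
          Validator, Sema::CTK_ErrorRecovery, IDecl)) {
    // ... apply the correction ...
  }
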
@@ -697,9 +700,9 @@ static bool LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
};
QueryState Q = {R.getSema(), R.getLookupNameInfo(), R.getLookupKind(),
R.redeclarationKind()};
+ RecordMemberExprValidatorCCC CCC(RTy);
TE = SemaRef.CorrectTypoDelayed(
- R.getLookupNameInfo(), R.getLookupKind(), nullptr, &SS,
- llvm::make_unique<RecordMemberExprValidatorCCC>(RTy),
+ R.getLookupNameInfo(), R.getLookupKind(), nullptr, &SS, CCC,
[=, &SemaRef](const TypoCorrection &TC) {
if (TC) {
assert(!TC.isKeyword() &&
@@ -890,18 +893,32 @@ BuildMSPropertyRefExpr(Sema &S, Expr *BaseExpr, bool IsArrow,
NameInfo.getLoc());
}
-/// Build a MemberExpr AST node.
-static MemberExpr *BuildMemberExpr(
- Sema &SemaRef, ASTContext &C, Expr *Base, bool isArrow,
- SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
- ValueDecl *Member, DeclAccessPair FoundDecl,
- const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK,
- ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr) {
- assert((!isArrow || Base->isRValue()) && "-> base must be a pointer rvalue");
- MemberExpr *E = MemberExpr::Create(
- C, Base, isArrow, OpLoc, SS.getWithLocInContext(C), TemplateKWLoc, Member,
- FoundDecl, MemberNameInfo, TemplateArgs, Ty, VK, OK);
- SemaRef.MarkMemberReferenced(E);
+MemberExpr *Sema::BuildMemberExpr(
+ Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS,
+ SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl,
+ bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo,
+ QualType Ty, ExprValueKind VK, ExprObjectKind OK,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ NestedNameSpecifierLoc NNS =
+ SS ? SS->getWithLocInContext(Context) : NestedNameSpecifierLoc();
+ return BuildMemberExpr(Base, IsArrow, OpLoc, NNS, TemplateKWLoc, Member,
+ FoundDecl, HadMultipleCandidates, MemberNameInfo, Ty,
+ VK, OK, TemplateArgs);
+}
+
+MemberExpr *Sema::BuildMemberExpr(
+ Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS,
+ SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl,
+ bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo,
+ QualType Ty, ExprValueKind VK, ExprObjectKind OK,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ assert((!IsArrow || Base->isRValue()) && "-> base must be a pointer rvalue");
+ MemberExpr *E =
+ MemberExpr::Create(Context, Base, IsArrow, OpLoc, NNS, TemplateKWLoc,
+ Member, FoundDecl, MemberNameInfo, TemplateArgs, Ty,
+ VK, OK, getNonOdrUseReasonInCurrentContext(Member));
+ E->setHadMultipleCandidates(HadMultipleCandidates);
+ MarkMemberReferenced(E);
return E;
}
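
These Sema::BuildMemberExpr overloads centralize MemberExpr creation, including the multiple-candidates flag, the non-ODR-use reason, and MarkMemberReferenced. A sketch of a typical call from inside a Sema member function, using the operand names that appear later in this hunk:

  // Build a reference to a data member Var on BaseExpr; BuildMemberExpr
  // marks the member referenced and records candidate information.
  MemberExpr *ME = BuildMemberExpr(
      BaseExpr, IsArrow, OpLoc, &SS, TemplateKWLoc, Var, FoundDecl,
      /*HadMultipleCandidates=*/false, MemberNameInfo,
      Var->getType().getNonReferenceType(), VK_LValue, OK_Ordinary);
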
@@ -1089,8 +1106,7 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
SourceLocation Loc = R.getNameLoc();
if (SS.getRange().isValid())
Loc = SS.getRange().getBegin();
- CheckCXXThisCapture(Loc);
- BaseExpr = new (Context) CXXThisExpr(Loc, BaseExprType,/*isImplicit=*/true);
+ BaseExpr = BuildCXXThisExpr(Loc, BaseExprType, /*IsImplicit=*/true);
}
// Check the use of this member.
@@ -1113,10 +1129,10 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
OpLoc);
if (VarDecl *Var = dyn_cast<VarDecl>(MemberDecl)) {
- return BuildMemberExpr(*this, Context, BaseExpr, IsArrow, OpLoc, SS,
- TemplateKWLoc, Var, FoundDecl, MemberNameInfo,
- Var->getType().getNonReferenceType(), VK_LValue,
- OK_Ordinary);
+ return BuildMemberExpr(BaseExpr, IsArrow, OpLoc, &SS, TemplateKWLoc, Var,
+ FoundDecl, /*HadMultipleCandidates=*/false,
+ MemberNameInfo, Var->getType().getNonReferenceType(),
+ VK_LValue, OK_Ordinary);
}
if (CXXMethodDecl *MemberFn = dyn_cast<CXXMethodDecl>(MemberDecl)) {
@@ -1130,24 +1146,25 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
type = MemberFn->getType();
}
- return BuildMemberExpr(*this, Context, BaseExpr, IsArrow, OpLoc, SS,
- TemplateKWLoc, MemberFn, FoundDecl, MemberNameInfo,
- type, valueKind, OK_Ordinary);
+ return BuildMemberExpr(BaseExpr, IsArrow, OpLoc, &SS, TemplateKWLoc,
+ MemberFn, FoundDecl, /*HadMultipleCandidates=*/false,
+ MemberNameInfo, type, valueKind, OK_Ordinary);
}
assert(!isa<FunctionDecl>(MemberDecl) && "member function not C++ method?");
if (EnumConstantDecl *Enum = dyn_cast<EnumConstantDecl>(MemberDecl)) {
- return BuildMemberExpr(*this, Context, BaseExpr, IsArrow, OpLoc, SS,
- TemplateKWLoc, Enum, FoundDecl, MemberNameInfo,
- Enum->getType(), VK_RValue, OK_Ordinary);
+ return BuildMemberExpr(BaseExpr, IsArrow, OpLoc, &SS, TemplateKWLoc, Enum,
+ FoundDecl, /*HadMultipleCandidates=*/false,
+ MemberNameInfo, Enum->getType(), VK_RValue,
+ OK_Ordinary);
}
if (VarTemplateDecl *VarTempl = dyn_cast<VarTemplateDecl>(MemberDecl)) {
if (VarDecl *Var = getVarTemplateSpecialization(
*this, VarTempl, TemplateArgs, MemberNameInfo, TemplateKWLoc))
- return BuildMemberExpr(*this, Context, BaseExpr, IsArrow, OpLoc, SS,
- TemplateKWLoc, Var, FoundDecl, MemberNameInfo,
- Var->getType().getNonReferenceType(), VK_LValue,
- OK_Ordinary);
+ return BuildMemberExpr(
+ BaseExpr, IsArrow, OpLoc, &SS, TemplateKWLoc, Var, FoundDecl,
+ /*HadMultipleCandidates=*/false, MemberNameInfo,
+ Var->getType().getNonReferenceType(), VK_LValue, OK_Ordinary);
return ExprError();
}
@@ -1332,11 +1349,11 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
if (!IV) {
// Attempt to correct for typos in ivar names.
- auto Validator = llvm::make_unique<DeclFilterCCC<ObjCIvarDecl>>();
- Validator->IsObjCIvarLookup = IsArrow;
+ DeclFilterCCC<ObjCIvarDecl> Validator{};
+ Validator.IsObjCIvarLookup = IsArrow;
if (TypoCorrection Corrected = S.CorrectTypo(
R.getLookupNameInfo(), Sema::LookupMemberName, nullptr, nullptr,
- std::move(Validator), Sema::CTK_ErrorRecovery, IDecl)) {
+ Validator, Sema::CTK_ErrorRecovery, IDecl)) {
IV = Corrected.getCorrectionDeclAs<ObjCIvarDecl>();
S.diagnoseTypo(
Corrected,
@@ -1803,9 +1820,10 @@ Sema::BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
}
}
- return BuildMemberExpr(*this, Context, Base.get(), IsArrow, OpLoc, SS,
+ return BuildMemberExpr(Base.get(), IsArrow, OpLoc, &SS,
/*TemplateKWLoc=*/SourceLocation(), Field, FoundDecl,
- MemberNameInfo, MemberType, VK, OK);
+ /*HadMultipleCandidates=*/false, MemberNameInfo,
+ MemberType, VK, OK);
}
/// Builds an implicit member access expression. The current context
@@ -1833,8 +1851,7 @@ Sema::BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation Loc = R.getNameLoc();
if (SS.getRange().isValid())
Loc = SS.getRange().getBegin();
- CheckCXXThisCapture(Loc);
- baseExpr = new (Context) CXXThisExpr(loc, ThisTy, /*isImplicit=*/true);
+ baseExpr = BuildCXXThisExpr(loc, ThisTy, /*IsImplicit=*/true);
}
return BuildMemberReferenceExpr(baseExpr, ThisTy,
diff --git a/lib/Sema/SemaExprObjC.cpp b/lib/Sema/SemaExprObjC.cpp
index ed780efd4cf3..040cfdd30c7a 100644
--- a/lib/Sema/SemaExprObjC.cpp
+++ b/lib/Sema/SemaExprObjC.cpp
@@ -1,9 +1,8 @@
//===--- SemaExprObjC.cpp - Semantic Analysis for ObjC Expressions --------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -26,6 +25,7 @@
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/ConvertUTF.h"
using namespace clang;
using namespace sema;
@@ -525,6 +525,30 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
NSStringPointer = Context.getObjCObjectPointerType(NSStringObject);
}
+ // The boxed expression can be emitted as a compile time constant if it is
+ // a string literal whose character encoding is compatible with UTF-8.
+ if (auto *CE = dyn_cast<ImplicitCastExpr>(ValueExpr))
+ if (CE->getCastKind() == CK_ArrayToPointerDecay)
+ if (auto *SL =
+ dyn_cast<StringLiteral>(CE->getSubExpr()->IgnoreParens())) {
+ assert((SL->isAscii() || SL->isUTF8()) &&
+ "unexpected character encoding");
+ StringRef Str = SL->getString();
+ const llvm::UTF8 *StrBegin = Str.bytes_begin();
+ const llvm::UTF8 *StrEnd = Str.bytes_end();
+ // Check that this is a valid UTF-8 string.
+ if (llvm::isLegalUTF8String(&StrBegin, StrEnd)) {
+ BoxedType = Context.getAttributedType(
+ AttributedType::getNullabilityAttrKind(
+ NullabilityKind::NonNull),
+ NSStringPointer, NSStringPointer);
+ return new (Context) ObjCBoxedExpr(CE, BoxedType, nullptr, SR);
+ }
+
+ Diag(SL->getBeginLoc(), diag::warn_objc_boxing_invalid_utf8_string)
+ << NSStringPointer << SL->getSourceRange();
+ }
+
if (!StringWithUTF8StringMethod) {
IdentifierInfo *II = &Context.Idents.get("stringWithUTF8String");
Selector stringWithUTF8String = Context.Selectors.getUnarySelector(II);
@@ -1073,12 +1097,7 @@ ExprResult Sema::BuildObjCEncodeExpression(SourceLocation AtLoc,
// The type of @encode is the same as the type of the corresponding string,
// which is an array type.
- StrTy = Context.CharTy;
- // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
- if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
- StrTy.addConst();
- StrTy = Context.getConstantArrayType(StrTy, llvm::APInt(32, Str.size()+1),
- ArrayType::Normal, 0);
+ StrTy = Context.getStringLiteralArrayType(Context.CharTy, Str.size());
}
return new (Context) ObjCEncodeExpr(StrTy, EncodedTypeInfo, AtLoc, RParenLoc);
@@ -1922,11 +1941,10 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
}
// Attempt to correct for typos in property names.
- if (TypoCorrection Corrected =
- CorrectTypo(DeclarationNameInfo(MemberName, MemberLoc),
- LookupOrdinaryName, nullptr, nullptr,
- llvm::make_unique<DeclFilterCCC<ObjCPropertyDecl>>(),
- CTK_ErrorRecovery, IFace, false, OPT)) {
+ DeclFilterCCC<ObjCPropertyDecl> CCC{};
+ if (TypoCorrection Corrected = CorrectTypo(
+ DeclarationNameInfo(MemberName, MemberLoc), LookupOrdinaryName,
+ nullptr, nullptr, CCC, CTK_ErrorRecovery, IFace, false, OPT)) {
DeclarationName TypoResult = Corrected.getCorrection();
if (TypoResult.isIdentifier() &&
TypoResult.getAsIdentifierInfo() == Member) {
@@ -2083,7 +2101,7 @@ ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
namespace {
-class ObjCInterfaceOrSuperCCC : public CorrectionCandidateCallback {
+class ObjCInterfaceOrSuperCCC final : public CorrectionCandidateCallback {
public:
ObjCInterfaceOrSuperCCC(ObjCMethodDecl *Method) {
// Determine whether "super" is acceptable in the current context.
@@ -2095,6 +2113,10 @@ class ObjCInterfaceOrSuperCCC : public CorrectionCandidateCallback {
return candidate.getCorrectionDeclAs<ObjCInterfaceDecl>() ||
candidate.isKeyword("super");
}
+
+ std::unique_ptr<CorrectionCandidateCallback> clone() override {
+ return llvm::make_unique<ObjCInterfaceOrSuperCCC>(*this);
+ }
};
} // end anonymous namespace
@@ -2170,9 +2192,9 @@ Sema::ObjCMessageKind Sema::getObjCMessageKind(Scope *S,
}
}
+ ObjCInterfaceOrSuperCCC CCC(getCurMethodDecl());
if (TypoCorrection Corrected = CorrectTypo(
- Result.getLookupNameInfo(), Result.getLookupKind(), S, nullptr,
- llvm::make_unique<ObjCInterfaceOrSuperCCC>(getCurMethodDecl()),
+ Result.getLookupNameInfo(), Result.getLookupKind(), S, nullptr, CCC,
CTK_ErrorRecovery, nullptr, false, nullptr, false)) {
if (Corrected.isKeyword()) {
// If we've found the keyword "super" (the only keyword that would be
@@ -2806,8 +2828,8 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
} else {
if (ObjCMethodDecl *CurMeth = getCurMethodDecl()) {
if (ObjCInterfaceDecl *ClassDecl = CurMeth->getClassInterface()) {
- // FIXME: Is this correct? Why are we assuming that a message to
- // Class will call a method in the current interface?
+ // As a guess, try looking for the method in the current interface.
+ // This very well may not produce the "right" method.
// First check the public methods in the class interface.
Method = ClassDecl->lookupClassMethod(Sel);
@@ -2815,8 +2837,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
if (!Method)
Method = ClassDecl->lookupPrivateClassMethod(Sel);
- if (Method && DiagnoseUseOfDecl(Method, SelectorSlotLocs, nullptr,
- false, false, ClassDecl))
+ if (Method && DiagnoseUseOfDecl(Method, SelectorSlotLocs))
return ExprError();
}
}
@@ -4333,23 +4354,22 @@ Expr *Sema::stripARCUnbridgedCast(Expr *e) {
assert(!gse->isResultDependent());
unsigned n = gse->getNumAssocs();
- SmallVector<Expr*, 4> subExprs(n);
- SmallVector<TypeSourceInfo*, 4> subTypes(n);
- for (unsigned i = 0; i != n; ++i) {
- subTypes[i] = gse->getAssocTypeSourceInfo(i);
- Expr *sub = gse->getAssocExpr(i);
- if (i == gse->getResultIndex())
+ SmallVector<Expr *, 4> subExprs;
+ SmallVector<TypeSourceInfo *, 4> subTypes;
+ subExprs.reserve(n);
+ subTypes.reserve(n);
+ for (const GenericSelectionExpr::Association &assoc : gse->associations()) {
+ subTypes.push_back(assoc.getTypeSourceInfo());
+ Expr *sub = assoc.getAssociationExpr();
+ if (assoc.isSelected())
sub = stripARCUnbridgedCast(sub);
- subExprs[i] = sub;
+ subExprs.push_back(sub);
}
- return new (Context) GenericSelectionExpr(Context, gse->getGenericLoc(),
- gse->getControllingExpr(),
- subTypes, subExprs,
- gse->getDefaultLoc(),
- gse->getRParenLoc(),
- gse->containsUnexpandedParameterPack(),
- gse->getResultIndex());
+ return GenericSelectionExpr::Create(
+ Context, gse->getGenericLoc(), gse->getControllingExpr(), subTypes,
+ subExprs, gse->getDefaultLoc(), gse->getRParenLoc(),
+ gse->containsUnexpandedParameterPack(), gse->getResultIndex());
} else {
assert(isa<ImplicitCastExpr>(e) && "bad form of unbridged cast!");
return cast<ImplicitCastExpr>(e)->getSubExpr();
diff --git a/lib/Sema/SemaFixItUtils.cpp b/lib/Sema/SemaFixItUtils.cpp
index 714fbedf09bc..41a7a90a3727 100644
--- a/lib/Sema/SemaFixItUtils.cpp
+++ b/lib/Sema/SemaFixItUtils.cpp
@@ -1,9 +1,8 @@
//===--- SemaFixItUtils.cpp - Sema FixIts ---------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Sema/SemaInit.cpp b/lib/Sema/SemaInit.cpp
index 10c0c6bf33b3..bc1069609336 100644
--- a/lib/Sema/SemaInit.cpp
+++ b/lib/Sema/SemaInit.cpp
@@ -1,9 +1,8 @@
//===--- SemaInit.cpp - Semantic Analysis for Initializers ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -145,16 +144,43 @@ static StringInitFailureKind IsStringInit(Expr *init, QualType declType,
static void updateStringLiteralType(Expr *E, QualType Ty) {
while (true) {
E->setType(Ty);
- if (isa<StringLiteral>(E) || isa<ObjCEncodeExpr>(E))
+ E->setValueKind(VK_RValue);
+ if (isa<StringLiteral>(E) || isa<ObjCEncodeExpr>(E)) {
break;
- else if (ParenExpr *PE = dyn_cast<ParenExpr>(E))
+ } else if (ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
E = PE->getSubExpr();
- else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E))
+ } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ assert(UO->getOpcode() == UO_Extension);
E = UO->getSubExpr();
- else if (GenericSelectionExpr *GSE = dyn_cast<GenericSelectionExpr>(E))
+ } else if (GenericSelectionExpr *GSE = dyn_cast<GenericSelectionExpr>(E)) {
E = GSE->getResultExpr();
- else
+ } else if (ChooseExpr *CE = dyn_cast<ChooseExpr>(E)) {
+ E = CE->getChosenSubExpr();
+ } else {
llvm_unreachable("unexpected expr in string literal init");
+ }
+ }
+}
+
+/// Fix a compound literal initializing an array so it's correctly marked
+/// as an rvalue.
+static void updateGNUCompoundLiteralRValue(Expr *E) {
+ while (true) {
+ E->setValueKind(VK_RValue);
+ if (isa<CompoundLiteralExpr>(E)) {
+ break;
+ } else if (ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
+ E = PE->getSubExpr();
+ } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ assert(UO->getOpcode() == UO_Extension);
+ E = UO->getSubExpr();
+ } else if (GenericSelectionExpr *GSE = dyn_cast<GenericSelectionExpr>(E)) {
+ E = GSE->getResultExpr();
+ } else if (ChooseExpr *CE = dyn_cast<ChooseExpr>(E)) {
+ E = CE->getChosenSubExpr();
+ } else {
+ llvm_unreachable("unexpected expr in array compound literal init");
+ }
}
}
@@ -1681,6 +1707,30 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
}
}
+/// Check if the type of a class element has an accessible destructor, and marks
+/// it referenced. Returns true if we shouldn't form a reference to the
+/// destructor.
+///
+/// Aggregate initialization requires a class element's destructor be
+/// accessible per 11.6.1 [dcl.init.aggr]:
+///
+/// The destructor for each element of class type is potentially invoked
+/// (15.4 [class.dtor]) from the context where the aggregate initialization
+/// occurs.
+static bool checkDestructorReference(QualType ElementType, SourceLocation Loc,
+ Sema &SemaRef) {
+ auto *CXXRD = ElementType->getAsCXXRecordDecl();
+ if (!CXXRD)
+ return false;
+
+ CXXDestructorDecl *Destructor = SemaRef.LookupDestructor(CXXRD);
+ SemaRef.CheckDestructorAccess(Loc, Destructor,
+ SemaRef.PDiag(diag::err_access_dtor_temp)
+ << ElementType);
+ SemaRef.MarkFunctionReferenced(Loc, Destructor);
+ return SemaRef.DiagnoseUseOfDecl(Destructor, Loc);
+}
+
void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
InitListExpr *IList, QualType &DeclType,
llvm::APSInt elementIndex,
@@ -1690,6 +1740,14 @@ void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
unsigned &StructuredIndex) {
const ArrayType *arrayType = SemaRef.Context.getAsArrayType(DeclType);
+ if (!VerifyOnly) {
+ if (checkDestructorReference(arrayType->getElementType(),
+ IList->getEndLoc(), SemaRef)) {
+ hadError = true;
+ return;
+ }
+ }
+
// Check for the special-case of initializing an array with a string.
if (Index < IList->getNumInits()) {
if (IsStringInit(IList->getInit(Index), arrayType, SemaRef.Context) ==
@@ -1851,30 +1909,6 @@ bool InitListChecker::CheckFlexibleArrayInit(const InitializedEntity &Entity,
return FlexArrayDiag != diag::ext_flexible_array_init;
}
-/// Check if the type of a class element has an accessible destructor.
-///
-/// Aggregate initialization requires a class element's destructor be
-/// accessible per 11.6.1 [dcl.init.aggr]:
-///
-/// The destructor for each element of class type is potentially invoked
-/// (15.4 [class.dtor]) from the context where the aggregate initialization
-/// occurs.
-static bool hasAccessibleDestructor(QualType ElementType, SourceLocation Loc,
- Sema &SemaRef) {
- auto *CXXRD = ElementType->getAsCXXRecordDecl();
- if (!CXXRD)
- return false;
-
- CXXDestructorDecl *Destructor = SemaRef.LookupDestructor(CXXRD);
- SemaRef.CheckDestructorAccess(Loc, Destructor,
- SemaRef.PDiag(diag::err_access_dtor_temp)
- << ElementType);
- SemaRef.MarkFunctionReferenced(Loc, Destructor);
- if (SemaRef.DiagnoseUseOfDecl(Destructor, Loc))
- return true;
- return false;
-}
-
void InitListChecker::CheckStructUnionTypes(
const InitializedEntity &Entity, InitListExpr *IList, QualType DeclType,
CXXRecordDecl::base_class_range Bases, RecordDecl::field_iterator Field,
@@ -1898,7 +1932,7 @@ void InitListChecker::CheckStructUnionTypes(
if (!VerifyOnly)
for (FieldDecl *FD : RD->fields()) {
QualType ET = SemaRef.Context.getBaseElementType(FD->getType());
- if (hasAccessibleDestructor(ET, IList->getEndLoc(), SemaRef)) {
+ if (checkDestructorReference(ET, IList->getEndLoc(), SemaRef)) {
hadError = true;
return;
}
@@ -1958,7 +1992,7 @@ void InitListChecker::CheckStructUnionTypes(
}
if (!VerifyOnly)
- if (hasAccessibleDestructor(Base.getType(), InitLoc, SemaRef)) {
+ if (checkDestructorReference(Base.getType(), InitLoc, SemaRef)) {
hadError = true;
return;
}
@@ -2000,7 +2034,7 @@ void InitListChecker::CheckStructUnionTypes(
while (std::next(F) != Field)
++F;
QualType ET = SemaRef.Context.getBaseElementType(F->getType());
- if (hasAccessibleDestructor(ET, InitLoc, SemaRef)) {
+ if (checkDestructorReference(ET, InitLoc, SemaRef)) {
hadError = true;
return;
}
@@ -2049,7 +2083,7 @@ void InitListChecker::CheckStructUnionTypes(
if (!VerifyOnly) {
QualType ET = SemaRef.Context.getBaseElementType(Field->getType());
- if (hasAccessibleDestructor(ET, InitLoc, SemaRef)) {
+ if (checkDestructorReference(ET, InitLoc, SemaRef)) {
hadError = true;
return;
}
@@ -2105,7 +2139,7 @@ void InitListChecker::CheckStructUnionTypes(
: Field;
for (RecordDecl::field_iterator E = RD->field_end(); I != E; ++I) {
QualType ET = SemaRef.Context.getBaseElementType(I->getType());
- if (hasAccessibleDestructor(ET, IList->getEndLoc(), SemaRef)) {
+ if (checkDestructorReference(ET, IList->getEndLoc(), SemaRef)) {
hadError = true;
return;
}
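
The checkDestructorReference calls threaded through the init-list checks above implement the [dcl.init.aggr] rule quoted in the function comment: each class-type element's destructor is access-checked and marked referenced during aggregate initialization. Roughly, for user code like the following hypothetical types:

  struct Elem { ~Elem(); };            // hypothetical element type
  struct Agg  { Elem a, b; };
  Agg x = { Elem(), Elem() };          // ~Elem must be accessible here: if a
                                       // later element's init throws, earlier
                                       // elements are destroyed
  Elem arr[2] = { Elem(), Elem() };    // same rule for array elements
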
@@ -2182,7 +2216,7 @@ namespace {
// Callback to only accept typo corrections that are for field members of
// the given struct or union.
-class FieldInitializerValidatorCCC : public CorrectionCandidateCallback {
+class FieldInitializerValidatorCCC final : public CorrectionCandidateCallback {
public:
explicit FieldInitializerValidatorCCC(RecordDecl *RD)
: Record(RD) {}
@@ -2192,6 +2226,10 @@ class FieldInitializerValidatorCCC : public CorrectionCandidateCallback {
return FD && FD->getDeclContext()->getRedeclContext()->Equals(Record);
}
+ std::unique_ptr<CorrectionCandidateCallback> clone() override {
+ return llvm::make_unique<FieldInitializerValidatorCCC>(*this);
+ }
+
private:
RecordDecl *Record;
};
@@ -2394,10 +2432,10 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// Name lookup didn't find anything.
// Determine whether this was a typo for another field name.
+ FieldInitializerValidatorCCC CCC(RT->getDecl());
if (TypoCorrection Corrected = SemaRef.CorrectTypo(
DeclarationNameInfo(FieldName, D->getFieldLoc()),
- Sema::LookupMemberName, /*Scope=*/nullptr, /*SS=*/nullptr,
- llvm::make_unique<FieldInitializerValidatorCCC>(RT->getDecl()),
+ Sema::LookupMemberName, /*Scope=*/nullptr, /*SS=*/nullptr, CCC,
Sema::CTK_ErrorRecovery, RT->getDecl())) {
SemaRef.diagnoseTypo(
Corrected,
@@ -3244,7 +3282,6 @@ void InitializationSequence::Step::Destroy() {
case SK_QualificationConversionXValue:
case SK_QualificationConversionLValue:
case SK_AtomicConversion:
- case SK_LValueToRValue:
case SK_ListInitialization:
case SK_UnwrapInitList:
case SK_RewrapInitList:
@@ -3306,6 +3343,7 @@ bool InitializationSequence::isAmbiguous() const {
case FK_NonConstLValueReferenceBindingToVectorElement:
case FK_NonConstLValueReferenceBindingToUnrelated:
case FK_RValueReferenceBindingToLValue:
+ case FK_ReferenceAddrspaceMismatchTemporary:
case FK_ReferenceInitDropsQualifiers:
case FK_ReferenceInitFailed:
case FK_ConversionFailed:
@@ -3427,15 +3465,6 @@ void InitializationSequence::AddAtomicConversionStep(QualType Ty) {
Steps.push_back(S);
}
-void InitializationSequence::AddLValueToRValueStep(QualType Ty) {
- assert(!Ty.hasQualifiers() && "rvalues may not have qualifiers");
-
- Step S;
- S.Kind = SK_LValueToRValue;
- S.Type = Ty;
- Steps.push_back(S);
-}
-
void InitializationSequence::AddConversionSequenceStep(
const ImplicitConversionSequence &ICS, QualType T,
bool TopLevelOfInitList) {
@@ -3704,6 +3733,7 @@ ResolveConstructorOverload(Sema &S, SourceLocation DeclLoc,
bool OnlyListConstructors, bool IsListInit,
bool SecondStepOfCopyInit = false) {
CandidateSet.clear(OverloadCandidateSet::CSK_InitByConstructor);
+ CandidateSet.setDestAS(DestType.getQualifiers().getAddressSpace());
for (NamedDecl *D : Ctors) {
auto Info = getConstructorInfo(D);
@@ -3733,9 +3763,10 @@ ResolveConstructorOverload(Sema &S, SourceLocation DeclLoc,
hasCopyOrMoveCtorParam(S.Context, Info));
if (Info.ConstructorTmpl)
- S.AddTemplateOverloadCandidate(Info.ConstructorTmpl, Info.FoundDecl,
- /*ExplicitArgs*/ nullptr, Args,
- CandidateSet, SuppressUserConversions);
+ S.AddTemplateOverloadCandidate(
+ Info.ConstructorTmpl, Info.FoundDecl,
+ /*ExplicitArgs*/ nullptr, Args, CandidateSet, SuppressUserConversions,
+ /*PartialOverloading=*/false, AllowExplicit);
else {
// C++ [over.match.copy]p1:
// - When initializing a temporary to be bound to the first parameter
@@ -3749,8 +3780,8 @@ ResolveConstructorOverload(Sema &S, SourceLocation DeclLoc,
hasCopyOrMoveCtorParam(S.Context, Info);
S.AddOverloadCandidate(Info.Constructor, Info.FoundDecl, Args,
CandidateSet, SuppressUserConversions,
- /*PartialOverloading=*/false,
- /*AllowExplicit=*/AllowExplicitConv);
+ /*PartialOverloading=*/false, AllowExplicit,
+ AllowExplicitConv);
}
}
@@ -3783,16 +3814,17 @@ ResolveConstructorOverload(Sema &S, SourceLocation DeclLoc,
else
Conv = cast<CXXConversionDecl>(D);
- if ((AllowExplicit && !CopyInitializing) || !Conv->isExplicit()) {
+ if (AllowExplicit || !Conv->isExplicit()) {
if (ConvTemplate)
- S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(),
- ActingDC, Initializer, DestType,
- CandidateSet, AllowExplicit,
- /*AllowResultConversion*/false);
+ S.AddTemplateConversionCandidate(
+ ConvTemplate, I.getPair(), ActingDC, Initializer, DestType,
+ CandidateSet, AllowExplicit, AllowExplicit,
+ /*AllowResultConversion*/ false);
else
S.AddConversionCandidate(Conv, I.getPair(), ActingDC, Initializer,
DestType, CandidateSet, AllowExplicit,
- /*AllowResultConversion*/false);
+ AllowExplicit,
+ /*AllowResultConversion*/ false);
}
}
}
@@ -3899,7 +3931,7 @@ static void TryConstructorInitialization(Sema &S,
Result = ResolveConstructorOverload(S, Kind.getLocation(), Args,
CandidateSet, DestType, Ctors, Best,
CopyInitialization, AllowExplicit,
- /*OnlyListConstructor=*/true,
+ /*OnlyListConstructors=*/true,
IsListInit);
}
@@ -4086,7 +4118,7 @@ static void TryReferenceListInitialization(Sema &S,
if (Sequence) {
if (DestType->isRValueReferenceType() ||
(T1Quals.hasConst() && !T1Quals.hasVolatile()))
- Sequence.AddReferenceBindingStep(cv1T1, /*bindingTemporary=*/true);
+ Sequence.AddReferenceBindingStep(cv1T1, /*BindingTemporary=*/true);
else
Sequence.SetFailed(
InitializationSequence::FK_NonConstLValueReferenceBindingToTemporary);
@@ -4338,14 +4370,16 @@ static OverloadingResult TryRefInitWithConversionFunction(
if (!Info.Constructor->isInvalidDecl() &&
Info.Constructor->isConvertingConstructor(AllowExplicitCtors)) {
if (Info.ConstructorTmpl)
- S.AddTemplateOverloadCandidate(Info.ConstructorTmpl, Info.FoundDecl,
- /*ExplicitArgs*/ nullptr,
- Initializer, CandidateSet,
- /*SuppressUserConversions=*/true);
+ S.AddTemplateOverloadCandidate(
+ Info.ConstructorTmpl, Info.FoundDecl,
+ /*ExplicitArgs*/ nullptr, Initializer, CandidateSet,
+ /*SuppressUserConversions=*/true,
+ /*PartialOverloading*/ false, AllowExplicitCtors);
else
- S.AddOverloadCandidate(Info.Constructor, Info.FoundDecl,
- Initializer, CandidateSet,
- /*SuppressUserConversions=*/true);
+ S.AddOverloadCandidate(
+ Info.Constructor, Info.FoundDecl, Initializer, CandidateSet,
+ /*SuppressUserConversions=*/true,
+ /*PartialOverloading*/ false, AllowExplicitCtors);
}
}
}
@@ -4380,17 +4414,17 @@ static OverloadingResult TryRefInitWithConversionFunction(
// candidates with reference-compatible results? That might be needed to
// break recursion.
if ((AllowExplicitConvs || !Conv->isExplicit()) &&
- (AllowRValues || Conv->getConversionType()->isLValueReferenceType())){
+ (AllowRValues ||
+ Conv->getConversionType()->isLValueReferenceType())) {
if (ConvTemplate)
- S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(),
- ActingDC, Initializer,
- DestType, CandidateSet,
- /*AllowObjCConversionOnExplicit=*/
- false);
+ S.AddTemplateConversionCandidate(
+ ConvTemplate, I.getPair(), ActingDC, Initializer, DestType,
+ CandidateSet,
+ /*AllowObjCConversionOnExplicit=*/false, AllowExplicitConvs);
else
- S.AddConversionCandidate(Conv, I.getPair(), ActingDC,
- Initializer, DestType, CandidateSet,
- /*AllowObjCConversionOnExplicit=*/false);
+ S.AddConversionCandidate(
+ Conv, I.getPair(), ActingDC, Initializer, DestType, CandidateSet,
+ /*AllowObjCConversionOnExplicit=*/false, AllowExplicitConvs);
}
}
}
@@ -4597,7 +4631,10 @@ static void TryReferenceInitializationCore(Sema &S,
// - Otherwise, the reference shall be an lvalue reference to a
// non-volatile const type (i.e., cv1 shall be const), or the reference
// shall be an rvalue reference.
- if (isLValueRef && !(T1Quals.hasConst() && !T1Quals.hasVolatile())) {
+ // For address spaces, we interpret this to mean that an addr space
+ // of a reference "cv1 T1" is a superset of addr space of "cv2 T2".
+ if (isLValueRef && !(T1Quals.hasConst() && !T1Quals.hasVolatile() &&
+ T1Quals.isAddressSpaceSupersetOf(T2Quals))) {
if (S.Context.getCanonicalType(T2) == S.Context.OverloadTy)
Sequence.SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
else if (ConvOvlResult && !Sequence.getFailedCandidateSet().empty())
@@ -4606,7 +4643,10 @@ static void TryReferenceInitializationCore(Sema &S,
ConvOvlResult);
else if (!InitCategory.isLValue())
Sequence.SetFailed(
- InitializationSequence::FK_NonConstLValueReferenceBindingToTemporary);
+ T1Quals.isAddressSpaceSupersetOf(T2Quals)
+ ? InitializationSequence::
+ FK_NonConstLValueReferenceBindingToTemporary
+ : InitializationSequence::FK_ReferenceInitDropsQualifiers);
else {
InitializationSequence::FailureKind FK;
switch (RefRelationship) {
@@ -4671,19 +4711,23 @@ static void TryReferenceInitializationCore(Sema &S,
// applied.
// Postpone address space conversions to after the temporary materialization
// conversion to allow creating temporaries in the alloca address space.
- auto AS1 = T1Quals.getAddressSpace();
- auto AS2 = T2Quals.getAddressSpace();
- T1Quals.removeAddressSpace();
- T2Quals.removeAddressSpace();
- QualType cv1T4 = S.Context.getQualifiedType(cv2T2, T1Quals);
- if (T1Quals != T2Quals)
+ auto T1QualsIgnoreAS = T1Quals;
+ auto T2QualsIgnoreAS = T2Quals;
+ if (T1Quals.getAddressSpace() != T2Quals.getAddressSpace()) {
+ T1QualsIgnoreAS.removeAddressSpace();
+ T2QualsIgnoreAS.removeAddressSpace();
+ }
+ QualType cv1T4 = S.Context.getQualifiedType(cv2T2, T1QualsIgnoreAS);
+ if (T1QualsIgnoreAS != T2QualsIgnoreAS)
Sequence.AddQualificationConversionStep(cv1T4, ValueKind);
Sequence.AddReferenceBindingStep(cv1T4, ValueKind == VK_RValue);
ValueKind = isLValueRef ? VK_LValue : VK_XValue;
- if (AS1 != AS2) {
- T1Quals.addAddressSpace(AS1);
- QualType cv1AST4 = S.Context.getQualifiedType(cv2T2, T1Quals);
- Sequence.AddQualificationConversionStep(cv1AST4, ValueKind);
+ // Add addr space conversion if required.
+ if (T1Quals.getAddressSpace() != T2Quals.getAddressSpace()) {
+ auto T4Quals = cv1T4.getQualifiers();
+ T4Quals.addAddressSpace(T1Quals.getAddressSpace());
+ QualType cv1T4WithAS = S.Context.getQualifiedType(T2, T4Quals);
+ Sequence.AddQualificationConversionStep(cv1T4WithAS, ValueKind);
}
// In any case, the reference is bound to the resulting glvalue (or to
@@ -4730,7 +4774,15 @@ static void TryReferenceInitializationCore(Sema &S,
// copy-initialization (8.5). The reference is then bound to the
// temporary. [...]
- InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(cv1T1);
+ // Ignore address space of reference type at this point and perform address
+ // space conversion after the reference binding step.
+ QualType cv1T1IgnoreAS =
+ T1Quals.hasAddressSpace()
+ ? S.Context.getQualifiedType(T1, T1Quals.withoutAddressSpace())
+ : cv1T1;
+
+ InitializedEntity TempEntity =
+ InitializedEntity::InitializeTemporary(cv1T1IgnoreAS);
// FIXME: Why do we use an implicit conversion here rather than trying
// copy-initialization?
@@ -4765,8 +4817,9 @@ static void TryReferenceInitializationCore(Sema &S,
// than, cv2; otherwise, the program is ill-formed.
unsigned T1CVRQuals = T1Quals.getCVRQualifiers();
unsigned T2CVRQuals = T2Quals.getCVRQualifiers();
- if (RefRelationship == Sema::Ref_Related &&
- (T1CVRQuals | T2CVRQuals) != T1CVRQuals) {
+ if ((RefRelationship == Sema::Ref_Related &&
+ (T1CVRQuals | T2CVRQuals) != T1CVRQuals) ||
+ !T1Quals.isAddressSpaceSupersetOf(T2Quals)) {
Sequence.SetFailed(InitializationSequence::FK_ReferenceInitDropsQualifiers);
return;
}
@@ -4780,7 +4833,18 @@ static void TryReferenceInitializationCore(Sema &S,
return;
}
- Sequence.AddReferenceBindingStep(cv1T1, /*bindingTemporary=*/true);
+ Sequence.AddReferenceBindingStep(cv1T1IgnoreAS, /*BindingTemporary=*/true);
+
+ if (T1Quals.hasAddressSpace()) {
+ if (!Qualifiers::isAddressSpaceSupersetOf(T1Quals.getAddressSpace(),
+ LangAS::Default)) {
+ Sequence.SetFailed(
+ InitializationSequence::FK_ReferenceAddrspaceMismatchTemporary);
+ return;
+ }
+ Sequence.AddQualificationConversionStep(cv1T1, isLValueRef ? VK_LValue
+ : VK_XValue);
+ }
}
/// Attempt character array initialization from a string literal
@@ -4928,6 +4992,7 @@ static void TryUserDefinedConversion(Sema &S,
// structure, so that it will persist if we fail.
OverloadCandidateSet &CandidateSet = Sequence.getFailedCandidateSet();
CandidateSet.clear(OverloadCandidateSet::CSK_InitByUserDefinedConversion);
+ CandidateSet.setDestAS(DestType.getQualifiers().getAddressSpace());
// Determine whether we are allowed to call explicit constructors or
// explicit conversion operators.
@@ -4949,14 +5014,16 @@ static void TryUserDefinedConversion(Sema &S,
if (!Info.Constructor->isInvalidDecl() &&
Info.Constructor->isConvertingConstructor(AllowExplicit)) {
if (Info.ConstructorTmpl)
- S.AddTemplateOverloadCandidate(Info.ConstructorTmpl, Info.FoundDecl,
- /*ExplicitArgs*/ nullptr,
- Initializer, CandidateSet,
- /*SuppressUserConversions=*/true);
+ S.AddTemplateOverloadCandidate(
+ Info.ConstructorTmpl, Info.FoundDecl,
+ /*ExplicitArgs*/ nullptr, Initializer, CandidateSet,
+ /*SuppressUserConversions=*/true,
+ /*PartialOverloading*/ false, AllowExplicit);
else
S.AddOverloadCandidate(Info.Constructor, Info.FoundDecl,
Initializer, CandidateSet,
- /*SuppressUserConversions=*/true);
+ /*SuppressUserConversions=*/true,
+ /*PartialOverloading*/ false, AllowExplicit);
}
}
}
@@ -4991,12 +5058,12 @@ static void TryUserDefinedConversion(Sema &S,
if (AllowExplicit || !Conv->isExplicit()) {
if (ConvTemplate)
- S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(),
- ActingDC, Initializer, DestType,
- CandidateSet, AllowExplicit);
+ S.AddTemplateConversionCandidate(
+ ConvTemplate, I.getPair(), ActingDC, Initializer, DestType,
+ CandidateSet, AllowExplicit, AllowExplicit);
else
- S.AddConversionCandidate(Conv, I.getPair(), ActingDC,
- Initializer, DestType, CandidateSet,
+ S.AddConversionCandidate(Conv, I.getPair(), ActingDC, Initializer,
+ DestType, CandidateSet, AllowExplicit,
AllowExplicit);
}
}
@@ -5538,8 +5605,7 @@ void InitializationSequence::InitializeFrom(Sema &S,
// array from a compound literal that creates an array of the same
// type, so long as the initializer has no side effects.
if (!S.getLangOpts().CPlusPlus && Initializer &&
- (isa<ConstantExpr>(Initializer->IgnoreParens()) ||
- isa<CompoundLiteralExpr>(Initializer->IgnoreParens())) &&
+ isa<CompoundLiteralExpr>(Initializer->IgnoreParens()) &&
Initializer->getType()->isArrayType()) {
const ArrayType *SourceAT
= Context.getAsArrayType(Initializer->getType());
@@ -5574,6 +5640,9 @@ void InitializationSequence::InitializeFrom(Sema &S,
bool allowObjCWritebackConversion = S.getLangOpts().ObjCAutoRefCount &&
Entity.isParameterKind();
+ if (TryOCLSamplerInitialization(S, *this, DestType, Initializer))
+ return;
+
// We're at the end of the line for C: it's either a write-back conversion
// or it's a C assignment. There's no need to check anything else.
if (!S.getLangOpts().CPlusPlus) {
@@ -5583,9 +5652,6 @@ void InitializationSequence::InitializeFrom(Sema &S,
return;
}
- if (TryOCLSamplerInitialization(S, *this, DestType, Initializer))
- return;
-
if (TryOCLZeroOpaqueTypeInitialization(S, *this, DestType, Initializer))
return;
@@ -5925,21 +5991,25 @@ static ExprResult CopyObject(Sema &S,
break;
case OR_No_Viable_Function:
- S.Diag(Loc, IsExtraneousCopy && !S.isSFINAEContext()
- ? diag::ext_rvalue_to_reference_temp_copy_no_viable
- : diag::err_temp_copy_no_viable)
- << (int)Entity.getKind() << CurInitExpr->getType()
- << CurInitExpr->getSourceRange();
- CandidateSet.NoteCandidates(S, OCD_AllCandidates, CurInitExpr);
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(
+ Loc, S.PDiag(IsExtraneousCopy && !S.isSFINAEContext()
+ ? diag::ext_rvalue_to_reference_temp_copy_no_viable
+ : diag::err_temp_copy_no_viable)
+ << (int)Entity.getKind() << CurInitExpr->getType()
+ << CurInitExpr->getSourceRange()),
+ S, OCD_AllCandidates, CurInitExpr);
if (!IsExtraneousCopy || S.isSFINAEContext())
return ExprError();
return CurInit;
case OR_Ambiguous:
- S.Diag(Loc, diag::err_temp_copy_ambiguous)
- << (int)Entity.getKind() << CurInitExpr->getType()
- << CurInitExpr->getSourceRange();
- CandidateSet.NoteCandidates(S, OCD_ViableCandidates, CurInitExpr);
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(Loc, S.PDiag(diag::err_temp_copy_ambiguous)
+ << (int)Entity.getKind()
+ << CurInitExpr->getType()
+ << CurInitExpr->getSourceRange()),
+ S, OCD_ViableCandidates, CurInitExpr);
return ExprError();
case OR_Deleted:
@@ -6074,13 +6144,13 @@ static void CheckCXX98CompatAccessibleCopy(Sema &S,
break;
case OR_No_Viable_Function:
- S.Diag(Loc, Diag);
- CandidateSet.NoteCandidates(S, OCD_AllCandidates, CurInitExpr);
+ CandidateSet.NoteCandidates(PartialDiagnosticAt(Loc, Diag), S,
+ OCD_AllCandidates, CurInitExpr);
break;
case OR_Ambiguous:
- S.Diag(Loc, Diag);
- CandidateSet.NoteCandidates(S, OCD_ViableCandidates, CurInitExpr);
+ CandidateSet.NoteCandidates(PartialDiagnosticAt(Loc, Diag), S,
+ OCD_ViableCandidates, CurInitExpr);
break;
case OR_Deleted:
@@ -6269,6 +6339,10 @@ PerformConstructorInitialization(Sema &S,
if (S.DiagnoseUseOfDecl(Step.Function.FoundDecl, Loc))
return ExprError();
+ if (const ArrayType *AT = S.Context.getAsArrayType(Entity.getType()))
+ if (checkDestructorReference(S.Context.getBaseElementType(AT), Loc, S))
+ return ExprError();
+
if (shouldBindAsTemporary(Entity))
CurInit = S.MaybeBindToTemporary(CurInit.get());
@@ -6784,6 +6858,9 @@ static void visitLocalsRetainedByInitializer(IndirectLocalPath &Path,
RK_ReferenceBinding, Visit);
else {
unsigned Index = 0;
+ for (; Index < RD->getNumBases() && Index < ILE->getNumInits(); ++Index)
+ visitLocalsRetainedByInitializer(Path, ILE->getInit(Index), Visit,
+ RevisitSubinits);
for (const auto *I : RD->fields()) {
if (Index >= ILE->getNumInits())
break;
@@ -7275,9 +7352,19 @@ ExprResult Sema::TemporaryMaterializationConversion(Expr *E) {
ExprResult Sema::PerformQualificationConversion(Expr *E, QualType Ty,
ExprValueKind VK,
CheckedConversionKind CCK) {
- CastKind CK = (Ty.getAddressSpace() != E->getType().getAddressSpace())
- ? CK_AddressSpaceConversion
- : CK_NoOp;
+
+ CastKind CK = CK_NoOp;
+
+ if (VK == VK_RValue) {
+ auto PointeeTy = Ty->getPointeeType();
+ auto ExprPointeeTy = E->getType()->getPointeeType();
+ if (!PointeeTy.isNull() &&
+ PointeeTy.getAddressSpace() != ExprPointeeTy.getAddressSpace())
+ CK = CK_AddressSpaceConversion;
+ } else if (Ty.getAddressSpace() != E->getType().getAddressSpace()) {
+ CK = CK_AddressSpaceConversion;
+ }
+
return ImpCastExprToType(E, Ty, CK, VK, /*BasePath=*/nullptr, CCK);
}
@@ -7416,7 +7503,6 @@ ExprResult InitializationSequence::Perform(Sema &S,
case SK_QualificationConversionXValue:
case SK_QualificationConversionRValue:
case SK_AtomicConversion:
- case SK_LValueToRValue:
case SK_ConversionSequence:
case SK_ConversionSequenceNoNarrowing:
case SK_ListInitialization:
@@ -7688,14 +7774,6 @@ ExprResult InitializationSequence::Perform(Sema &S,
break;
}
- case SK_LValueToRValue: {
- assert(CurInit.get()->isGLValue() && "cannot load from a prvalue");
- CurInit = ImplicitCastExpr::Create(S.Context, Step->Type,
- CK_LValueToRValue, CurInit.get(),
- /*BasePath=*/nullptr, VK_RValue);
- break;
- }
-
case SK_ConversionSequence:
case SK_ConversionSequenceNoNarrowing: {
if (const auto *FromPtrType =
@@ -7952,6 +8030,7 @@ ExprResult InitializationSequence::Perform(Sema &S,
S.Diag(Kind.getLocation(), diag::ext_array_init_copy)
<< Step->Type << CurInit.get()->getType()
<< CurInit.get()->getSourceRange();
+ updateGNUCompoundLiteralRValue(CurInit.get());
LLVM_FALLTHROUGH;
case SK_ArrayInit:
// If the destination type is an incomplete array type, update the
@@ -8342,19 +8421,22 @@ bool InitializationSequence::Diagnose(Sema &S,
case FK_UserConversionOverloadFailed:
switch (FailedOverloadResult) {
case OR_Ambiguous:
- if (Failure == FK_UserConversionOverloadFailed)
- S.Diag(Kind.getLocation(), diag::err_typecheck_ambiguous_condition)
- << OnlyArg->getType() << DestType
- << Args[0]->getSourceRange();
- else
- S.Diag(Kind.getLocation(), diag::err_ref_init_ambiguous)
- << DestType << OnlyArg->getType()
- << Args[0]->getSourceRange();
- FailedCandidateSet.NoteCandidates(S, OCD_ViableCandidates, Args);
+ FailedCandidateSet.NoteCandidates(
+ PartialDiagnosticAt(
+ Kind.getLocation(),
+ Failure == FK_UserConversionOverloadFailed
+ ? (S.PDiag(diag::err_typecheck_ambiguous_condition)
+ << OnlyArg->getType() << DestType
+ << Args[0]->getSourceRange())
+ : (S.PDiag(diag::err_ref_init_ambiguous)
+ << DestType << OnlyArg->getType()
+ << Args[0]->getSourceRange())),
+ S, OCD_ViableCandidates, Args);
break;
- case OR_No_Viable_Function:
+ case OR_No_Viable_Function: {
+ auto Cands = FailedCandidateSet.CompleteCandidates(S, OCD_AllCandidates, Args);
if (!S.RequireCompleteType(Kind.getLocation(),
DestType.getNonReferenceType(),
diag::err_typecheck_nonviable_condition_incomplete,
@@ -8364,9 +8446,9 @@ bool InitializationSequence::Diagnose(Sema &S,
<< OnlyArg->getType() << Args[0]->getSourceRange()
<< DestType.getNonReferenceType();
- FailedCandidateSet.NoteCandidates(S, OCD_AllCandidates, Args);
+ FailedCandidateSet.NoteCandidates(S, Args, Cands);
break;
-
+ }
case OR_Deleted: {
S.Diag(Kind.getLocation(), diag::err_typecheck_deleted_function)
<< OnlyArg->getType() << DestType.getNonReferenceType()
@@ -8434,23 +8516,34 @@ bool InitializationSequence::Diagnose(Sema &S,
<< Args[0]->getSourceRange();
break;
+ case FK_ReferenceAddrspaceMismatchTemporary:
+ S.Diag(Kind.getLocation(), diag::err_reference_bind_temporary_addrspace)
+ << DestType << Args[0]->getSourceRange();
+ break;
+
case FK_ReferenceInitDropsQualifiers: {
QualType SourceType = OnlyArg->getType();
QualType NonRefType = DestType.getNonReferenceType();
Qualifiers DroppedQualifiers =
SourceType.getQualifiers() - NonRefType.getQualifiers();
- S.Diag(Kind.getLocation(), diag::err_reference_bind_drops_quals)
- << SourceType
- << NonRefType
- << DroppedQualifiers.getCVRQualifiers()
- << Args[0]->getSourceRange();
+ if (!NonRefType.getQualifiers().isAddressSpaceSupersetOf(
+ SourceType.getQualifiers()))
+ S.Diag(Kind.getLocation(), diag::err_reference_bind_drops_quals)
+ << NonRefType << SourceType << 1 /*addr space*/
+ << Args[0]->getSourceRange();
+ else
+ S.Diag(Kind.getLocation(), diag::err_reference_bind_drops_quals)
+ << NonRefType << SourceType << 0 /*cv quals*/
+ << Qualifiers::fromCVRMask(DroppedQualifiers.getCVRQualifiers())
+ << DroppedQualifiers.getCVRQualifiers() << Args[0]->getSourceRange();
break;
}
case FK_ReferenceInitFailed:
S.Diag(Kind.getLocation(), diag::err_reference_bind_failed)
<< DestType.getNonReferenceType()
+ << DestType.getNonReferenceType()->isIncompleteType()
<< OnlyArg->isLValue()
<< OnlyArg->getType()
<< Args[0]->getSourceRange();
@@ -8529,9 +8622,11 @@ bool InitializationSequence::Diagnose(Sema &S,
// bad.
switch (FailedOverloadResult) {
case OR_Ambiguous:
- S.Diag(Kind.getLocation(), diag::err_ovl_ambiguous_init)
- << DestType << ArgsRange;
- FailedCandidateSet.NoteCandidates(S, OCD_ViableCandidates, Args);
+ FailedCandidateSet.NoteCandidates(
+ PartialDiagnosticAt(Kind.getLocation(),
+ S.PDiag(diag::err_ovl_ambiguous_init)
+ << DestType << ArgsRange),
+ S, OCD_ViableCandidates, Args);
break;
case OR_No_Viable_Function:
@@ -8580,9 +8675,12 @@ bool InitializationSequence::Diagnose(Sema &S,
break;
}
- S.Diag(Kind.getLocation(), diag::err_ovl_no_viable_function_in_init)
- << DestType << ArgsRange;
- FailedCandidateSet.NoteCandidates(S, OCD_AllCandidates, Args);
+ FailedCandidateSet.NoteCandidates(
+ PartialDiagnosticAt(
+ Kind.getLocation(),
+ S.PDiag(diag::err_ovl_no_viable_function_in_init)
+ << DestType << ArgsRange),
+ S, OCD_AllCandidates, Args);
break;
case OR_Deleted: {
@@ -8591,7 +8689,7 @@ bool InitializationSequence::Diagnose(Sema &S,
= FailedCandidateSet.BestViableFunction(S, Kind.getLocation(), Best);
if (Ovl != OR_Deleted) {
S.Diag(Kind.getLocation(), diag::err_ovl_deleted_init)
- << true << DestType << ArgsRange;
+ << DestType << ArgsRange;
llvm_unreachable("Inconsistent overload resolution?");
break;
}
@@ -8605,7 +8703,7 @@ bool InitializationSequence::Diagnose(Sema &S,
<< DestType << ArgsRange;
else
S.Diag(Kind.getLocation(), diag::err_ovl_deleted_init)
- << true << DestType << ArgsRange;
+ << DestType << ArgsRange;
S.NoteDeletedFunction(Best->Function);
break;
@@ -8763,6 +8861,10 @@ void InitializationSequence::dump(raw_ostream &OS) const {
OS << "reference initialization drops qualifiers";
break;
+ case FK_ReferenceAddrspaceMismatchTemporary:
+ OS << "reference with mismatching address space bound to temporary";
+ break;
+
case FK_ReferenceInitFailed:
OS << "reference initialization failed";
break;
@@ -8898,10 +9000,6 @@ void InitializationSequence::dump(raw_ostream &OS) const {
OS << "non-atomic-to-atomic conversion";
break;
- case SK_LValueToRValue:
- OS << "load (lvalue to rvalue)";
- break;
-
case SK_ConversionSequence:
OS << "implicit conversion sequence (";
S->ICS->dump(); // FIXME: use OS
@@ -9264,9 +9362,15 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
OverloadCandidateSet Candidates(Kind.getLocation(),
OverloadCandidateSet::CSK_Normal);
OverloadCandidateSet::iterator Best;
+
+ bool HasAnyDeductionGuide = false;
+ bool AllowExplicit = !Kind.isCopyInit() || ListInit;
+
auto tryToResolveOverload =
[&](bool OnlyListConstructors) -> OverloadingResult {
Candidates.clear(OverloadCandidateSet::CSK_Normal);
+ HasAnyDeductionGuide = false;
+
for (auto I = Guides.begin(), E = Guides.end(); I != E; ++I) {
NamedDecl *D = (*I)->getUnderlyingDecl();
if (D->isInvalidDecl())
@@ -9278,12 +9382,15 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
if (!GD)
continue;
+ if (!GD->isImplicit())
+ HasAnyDeductionGuide = true;
+
// C++ [over.match.ctor]p1: (non-list copy-initialization from non-class)
// For copy-initialization, the candidate functions are all the
// converting constructors (12.3.1) of that class.
// C++ [over.match.copy]p1: (non-list copy-initialization from class)
// The converting constructors of T are candidate functions.
- if (Kind.isCopyInit() && !ListInit) {
+ if (!AllowExplicit) {
// Only consider converting constructors.
if (GD->isExplicit())
continue;
@@ -9318,11 +9425,13 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
if (TD)
AddTemplateOverloadCandidate(TD, I.getPair(), /*ExplicitArgs*/ nullptr,
- Inits, Candidates,
- SuppressUserConversions);
+ Inits, Candidates, SuppressUserConversions,
+ /*PartialOverloading*/ false,
+ AllowExplicit);
else
AddOverloadCandidate(GD, I.getPair(), Inits, Candidates,
- SuppressUserConversions);
+ SuppressUserConversions,
+ /*PartialOverloading*/ false, AllowExplicit);
}
return Candidates.BestViableFunction(*this, Kind.getLocation(), Best);
};
@@ -9372,12 +9481,15 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
switch (Result) {
case OR_Ambiguous:
- Diag(Kind.getLocation(), diag::err_deduced_class_template_ctor_ambiguous)
- << TemplateName;
// FIXME: For list-initialization candidates, it'd usually be better to
// list why they were not viable when given the initializer list itself as
// an argument.
- Candidates.NoteCandidates(*this, OCD_ViableCandidates, Inits);
+ Candidates.NoteCandidates(
+ PartialDiagnosticAt(
+ Kind.getLocation(),
+ PDiag(diag::err_deduced_class_template_ctor_ambiguous)
+ << TemplateName),
+ *this, OCD_ViableCandidates, Inits);
return QualType();
case OR_No_Viable_Function: {
@@ -9385,11 +9497,13 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
cast<ClassTemplateDecl>(Template)->getTemplatedDecl();
bool Complete =
isCompleteType(Kind.getLocation(), Context.getTypeDeclType(Primary));
- Diag(Kind.getLocation(),
- Complete ? diag::err_deduced_class_template_ctor_no_viable
- : diag::err_deduced_class_template_incomplete)
- << TemplateName << !Guides.empty();
- Candidates.NoteCandidates(*this, OCD_AllCandidates, Inits);
+ Candidates.NoteCandidates(
+ PartialDiagnosticAt(
+ Kind.getLocation(),
+ PDiag(Complete ? diag::err_deduced_class_template_ctor_no_viable
+ : diag::err_deduced_class_template_incomplete)
+ << TemplateName << !Guides.empty()),
+ *this, OCD_AllCandidates, Inits);
return QualType();
}
@@ -9430,5 +9544,15 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
Diag(TSInfo->getTypeLoc().getBeginLoc(),
diag::warn_cxx14_compat_class_template_argument_deduction)
<< TSInfo->getTypeLoc().getSourceRange() << 1 << DeducedType;
+
+ // Warn if CTAD was used on a type that does not have any user-defined
+ // deduction guides.
+ if (!HasAnyDeductionGuide) {
+ Diag(TSInfo->getTypeLoc().getBeginLoc(),
+ diag::warn_ctad_maybe_unsupported)
+ << TemplateName;
+ Diag(Template->getLocation(), diag::note_suppress_ctad_maybe_unsupported);
+ }
+
return DeducedType;
}
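As a rough illustration of the warn_ctad_maybe_unsupported diagnostic added at the end of DeduceTemplateSpecializationFromInitializer above, the following C++17 sketch uses invented type names: deduction that relies only on implicit guides is the case being warned about, and a user-written deduction guide suppresses the warning.

template <typename T> struct Holder {
  Holder(T v) : value(v) {}
  T value;
}; // no user-written deduction guide: the new warning would point here

template <typename T> struct Tagged { Tagged(T) {} };
template <typename T> Tagged(T) -> Tagged<T>; // user-written guide: no warning

int main() {
  Holder h{42};  // Holder<int>, deduced through an implicit guide only
  Tagged t{3.5}; // Tagged<double>, HasAnyDeductionGuide is true
  (void)h; (void)t;
}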
diff --git a/lib/Sema/SemaLambda.cpp b/lib/Sema/SemaLambda.cpp
index af233b96d69b..986524e6d56b 100644
--- a/lib/Sema/SemaLambda.cpp
+++ b/lib/Sema/SemaLambda.cpp
@@ -1,9 +1,8 @@
//===--- SemaLambda.cpp - Semantic Analysis for C++11 Lambdas -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -21,6 +20,7 @@
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/SemaLambda.h"
+#include "llvm/ADT/STLExtras.h"
using namespace clang;
using namespace sema;
@@ -226,19 +226,14 @@ Optional<unsigned> clang::getStackIndexOfNearestEnclosingCaptureCapableLambda(
static inline TemplateParameterList *
getGenericLambdaTemplateParameterList(LambdaScopeInfo *LSI, Sema &SemaRef) {
- if (LSI->GLTemplateParameterList)
- return LSI->GLTemplateParameterList;
-
- if (!LSI->AutoTemplateParams.empty()) {
- SourceRange IntroRange = LSI->IntroducerRange;
- SourceLocation LAngleLoc = IntroRange.getBegin();
- SourceLocation RAngleLoc = IntroRange.getEnd();
+ if (!LSI->GLTemplateParameterList && !LSI->TemplateParams.empty()) {
LSI->GLTemplateParameterList = TemplateParameterList::Create(
SemaRef.Context,
- /*Template kw loc*/ SourceLocation(), LAngleLoc,
- llvm::makeArrayRef((NamedDecl *const *)LSI->AutoTemplateParams.data(),
- LSI->AutoTemplateParams.size()),
- RAngleLoc, nullptr);
+ /*Template kw loc*/ SourceLocation(),
+ /*L angle loc*/ LSI->ExplicitTemplateParamsRange.getBegin(),
+ LSI->TemplateParams,
+ /*R angle loc*/LSI->ExplicitTemplateParamsRange.getEnd(),
+ nullptr);
}
return LSI->GLTemplateParameterList;
}
@@ -372,12 +367,11 @@ Sema::ExpressionEvaluationContextRecord::getMangleNumberingContext(
return *MangleNumbering;
}
-CXXMethodDecl *Sema::startLambdaDefinition(CXXRecordDecl *Class,
- SourceRange IntroducerRange,
- TypeSourceInfo *MethodTypeInfo,
- SourceLocation EndLoc,
- ArrayRef<ParmVarDecl *> Params,
- const bool IsConstexprSpecified) {
+CXXMethodDecl *Sema::startLambdaDefinition(
+ CXXRecordDecl *Class, SourceRange IntroducerRange,
+ TypeSourceInfo *MethodTypeInfo, SourceLocation EndLoc,
+ ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind,
+ Optional<std::pair<unsigned, Decl *>> Mangling) {
QualType MethodType = MethodTypeInfo->getType();
TemplateParameterList *TemplateParams =
getGenericLambdaTemplateParameterList(getCurLambda(), *this);
@@ -406,16 +400,12 @@ CXXMethodDecl *Sema::startLambdaDefinition(CXXRecordDecl *Class,
= IntroducerRange.getBegin().getRawEncoding();
MethodNameLoc.CXXOperatorName.EndOpNameLoc
= IntroducerRange.getEnd().getRawEncoding();
- CXXMethodDecl *Method
- = CXXMethodDecl::Create(Context, Class, EndLoc,
- DeclarationNameInfo(MethodName,
- IntroducerRange.getBegin(),
- MethodNameLoc),
- MethodType, MethodTypeInfo,
- SC_None,
- /*isInline=*/true,
- IsConstexprSpecified,
- EndLoc);
+ CXXMethodDecl *Method = CXXMethodDecl::Create(
+ Context, Class, EndLoc,
+ DeclarationNameInfo(MethodName, IntroducerRange.getBegin(),
+ MethodNameLoc),
+ MethodType, MethodTypeInfo, SC_None,
+ /*isInline=*/true, ConstexprKind, EndLoc);
Method->setAccess(AS_public);
// Temporarily set the lexical declaration context to the current
@@ -443,12 +433,16 @@ CXXMethodDecl *Sema::startLambdaDefinition(CXXRecordDecl *Class,
P->setOwningFunction(Method);
}
- Decl *ManglingContextDecl;
- if (MangleNumberingContext *MCtx =
- getCurrentMangleNumberContext(Class->getDeclContext(),
- ManglingContextDecl)) {
- unsigned ManglingNumber = MCtx->getManglingNumber(Method);
- Class->setLambdaMangling(ManglingNumber, ManglingContextDecl);
+ if (Mangling) {
+ Class->setLambdaMangling(Mangling->first, Mangling->second);
+ } else {
+ Decl *ManglingContextDecl;
+ if (MangleNumberingContext *MCtx =
+ getCurrentMangleNumberContext(Class->getDeclContext(),
+ ManglingContextDecl)) {
+ unsigned ManglingNumber = MCtx->getManglingNumber(Method);
+ Class->setLambdaMangling(ManglingNumber, ManglingContextDecl);
+ }
}
return Method;
@@ -493,6 +487,23 @@ void Sema::finishLambdaExplicitCaptures(LambdaScopeInfo *LSI) {
LSI->finishedExplicitCaptures();
}
+void Sema::ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
+ ArrayRef<NamedDecl *> TParams,
+ SourceLocation RAngleLoc) {
+ LambdaScopeInfo *LSI = getCurLambda();
+ assert(LSI && "Expected a lambda scope");
+ assert(LSI->NumExplicitTemplateParams == 0 &&
+ "Already acted on explicit template parameters");
+ assert(LSI->TemplateParams.empty() &&
+ "Explicit template parameters should come "
+ "before invented (auto) ones");
+ assert(!TParams.empty() &&
+ "No template parameters to act on");
+ LSI->TemplateParams.append(TParams.begin(), TParams.end());
+ LSI->NumExplicitTemplateParams = TParams.size();
+ LSI->ExplicitTemplateParamsRange = {LAngleLoc, RAngleLoc};
+}
+
void Sema::addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope) {
@@ -741,11 +752,10 @@ void Sema::deduceClosureReturnType(CapturingScopeInfo &CSI) {
}
}
-QualType Sema::buildLambdaInitCaptureInitialization(SourceLocation Loc,
- bool ByRef,
- IdentifierInfo *Id,
- bool IsDirectInit,
- Expr *&Init) {
+QualType Sema::buildLambdaInitCaptureInitialization(
+ SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
+ Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool IsDirectInit,
+ Expr *&Init) {
// Create an 'auto' or 'auto&' TypeSourceInfo that we can use to
// deduce against.
QualType DeductType = Context.getAutoDeductType();
@@ -756,18 +766,29 @@ QualType Sema::buildLambdaInitCaptureInitialization(SourceLocation Loc,
assert(!DeductType.isNull() && "can't build reference to auto");
TLB.push<ReferenceTypeLoc>(DeductType).setSigilLoc(Loc);
}
+ if (EllipsisLoc.isValid()) {
+ if (Init->containsUnexpandedParameterPack()) {
+ Diag(EllipsisLoc, getLangOpts().CPlusPlus2a
+ ? diag::warn_cxx17_compat_init_capture_pack
+ : diag::ext_init_capture_pack);
+ DeductType = Context.getPackExpansionType(DeductType, NumExpansions);
+ TLB.push<PackExpansionTypeLoc>(DeductType).setEllipsisLoc(EllipsisLoc);
+ } else {
+ // Just ignore the ellipsis for now and form a non-pack variable. We'll
+ // diagnose this later when we try to capture it.
+ }
+ }
TypeSourceInfo *TSI = TLB.getTypeSourceInfo(Context, DeductType);
// Deduce the type of the init capture.
- Expr *DeduceInit = Init;
QualType DeducedType = deduceVarTypeFromInitializer(
/*VarDecl*/nullptr, DeclarationName(Id), DeductType, TSI,
- SourceRange(Loc, Loc), IsDirectInit, DeduceInit);
+ SourceRange(Loc, Loc), IsDirectInit, Init);
if (DeducedType.isNull())
return QualType();
// Are we a non-list direct initialization?
- bool CXXDirectInit = isa<ParenListExpr>(Init);
+ ParenListExpr *CXXDirectInit = dyn_cast<ParenListExpr>(Init);
// Perform initialization analysis and ensure any implicit conversions
// (such as lvalue-to-rvalue) are enforced.
@@ -780,7 +801,10 @@ QualType Sema::buildLambdaInitCaptureInitialization(SourceLocation Loc,
: InitializationKind::CreateDirectList(Loc))
: InitializationKind::CreateCopy(Loc, Init->getBeginLoc());
- MultiExprArg Args = DeduceInit;
+ MultiExprArg Args = Init;
+ if (CXXDirectInit)
+ Args =
+ MultiExprArg(CXXDirectInit->getExprs(), CXXDirectInit->getNumExprs());
QualType DclT;
InitializationSequence InitSeq(*this, Entity, Kind, Args);
ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Args, &DclT);
@@ -794,10 +818,15 @@ QualType Sema::buildLambdaInitCaptureInitialization(SourceLocation Loc,
VarDecl *Sema::createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
+ SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init) {
- TypeSourceInfo *TSI = Context.getTrivialTypeSourceInfo(InitCaptureType,
- Loc);
+ // FIXME: Retain the TypeSourceInfo from buildLambdaInitCaptureInitialization
+ // rather than reconstructing it here.
+ TypeSourceInfo *TSI = Context.getTrivialTypeSourceInfo(InitCaptureType, Loc);
+ if (auto PETL = TSI->getTypeLoc().getAs<PackExpansionTypeLoc>())
+ PETL.setEllipsisLoc(EllipsisLoc);
+
// Create a dummy variable representing the init-capture. This is not actually
// used as a variable, and only exists as a way to name and refer to the
// init-capture.
@@ -813,35 +842,33 @@ VarDecl *Sema::createLambdaInitCaptureVarDecl(SourceLocation Loc,
return NewVD;
}
-FieldDecl *Sema::buildInitCaptureField(LambdaScopeInfo *LSI, VarDecl *Var) {
- FieldDecl *Field = FieldDecl::Create(
- Context, LSI->Lambda, Var->getLocation(), Var->getLocation(),
- nullptr, Var->getType(), Var->getTypeSourceInfo(), nullptr, false,
- ICIS_NoInit);
- Field->setImplicit(true);
- Field->setAccess(AS_private);
- LSI->Lambda->addDecl(Field);
-
+void Sema::addInitCapture(LambdaScopeInfo *LSI, VarDecl *Var) {
+ assert(Var->isInitCapture() && "init capture flag should be set");
LSI->addCapture(Var, /*isBlock*/false, Var->getType()->isReferenceType(),
/*isNested*/false, Var->getLocation(), SourceLocation(),
- Var->getType(), Var->getInit());
- return Field;
+ Var->getType(), /*Invalid*/false);
}
void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo,
Scope *CurScope) {
- // Determine if we're within a context where we know that the lambda will
- // be dependent, because there are template parameters in scope.
- bool KnownDependent = false;
LambdaScopeInfo *const LSI = getCurLambda();
assert(LSI && "LambdaScopeInfo should be on stack!");
- // The lambda-expression's closure type might be dependent even if its
- // semantic context isn't, if it appears within a default argument of a
- // function template.
- if (CurScope->getTemplateParamParent())
- KnownDependent = true;
+ // Determine if we're within a context where we know that the lambda will
+ // be dependent, because there are template parameters in scope.
+ bool KnownDependent;
+ if (LSI->NumExplicitTemplateParams > 0) {
+ auto *TemplateParamScope = CurScope->getTemplateParamParent();
+ assert(TemplateParamScope &&
+ "Lambda with explicit template param list should establish a "
+ "template param scope");
+ assert(TemplateParamScope->getParent());
+ KnownDependent = TemplateParamScope->getParent()
+ ->getTemplateParamParent() != nullptr;
+ } else {
+ KnownDependent = CurScope->getTemplateParamParent() != nullptr;
+ }
// Determine the signature of the call operator.
TypeSourceInfo *MethodTyInfo;
@@ -909,7 +936,7 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
CXXMethodDecl *Method =
startLambdaDefinition(Class, Intro.Range, MethodTyInfo, EndLoc, Params,
- ParamInfo.getDeclSpec().isConstexprSpecified());
+ ParamInfo.getDeclSpec().getConstexprSpecifier());
if (ExplicitParams)
CheckCXXDefaultArguments(Method);
@@ -1016,8 +1043,6 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
? diag::warn_cxx11_compat_init_capture
: diag::ext_init_capture);
- if (C->Init.get()->containsUnexpandedParameterPack())
- ContainsUnexpandedParameterPack = true;
// If the initializer expression is usable, but the InitCaptureType
// is not, then an error has occurred - so ignore the capture for now.
// for e.g., [n{0}] { }; <-- if no <initializer_list> is included.
@@ -1026,6 +1051,10 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
if (C->InitCaptureType.get().isNull())
continue;
+ if (C->Init.get()->containsUnexpandedParameterPack() &&
+ !C->InitCaptureType.get()->getAs<PackExpansionType>())
+ ContainsUnexpandedParameterPack = true;
+
unsigned InitStyle;
switch (C->InitKind) {
case LambdaCaptureInitKind::NoInit:
@@ -1041,7 +1070,8 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
break;
}
Var = createLambdaInitCaptureVarDecl(C->Loc, C->InitCaptureType.get(),
- C->Id, InitStyle, C->Init.get());
+ C->EllipsisLoc, C->Id, InitStyle,
+ C->Init.get());
// C++1y [expr.prim.lambda]p11:
// An init-capture behaves as if it declares and explicitly
// captures a variable [...] whose declarative region is the
@@ -1080,8 +1110,8 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
if (R.empty()) {
// FIXME: Disable corrections that would add qualification?
CXXScopeSpec ScopeSpec;
- if (DiagnoseEmptyLookup(CurScope, ScopeSpec, R,
- llvm::make_unique<DeclFilterCCC<VarDecl>>()))
+ DeclFilterCCC<VarDecl> Validator{};
+ if (DiagnoseEmptyLookup(CurScope, ScopeSpec, R, Validator))
continue;
}
@@ -1133,7 +1163,8 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
EllipsisLoc = C->EllipsisLoc;
} else {
Diag(C->EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
- << SourceRange(C->Loc);
+ << (C->Init.isUsable() ? C->Init.get()->getSourceRange()
+ : SourceRange(C->Loc));
// Just ignore the ellipsis.
}
@@ -1142,7 +1173,7 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
}
if (C->Init.isUsable()) {
- buildInitCaptureField(LSI, Var);
+ addInitCapture(LSI, Var);
} else {
TryCaptureKind Kind = C->Kind == LCK_ByRef ? TryCapture_ExplicitByRef :
TryCapture_ExplicitByVal;
@@ -1228,9 +1259,10 @@ static void addFunctionPointerConversion(Sema &S,
FunctionProtoType::ExtProtoInfo ConvExtInfo(
S.Context.getDefaultCallingConvention(
/*IsVariadic=*/false, /*IsCXXMethod=*/true));
- // The conversion function is always const.
+ // The conversion function is always const and noexcept.
ConvExtInfo.TypeQuals = Qualifiers();
ConvExtInfo.TypeQuals.addConst();
+ ConvExtInfo.ExceptionSpec.Type = EST_BasicNoexcept;
QualType ConvTy =
S.Context.getFunctionType(PtrToFunctionTy, None, ConvExtInfo);
@@ -1296,7 +1328,7 @@ static void addFunctionPointerConversion(Sema &S,
S.Context.getTranslationUnitDecl(), From->getBeginLoc(),
From->getLocation(), From->getIdentifier(), From->getType(),
From->getTypeSourceInfo(), From->getStorageClass(),
- /*DefaultArg=*/nullptr));
+ /*DefArg=*/nullptr));
CallOpConvTL.setParam(I, From);
CallOpConvNameTL.setParam(I, From);
}
@@ -1304,8 +1336,8 @@ static void addFunctionPointerConversion(Sema &S,
CXXConversionDecl *Conversion = CXXConversionDecl::Create(
S.Context, Class, Loc,
DeclarationNameInfo(ConversionName, Loc, ConvNameLoc), ConvTy, ConvTSI,
- /*isInline=*/true, /*isExplicit=*/false,
- /*isConstexpr=*/S.getLangOpts().CPlusPlus17,
+ /*isInline=*/true, ExplicitSpecifier(),
+ S.getLangOpts().CPlusPlus17 ? CSK_constexpr : CSK_unspecified,
CallOperator->getBody()->getEndLoc());
Conversion->setAccess(AS_public);
Conversion->setImplicit(true);
@@ -1344,8 +1376,7 @@ static void addFunctionPointerConversion(Sema &S,
CXXMethodDecl *Invoke = CXXMethodDecl::Create(
S.Context, Class, Loc, DeclarationNameInfo(InvokerName, Loc),
InvokerFunctionTy, CallOperator->getTypeSourceInfo(), SC_Static,
- /*IsInline=*/true,
- /*IsConstexpr=*/false, CallOperator->getBody()->getEndLoc());
+ /*isInline=*/true, CSK_unspecified, CallOperator->getBody()->getEndLoc());
for (unsigned I = 0, N = CallOperator->getNumParams(); I != N; ++I)
InvokerParams[I]->setOwningFunction(Invoke);
Invoke->setParams(InvokerParams);
@@ -1391,21 +1422,29 @@ static void addBlockPointerConversion(Sema &S,
CXXConversionDecl *Conversion = CXXConversionDecl::Create(
S.Context, Class, Loc, DeclarationNameInfo(Name, Loc, NameLoc), ConvTy,
S.Context.getTrivialTypeSourceInfo(ConvTy, Loc),
- /*isInline=*/true, /*isExplicit=*/false,
- /*isConstexpr=*/false, CallOperator->getBody()->getEndLoc());
+ /*isInline=*/true, ExplicitSpecifier(), CSK_unspecified,
+ CallOperator->getBody()->getEndLoc());
Conversion->setAccess(AS_public);
Conversion->setImplicit(true);
Class->addDecl(Conversion);
}
-static ExprResult performLambdaVarCaptureInitialization(
- Sema &S, const Capture &Capture, FieldDecl *Field,
- SourceLocation ImplicitCaptureLoc, bool IsImplicitCapture) {
- assert(Capture.isVariableCapture() && "not a variable capture");
+ExprResult Sema::BuildCaptureInit(const Capture &Cap,
+ SourceLocation ImplicitCaptureLoc,
+ bool IsOpenMPMapping) {
+ // VLA captures don't have a stored initialization expression.
+ if (Cap.isVLATypeCapture())
+ return ExprResult();
+
+ // An init-capture is initialized directly from its stored initializer.
+ if (Cap.isInitCapture())
+ return Cap.getVariable()->getInit();
- auto *Var = Capture.getVariable();
+ // For anything else, build an initialization expression. For an implicit
+ // capture, the capture notionally happens at the capture-default, so use
+ // that location here.
SourceLocation Loc =
- IsImplicitCapture ? ImplicitCaptureLoc : Capture.getLocation();
+ ImplicitCaptureLoc.isValid() ? ImplicitCaptureLoc : Cap.getLocation();
// C++11 [expr.prim.lambda]p21:
// When the lambda-expression is evaluated, the entities that
@@ -1419,17 +1458,39 @@ static ExprResult performLambdaVarCaptureInitialization(
// C++ [expr.prim.lambda]p12:
// An entity captured by a lambda-expression is odr-used (3.2) in
// the scope containing the lambda-expression.
- ExprResult RefResult = S.BuildDeclarationNameExpr(
+ ExprResult Init;
+ IdentifierInfo *Name = nullptr;
+ if (Cap.isThisCapture()) {
+ QualType ThisTy = getCurrentThisType();
+ Expr *This = BuildCXXThisExpr(Loc, ThisTy, ImplicitCaptureLoc.isValid());
+ if (Cap.isCopyCapture())
+ Init = CreateBuiltinUnaryOp(Loc, UO_Deref, This);
+ else
+ Init = This;
+ } else {
+ assert(Cap.isVariableCapture() && "unknown kind of capture");
+ VarDecl *Var = Cap.getVariable();
+ Name = Var->getIdentifier();
+ Init = BuildDeclarationNameExpr(
CXXScopeSpec(), DeclarationNameInfo(Var->getDeclName(), Loc), Var);
- if (RefResult.isInvalid())
+ }
+
+ // In OpenMP, the capture kind doesn't actually describe how to capture:
+ // variables are "mapped" onto the device in a process that does not formally
+ // make a copy, even for a "copy capture".
+ if (IsOpenMPMapping)
+ return Init;
+
+ if (Init.isInvalid())
return ExprError();
- Expr *Ref = RefResult.get();
- auto Entity = InitializedEntity::InitializeLambdaCapture(
- Var->getIdentifier(), Field->getType(), Loc);
- InitializationKind InitKind = InitializationKind::CreateDirect(Loc, Loc, Loc);
- InitializationSequence Init(S, Entity, InitKind, Ref);
- return Init.Perform(S, Entity, InitKind, Ref);
+ Expr *InitExpr = Init.get();
+ InitializedEntity Entity = InitializedEntity::InitializeLambdaCapture(
+ Name, Cap.getCaptureType(), Loc);
+ InitializationKind InitKind =
+ InitializationKind::CreateDirect(Loc, Loc, Loc);
+ InitializationSequence InitSeq(*this, Entity, InitKind, InitExpr);
+ return InitSeq.Perform(*this, Entity, InitKind, InitExpr);
}
ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
@@ -1456,8 +1517,8 @@ mapImplicitCaptureStyle(CapturingScopeInfo::ImplicitCaptureStyle ICS) {
}
bool Sema::CaptureHasSideEffects(const Capture &From) {
- if (!From.isVLATypeCapture()) {
- Expr *Init = From.getInitExpr();
+ if (From.isInitCapture()) {
+ Expr *Init = From.getVariable()->getInit();
if (Init && Init->HasSideEffects(Context))
return true;
}
@@ -1498,6 +1559,54 @@ bool Sema::DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
return true;
}
+/// Create a field within the lambda class or captured statement record for the
+/// given capture.
+FieldDecl *Sema::BuildCaptureField(RecordDecl *RD,
+ const sema::Capture &Capture) {
+ SourceLocation Loc = Capture.getLocation();
+ QualType FieldType = Capture.getCaptureType();
+
+ TypeSourceInfo *TSI = nullptr;
+ if (Capture.isVariableCapture()) {
+ auto *Var = Capture.getVariable();
+ if (Var->isInitCapture())
+ TSI = Capture.getVariable()->getTypeSourceInfo();
+ }
+
+ // FIXME: Should we really be doing this? A null TypeSourceInfo seems more
+ // appropriate, at least for an implicit capture.
+ if (!TSI)
+ TSI = Context.getTrivialTypeSourceInfo(FieldType, Loc);
+
+ // Build the non-static data member.
+ FieldDecl *Field =
+ FieldDecl::Create(Context, RD, Loc, Loc, nullptr, FieldType, TSI, nullptr,
+ false, ICIS_NoInit);
+ // If the variable being captured has an invalid type, mark the class as
+ // invalid as well.
+ if (!FieldType->isDependentType()) {
+ if (RequireCompleteType(Loc, FieldType, diag::err_field_incomplete)) {
+ RD->setInvalidDecl();
+ Field->setInvalidDecl();
+ } else {
+ NamedDecl *Def;
+ FieldType->isIncompleteType(&Def);
+ if (Def && Def->isInvalidDecl()) {
+ RD->setInvalidDecl();
+ Field->setInvalidDecl();
+ }
+ }
+ }
+ Field->setImplicit(true);
+ Field->setAccess(AS_private);
+ RD->addDecl(Field);
+
+ if (Capture.isVLATypeCapture())
+ Field->setCapturedVLAType(Capture.getCapturedVLAType());
+
+ return Field;
+}
+
ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
LambdaScopeInfo *LSI) {
// Collect information from the lambda scope.
@@ -1535,28 +1644,33 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
PopExpressionEvaluationContext();
- // Translate captures.
- auto CurField = Class->field_begin();
// True if the current capture has a used capture or default before it.
bool CurHasPreviousCapture = CaptureDefault != LCD_None;
SourceLocation PrevCaptureLoc = CurHasPreviousCapture ?
CaptureDefaultLoc : IntroducerRange.getBegin();
- for (unsigned I = 0, N = LSI->Captures.size(); I != N; ++I, ++CurField) {
+ for (unsigned I = 0, N = LSI->Captures.size(); I != N; ++I) {
const Capture &From = LSI->Captures[I];
+ if (From.isInvalid())
+ return ExprError();
+
assert(!From.isBlockCapture() && "Cannot capture __block variables");
bool IsImplicit = I >= LSI->NumExplicitCaptures;
+ SourceLocation ImplicitCaptureLoc =
+ IsImplicit ? CaptureDefaultLoc : SourceLocation();
// Use source ranges of explicit captures for fixits where available.
SourceRange CaptureRange = LSI->ExplicitCaptureRanges[I];
// Warn about unused explicit captures.
bool IsCaptureUsed = true;
- if (!CurContext->isDependentContext() && !IsImplicit && !From.isODRUsed()) {
+ if (!CurContext->isDependentContext() && !IsImplicit &&
+ !From.isODRUsed()) {
// Initialized captures that are non-ODR used may not be eliminated.
+ // FIXME: Where did the IsGenericLambda here come from?
bool NonODRUsedInitCapture =
- IsGenericLambda && From.isNonODRUsed() && From.getInitExpr();
+ IsGenericLambda && From.isNonODRUsed() && From.isInitCapture();
if (!NonODRUsedInitCapture) {
bool IsLast = (I + 1) == LSI->NumExplicitCaptures;
SourceRange FixItRange;
@@ -1582,45 +1696,44 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
PrevCaptureLoc = CaptureRange.getEnd();
}
- // Handle 'this' capture.
- if (From.isThisCapture()) {
- // Capturing 'this' implicitly with a default of '[=]' is deprecated,
- // because it results in a reference capture. Don't warn prior to
- // C++2a; there's nothing that can be done about it before then.
- if (getLangOpts().CPlusPlus2a && IsImplicit &&
- CaptureDefault == LCD_ByCopy) {
- Diag(From.getLocation(), diag::warn_deprecated_this_capture);
- Diag(CaptureDefaultLoc, diag::note_deprecated_this_capture)
- << FixItHint::CreateInsertion(
- getLocForEndOfToken(CaptureDefaultLoc), ", this");
+ // Map the capture to our AST representation.
+ LambdaCapture Capture = [&] {
+ if (From.isThisCapture()) {
+ // Capturing 'this' implicitly with a default of '[=]' is deprecated,
+ // because it results in a reference capture. Don't warn prior to
+ // C++2a; there's nothing that can be done about it before then.
+ if (getLangOpts().CPlusPlus2a && IsImplicit &&
+ CaptureDefault == LCD_ByCopy) {
+ Diag(From.getLocation(), diag::warn_deprecated_this_capture);
+ Diag(CaptureDefaultLoc, diag::note_deprecated_this_capture)
+ << FixItHint::CreateInsertion(
+ getLocForEndOfToken(CaptureDefaultLoc), ", this");
+ }
+ return LambdaCapture(From.getLocation(), IsImplicit,
+ From.isCopyCapture() ? LCK_StarThis : LCK_This);
+ } else if (From.isVLATypeCapture()) {
+ return LambdaCapture(From.getLocation(), IsImplicit, LCK_VLAType);
+ } else {
+ assert(From.isVariableCapture() && "unknown kind of capture");
+ VarDecl *Var = From.getVariable();
+ LambdaCaptureKind Kind =
+ From.isCopyCapture() ? LCK_ByCopy : LCK_ByRef;
+ return LambdaCapture(From.getLocation(), IsImplicit, Kind, Var,
+ From.getEllipsisLoc());
}
+ }();
- Captures.push_back(
- LambdaCapture(From.getLocation(), IsImplicit,
- From.isCopyCapture() ? LCK_StarThis : LCK_This));
- CaptureInits.push_back(From.getInitExpr());
- continue;
- }
- if (From.isVLATypeCapture()) {
- Captures.push_back(
- LambdaCapture(From.getLocation(), IsImplicit, LCK_VLAType));
- CaptureInits.push_back(nullptr);
- continue;
- }
+ // Form the initializer for the capture field.
+ ExprResult Init = BuildCaptureInit(From, ImplicitCaptureLoc);
- VarDecl *Var = From.getVariable();
- LambdaCaptureKind Kind = From.isCopyCapture() ? LCK_ByCopy : LCK_ByRef;
- Captures.push_back(LambdaCapture(From.getLocation(), IsImplicit, Kind,
- Var, From.getEllipsisLoc()));
- Expr *Init = From.getInitExpr();
- if (!Init) {
- auto InitResult = performLambdaVarCaptureInitialization(
- *this, From, *CurField, CaptureDefaultLoc, IsImplicit);
- if (InitResult.isInvalid())
- return ExprError();
- Init = InitResult.get();
- }
- CaptureInits.push_back(Init);
+ // FIXME: Skip this capture if the capture is not used, the initializer
+ // has no side-effects, the type of the capture is trivial, and the
+ // lambda is not externally visible.
+
+ // Add a FieldDecl for the capture and form its initializer.
+ BuildCaptureField(Class, From);
+ Captures.push_back(Capture);
+ CaptureInits.push_back(Init.get());
}
// C++11 [expr.prim.lambda]p6:
@@ -1664,9 +1777,11 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
!isa<CoroutineBodyStmt>(CallOperator->getBody()) &&
!Class->getDeclContext()->isDependentContext()) {
TentativeAnalysisScope DiagnosticScopeGuard(*this);
- CallOperator->setConstexpr(
- CheckConstexprFunctionDecl(CallOperator) &&
- CheckConstexprFunctionBody(CallOperator, CallOperator->getBody()));
+ CallOperator->setConstexprKind(
+ (CheckConstexprFunctionDecl(CallOperator) &&
+ CheckConstexprFunctionBody(CallOperator, CallOperator->getBody()))
+ ? CSK_constexpr
+ : CSK_unspecified);
}
// Emit delayed shadowing warnings now that the full capture list is known.
@@ -1745,7 +1860,7 @@ ExprResult Sema::BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
Context, Block, From->getBeginLoc(), From->getLocation(),
From->getIdentifier(), From->getType(), From->getTypeSourceInfo(),
From->getStorageClass(),
- /*DefaultArg=*/nullptr));
+ /*DefArg=*/nullptr));
}
Block->setParams(BlockParams);
@@ -1760,8 +1875,8 @@ ExprResult Sema::BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
ConvLocation, nullptr,
Src->getType(), CapVarTSI,
SC_None);
- BlockDecl::Capture Capture(/*Variable=*/CapVar, /*ByRef=*/false,
- /*Nested=*/false, /*Copy=*/Init.get());
+ BlockDecl::Capture Capture(/*variable=*/CapVar, /*byRef=*/false,
+ /*nested=*/false, /*copy=*/Init.get());
Block->setCaptures(Context, Capture, /*CapturesCXXThis=*/false);
// Add a fake function body to the block. IR generation is responsible
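The SemaLambda changes above wire up two C++2a lambda features: an explicit template parameter list on the call operator and init-capture packs. A small sketch, assuming a compiler invoked with -std=c++2a; the helper names are invented for the example.

#include <utility>

template <typename... Ts>
auto make_adder(Ts... ts) {
  // init-capture pack: every element of the pack is captured by move
  return [...vs = std::move(ts)](auto x) { return (x + ... + vs); };
}

int main() {
  // explicit template parameter list instead of a plain 'auto' parameter
  auto twice = []<typename T>(T v) { return v + v; };
  return twice(2) + make_adder(1, 2, 3)(4); // 4 + 10
}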
diff --git a/lib/Sema/SemaLookup.cpp b/lib/Sema/SemaLookup.cpp
index effccc2f3d38..8a24dd884a76 100644
--- a/lib/Sema/SemaLookup.cpp
+++ b/lib/Sema/SemaLookup.cpp
@@ -1,9 +1,8 @@
//===--------------------- SemaLookup.cpp - Name Lookup ------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -22,6 +21,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Basic/Builtins.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/ModuleLoader.h"
@@ -47,6 +47,8 @@
#include <utility>
#include <vector>
+#include "OpenCLBuiltins.inc"
+
using namespace clang;
using namespace sema;
@@ -279,6 +281,10 @@ static inline unsigned getIDNS(Sema::LookupNameKind NameKind,
IDNS = Decl::IDNS_OMPReduction;
break;
+ case Sema::LookupOMPMapperName:
+ IDNS = Decl::IDNS_OMPMapper;
+ break;
+
case Sema::LookupAnyName:
IDNS = Decl::IDNS_Ordinary | Decl::IDNS_Tag | Decl::IDNS_Member
| Decl::IDNS_Using | Decl::IDNS_Namespace | Decl::IDNS_ObjCProtocol
@@ -667,6 +673,79 @@ LLVM_DUMP_METHOD void LookupResult::dump() {
D->dump();
}
+/// When trying to resolve a function name, if the isOpenCLBuiltin function
+/// defined in "OpenCLBuiltins.inc" returns a non-null <Index, Len>, then the
+/// identifier is referencing an OpenCL builtin function. Thus, all its
+/// prototypes are added to the LookupResult.
+///
+/// \param S The Sema instance
+/// \param LR The LookupResult instance
+/// \param II The identifier being resolved
+/// \param Index The list of prototypes starts at Index in OpenCLBuiltins[]
+/// \param Len The list of prototypes has Len elements
+static void InsertOCLBuiltinDeclarations(Sema &S, LookupResult &LR,
+ IdentifierInfo *II, unsigned Index,
+ unsigned Len) {
+
+ for (unsigned i = 0; i < Len; ++i) {
+ const OpenCLBuiltinDecl &Decl = OpenCLBuiltins[Index - 1 + i];
+ ASTContext &Context = S.Context;
+
+ // Ignore this BIF if the version is incorrect.
+ if (Context.getLangOpts().OpenCLVersion < Decl.Version)
+ continue;
+
+ FunctionProtoType::ExtProtoInfo PI;
+ PI.Variadic = false;
+
+ // Defined in "OpenCLBuiltins.inc"
+ QualType RT = OCL2Qual(Context, OpenCLSignature[Decl.ArgTableIndex]);
+
+ SmallVector<QualType, 5> ArgTypes;
+ for (unsigned I = 1; I < Decl.NumArgs; I++) {
+ QualType Ty = OCL2Qual(Context, OpenCLSignature[Decl.ArgTableIndex + I]);
+ ArgTypes.push_back(Ty);
+ }
+
+ QualType R = Context.getFunctionType(RT, ArgTypes, PI);
+ SourceLocation Loc = LR.getNameLoc();
+
+ // TODO: This part is taken from Sema::LazilyCreateBuiltin,
+ // maybe refactor it.
+ DeclContext *Parent = Context.getTranslationUnitDecl();
+ FunctionDecl *New = FunctionDecl::Create(Context, Parent, Loc, Loc, II, R,
+ /*TInfo=*/nullptr, SC_Extern,
+ false, R->isFunctionProtoType());
+ New->setImplicit();
+
+ // Create Decl objects for each parameter, adding them to the
+ // FunctionDecl.
+ if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(R)) {
+ SmallVector<ParmVarDecl *, 16> Params;
+ for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
+ ParmVarDecl *Parm =
+ ParmVarDecl::Create(Context, New, SourceLocation(),
+ SourceLocation(), nullptr, FT->getParamType(i),
+ /*TInfo=*/nullptr, SC_None, nullptr);
+ Parm->setScopeInfo(0, i);
+ Params.push_back(Parm);
+ }
+ New->setParams(Params);
+ }
+
+ New->addAttr(OverloadableAttr::CreateImplicit(Context));
+
+ if (strlen(Decl.Extension))
+ S.setOpenCLExtensionForDecl(New, Decl.Extension);
+
+ LR.addDecl(New);
+ }
+
+ // If we added overloads, need to resolve the lookup result.
+ if (Len > 1)
+ LR.resolveKind();
+}
+
/// Lookup a builtin function, when name lookup would otherwise
/// fail.
static bool LookupBuiltin(Sema &S, LookupResult &R) {
@@ -689,6 +768,15 @@ static bool LookupBuiltin(Sema &S, LookupResult &R) {
}
}
+ // Check if this is an OpenCL Builtin, and if so, insert its overloads.
+ if (S.getLangOpts().OpenCL && S.getLangOpts().DeclareOpenCLBuiltins) {
+ auto Index = isOpenCLBuiltin(II->getName());
+ if (Index.first) {
+ InsertOCLBuiltinDeclarations(S, R, II, Index.first, Index.second);
+ return true;
+ }
+ }
+
// If this is a builtin on this (or all) targets, create the decl.
if (unsigned BuiltinID = II->getBuiltinID()) {
// In C++ and OpenCL (spec v1.2 s6.9.f), we don't have any predefined
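The OpenCL builtin lookup added above is table driven: a generated isOpenCLBuiltin() maps an identifier to an <Index, Len> slice of a flat prototype table, and one implicit FunctionDecl is created per prototype. A standalone C++ sketch of that lookup shape follows; the table contents and names are invented, not clang's generated ones.

#include <iostream>
#include <string>
#include <utility>

struct ProtoEntry { const char *Name; unsigned Index, Len; };
static const ProtoEntry Table[] = {{"clamp", 1, 3}, {"mix", 4, 2}};

// Stand-in for the generated lookup; {0, 0} means "not a builtin".
std::pair<unsigned, unsigned> isBuiltin(const std::string &Name) {
  for (const auto &E : Table)
    if (Name == E.Name)
      return {E.Index, E.Len};
  return {0, 0};
}

int main() {
  auto R = isBuiltin("mix");
  if (R.first)
    std::cout << "mix: " << R.second << " overloads starting at slot "
              << R.first << "\n";
}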
@@ -1540,8 +1628,21 @@ bool LookupResult::isVisibleSlow(Sema &SemaRef, NamedDecl *D) {
// and in C we must not because each declaration of a function gets its own
// set of declarations for tags in prototype scope.
bool VisibleWithinParent;
- if (D->isTemplateParameter() || isa<ParmVarDecl>(D) ||
- (isa<FunctionDecl>(DC) && !SemaRef.getLangOpts().CPlusPlus))
+ if (D->isTemplateParameter()) {
+ bool SearchDefinitions = true;
+ if (const auto *DCD = dyn_cast<Decl>(DC)) {
+ if (const auto *TD = DCD->getDescribedTemplate()) {
+ TemplateParameterList *TPL = TD->getTemplateParameters();
+ auto Index = getDepthAndIndex(D).second;
+ SearchDefinitions = Index >= TPL->size() || TPL->getParam(Index) != D;
+ }
+ }
+ if (SearchDefinitions)
+ VisibleWithinParent = SemaRef.hasVisibleDefinition(cast<NamedDecl>(DC));
+ else
+ VisibleWithinParent = isVisible(SemaRef, cast<NamedDecl>(DC));
+ } else if (isa<ParmVarDecl>(D) ||
+ (isa<FunctionDecl>(DC) && !SemaRef.getLangOpts().CPlusPlus))
VisibleWithinParent = isVisible(SemaRef, cast<NamedDecl>(DC));
else if (D->isModulePrivate()) {
// A module-private declaration is only visible if an enclosing lexical
@@ -2104,6 +2205,10 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
BaseCallback = &CXXRecordDecl::FindOMPReductionMember;
break;
+ case LookupOMPMapperName:
+ BaseCallback = &CXXRecordDecl::FindOMPMapperMember;
+ break;
+
case LookupUsingDeclName:
// This lookup is for redeclarations only.
@@ -2165,11 +2270,27 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
DeclContext::lookup_iterator FirstD = FirstPath->Decls.begin();
DeclContext::lookup_iterator CurrentD = Path->Decls.begin();
+ // Get the decl that we should use for deduplicating this lookup.
+ auto GetRepresentativeDecl = [&](NamedDecl *D) -> Decl * {
+ // C++ [temp.local]p3:
+ // A lookup that finds an injected-class-name (10.2) can result in
+ // an ambiguity in certain cases (for example, if it is found in
+ // more than one base class). If all of the injected-class-names
+ // that are found refer to specializations of the same class
+ // template, and if the name is used as a template-name, the
+ // reference refers to the class template itself and not a
+ // specialization thereof, and is not ambiguous.
+ if (R.isTemplateNameLookup())
+ if (auto *TD = getAsTemplateNameDecl(D))
+ D = TD;
+ return D->getUnderlyingDecl()->getCanonicalDecl();
+ };
+
while (FirstD != FirstPath->Decls.end() &&
CurrentD != Path->Decls.end()) {
- if ((*FirstD)->getUnderlyingDecl()->getCanonicalDecl() !=
- (*CurrentD)->getUnderlyingDecl()->getCanonicalDecl())
- break;
+ if (GetRepresentativeDecl(*FirstD) !=
+ GetRepresentativeDecl(*CurrentD))
+ break;
++FirstD;
++CurrentD;
@@ -2417,40 +2538,56 @@ namespace {
InstantiationLoc(InstantiationLoc) {
}
+ bool addClassTransitive(CXXRecordDecl *RD) {
+ Classes.insert(RD);
+ return ClassesTransitive.insert(RD);
+ }
+
Sema &S;
Sema::AssociatedNamespaceSet &Namespaces;
Sema::AssociatedClassSet &Classes;
SourceLocation InstantiationLoc;
+
+ private:
+ Sema::AssociatedClassSet ClassesTransitive;
};
} // end anonymous namespace
static void
addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType T);
+// Given the declaration context \param Ctx of a class, class template or
+// enumeration, add the associated namespaces to \param Namespaces as described
+// in [basic.lookup.argdep]p2.
static void CollectEnclosingNamespace(Sema::AssociatedNamespaceSet &Namespaces,
DeclContext *Ctx) {
- // Add the associated namespace for this class.
-
- // We don't use DeclContext::getEnclosingNamespaceContext() as this may
- // be a locally scoped record.
+ // The exact wording has been changed in C++14 as a result of
+ // CWG 1691 (see also CWG 1690 and CWG 1692). We apply it unconditionally
+ // to all language versions since it is possible to return a local type
+ // from a lambda in C++11.
+ //
+ // C++14 [basic.lookup.argdep]p2:
+ // If T is a class type [...]. Its associated namespaces are the innermost
+ // enclosing namespaces of its associated classes. [...]
+ //
+ // If T is an enumeration type, its associated namespace is the innermost
+ // enclosing namespace of its declaration. [...]
- // We skip out of inline namespaces. The innermost non-inline namespace
+ // We additionally skip inline namespaces. The innermost non-inline namespace
// contains all names of all its nested inline namespaces anyway, so we can
// replace the entire inline namespace tree with its root.
- while (Ctx->isRecord() || Ctx->isTransparentContext() ||
- Ctx->isInlineNamespace())
+ while (!Ctx->isFileContext() || Ctx->isInlineNamespace())
Ctx = Ctx->getParent();
- if (Ctx->isFileContext())
- Namespaces.insert(Ctx->getPrimaryContext());
+ Namespaces.insert(Ctx->getPrimaryContext());
}
// Add the associated classes and namespaces for argument-dependent
-// lookup that involves a template argument (C++ [basic.lookup.koenig]p2).
+// lookup that involves a template argument (C++ [basic.lookup.argdep]p2).
static void
addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
const TemplateArgument &Arg) {
- // C++ [basic.lookup.koenig]p2, last bullet:
+ // C++ [basic.lookup.argdep]p2, last bullet:
// -- [...] ;
switch (Arg.getKind()) {
case TemplateArgument::Null:
@@ -2495,9 +2632,8 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
}
}
-// Add the associated classes and namespaces for
-// argument-dependent lookup with an argument of class type
-// (C++ [basic.lookup.koenig]p2).
+// Add the associated classes and namespaces for argument-dependent lookup
+// with an argument of class type (C++ [basic.lookup.argdep]p2).
static void
addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
CXXRecordDecl *Class) {
@@ -2506,30 +2642,22 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
if (Class->getDeclName() == Result.S.VAListTagName)
return;
- // C++ [basic.lookup.koenig]p2:
+ // C++ [basic.lookup.argdep]p2:
// [...]
// -- If T is a class type (including unions), its associated
// classes are: the class itself; the class of which it is a
- // member, if any; and its direct and indirect base
- // classes. Its associated namespaces are the namespaces in
- // which its associated classes are defined.
+ // member, if any; and its direct and indirect base classes.
+ // Its associated namespaces are the innermost enclosing
+ // namespaces of its associated classes.
// Add the class of which it is a member, if any.
DeclContext *Ctx = Class->getDeclContext();
if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
Result.Classes.insert(EnclosingClass);
+
// Add the associated namespace for this class.
CollectEnclosingNamespace(Result.Namespaces, Ctx);
- // Add the class itself. If we've already seen this class, we don't
- // need to visit base classes.
- //
- // FIXME: That's not correct, we may have added this class only because it
- // was the enclosing class of another class, and in that case we won't have
- // added its base classes yet.
- if (!Result.Classes.insert(Class))
- return;
-
// -- If T is a template-id, its associated namespaces and classes are
// the namespace in which the template is defined; for member
// templates, the member template's class; the namespaces and classes
@@ -2552,6 +2680,11 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
addAssociatedClassesAndNamespaces(Result, TemplateArgs[I]);
}
+ // Add the class itself. If we've already transitively visited this class,
+ // we don't need to visit base classes.
+ if (!Result.addClassTransitive(Class))
+ return;
+
// Only recurse into base classes for complete types.
if (!Result.S.isCompleteType(Result.InstantiationLoc,
Result.S.Context.getRecordType(Class)))
@@ -2577,7 +2710,7 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
if (!BaseType)
continue;
CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(BaseType->getDecl());
- if (Result.Classes.insert(BaseDecl)) {
+ if (Result.addClassTransitive(BaseDecl)) {
// Find the associated namespace for this base class.
DeclContext *BaseCtx = BaseDecl->getDeclContext();
CollectEnclosingNamespace(Result.Namespaces, BaseCtx);
@@ -2642,10 +2775,10 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
break;
// -- If T is a class type (including unions), its associated
- // classes are: the class itself; the class of which it is a
- // member, if any; and its direct and indirect base
- // classes. Its associated namespaces are the namespaces in
- // which its associated classes are defined.
+ // classes are: the class itself; the class of which it is
+ // a member, if any; and its direct and indirect base classes.
+ // Its associated namespaces are the innermost enclosing
+ // namespaces of its associated classes.
case Type::Record: {
CXXRecordDecl *Class =
cast<CXXRecordDecl>(cast<RecordType>(T)->getDecl());
@@ -2653,10 +2786,10 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
break;
}
- // -- If T is an enumeration type, its associated namespace is
- // the namespace in which it is defined. If it is class
- // member, its associated class is the member's class; else
- // it has no associated class.
+ // -- If T is an enumeration type, its associated namespace
+ // is the innermost enclosing namespace of its declaration.
+ // If it is a class member, its associated class is the
+ // member’s class; else it has no associated class.
case Type::Enum: {
EnumDecl *Enum = cast<EnumType>(T)->getDecl();
@@ -2664,7 +2797,7 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
Result.Classes.insert(EnclosingClass);
- // Add the associated namespace for this class.
+ // Add the associated namespace for this enumeration.
CollectEnclosingNamespace(Result.Namespaces, Ctx);
break;
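The argument-dependent lookup hunks above apply the CWG 1691 wording quoted in the comments: the associated namespaces of a class or enumeration are the innermost enclosing namespaces of its associated classes or of its declaration. A short C++14 example of the consequence for a local class, with invented names:

namespace ns {
  auto make() {
    struct Local {} l; // Local's innermost enclosing namespace is ns
    return l;
  }
  void use(decltype(make())) {}
}

int main() {
  use(ns::make()); // unqualified call: ADL associates namespace ns, finds ns::use
}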
@@ -2793,15 +2926,9 @@ void Sema::FindAssociatedClassesAndNamespaces(
// in which the function or function template is defined and the
// classes and namespaces associated with its (non-dependent)
// parameter types and return type.
- Arg = Arg->IgnoreParens();
- if (UnaryOperator *unaryOp = dyn_cast<UnaryOperator>(Arg))
- if (unaryOp->getOpcode() == UO_AddrOf)
- Arg = unaryOp->getSubExpr();
+ OverloadExpr *OE = OverloadExpr::find(Arg).Expression;
- UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(Arg);
- if (!ULE) continue;
-
- for (const auto *D : ULE->decls()) {
+ for (const NamedDecl *D : OE->decls()) {
// Look through any using declarations to find the underlying function.
const FunctionDecl *FDecl = D->getUnderlyingDecl()->getAsFunction();
@@ -2999,10 +3126,11 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD,
llvm::makeArrayRef(&Arg, NumArgs), OCS, true);
else if (CtorInfo)
AddOverloadCandidate(CtorInfo.Constructor, CtorInfo.FoundDecl,
- llvm::makeArrayRef(&Arg, NumArgs), OCS, true);
+ llvm::makeArrayRef(&Arg, NumArgs), OCS,
+ /*SuppressUserConversions*/ true);
else
AddOverloadCandidate(M, Cand, llvm::makeArrayRef(&Arg, NumArgs), OCS,
- true);
+ /*SuppressUserConversions*/ true);
} else if (FunctionTemplateDecl *Tmpl =
dyn_cast<FunctionTemplateDecl>(Cand->getUnderlyingDecl())) {
if (SM == CXXCopyAssignment || SM == CXXMoveAssignment)
@@ -4317,9 +4445,8 @@ void TypoCorrectionConsumer::NamespaceSpecifierSet::addNameSpecifier(
SpecifierOStream.flush();
SameNameSpecifier = NewNameSpecifier == CurNameSpecifier;
}
- if (SameNameSpecifier ||
- std::find(CurContextIdentifiers.begin(), CurContextIdentifiers.end(),
- Name) != CurContextIdentifiers.end()) {
+ if (SameNameSpecifier || llvm::find(CurContextIdentifiers, Name) !=
+ CurContextIdentifiers.end()) {
// Rebuild the NestedNameSpecifier as a globally-qualified specifier.
NNS = NestedNameSpecifier::GlobalSpecifier(Context);
NumSpecifiers =
@@ -4551,8 +4678,7 @@ static void AddKeywordsToConsumer(Sema &SemaRef,
std::unique_ptr<TypoCorrectionConsumer> Sema::makeTypoCorrectionConsumer(
const DeclarationNameInfo &TypoName, Sema::LookupNameKind LookupKind,
- Scope *S, CXXScopeSpec *SS,
- std::unique_ptr<CorrectionCandidateCallback> CCC,
+ Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT, bool ErrorRecovery) {
@@ -4614,9 +4740,13 @@ std::unique_ptr<TypoCorrectionConsumer> Sema::makeTypoCorrectionConsumer(
TypoName.getBeginLoc());
}
- CorrectionCandidateCallback &CCCRef = *CCC;
+ // Extend the lifetime of the callback. We delayed this until here
+ // to avoid allocations in the hot path (which is where no typo correction
+ // occurs). Note that CorrectionCandidateCallback is polymorphic and
+ // initially stack-allocated.
+ std::unique_ptr<CorrectionCandidateCallback> ClonedCCC = CCC.clone();
auto Consumer = llvm::make_unique<TypoCorrectionConsumer>(
- *this, TypoName, LookupKind, S, SS, std::move(CCC), MemberContext,
+ *this, TypoName, LookupKind, S, SS, std::move(ClonedCCC), MemberContext,
EnteringContext);
// Perform name lookup to find visible, similarly-named entities.
@@ -4668,7 +4798,9 @@ std::unique_ptr<TypoCorrectionConsumer> Sema::makeTypoCorrectionConsumer(
}
}
- AddKeywordsToConsumer(*this, *Consumer, S, CCCRef, SS && SS->isNotEmpty());
+ AddKeywordsToConsumer(*this, *Consumer, S,
+ *Consumer->getCorrectionValidator(),
+ SS && SS->isNotEmpty());
// Build the NestedNameSpecifiers for the KnownNamespaces, if we're going
// to search those namespaces.
@@ -4722,19 +4854,18 @@ std::unique_ptr<TypoCorrectionConsumer> Sema::makeTypoCorrectionConsumer(
TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
- std::unique_ptr<CorrectionCandidateCallback> CCC,
+ CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext,
bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool RecordFailure) {
- assert(CCC && "CorrectTypo requires a CorrectionCandidateCallback");
-
// Always let the ExternalSource have the first chance at correction, even
// if we would otherwise have given up.
if (ExternalSource) {
- if (TypoCorrection Correction = ExternalSource->CorrectTypo(
- TypoName, LookupKind, S, SS, *CCC, MemberContext, EnteringContext, OPT))
+ if (TypoCorrection Correction =
+ ExternalSource->CorrectTypo(TypoName, LookupKind, S, SS, CCC,
+ MemberContext, EnteringContext, OPT))
return Correction;
}
@@ -4742,12 +4873,12 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
// WantObjCSuper is only true for CTC_ObjCMessageReceiver and for
// some instances of CTC_Unknown, while WantRemainingKeywords is true
// for CTC_Unknown but not for CTC_ObjCMessageReceiver.
- bool ObjCMessageReceiver = CCC->WantObjCSuper && !CCC->WantRemainingKeywords;
+ bool ObjCMessageReceiver = CCC.WantObjCSuper && !CCC.WantRemainingKeywords;
IdentifierInfo *Typo = TypoName.getName().getAsIdentifierInfo();
- auto Consumer = makeTypoCorrectionConsumer(
- TypoName, LookupKind, S, SS, std::move(CCC), MemberContext,
- EnteringContext, OPT, Mode == CTK_ErrorRecovery);
+ auto Consumer = makeTypoCorrectionConsumer(TypoName, LookupKind, S, SS, CCC,
+ MemberContext, EnteringContext,
+ OPT, Mode == CTK_ErrorRecovery);
if (!Consumer)
return TypoCorrection();
@@ -4857,16 +4988,13 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
/// needed.
TypoExpr *Sema::CorrectTypoDelayed(
const DeclarationNameInfo &TypoName, Sema::LookupNameKind LookupKind,
- Scope *S, CXXScopeSpec *SS,
- std::unique_ptr<CorrectionCandidateCallback> CCC,
+ Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT) {
- assert(CCC && "CorrectTypoDelayed requires a CorrectionCandidateCallback");
-
- auto Consumer = makeTypoCorrectionConsumer(
- TypoName, LookupKind, S, SS, std::move(CCC), MemberContext,
- EnteringContext, OPT, Mode == CTK_ErrorRecovery);
+ auto Consumer = makeTypoCorrectionConsumer(TypoName, LookupKind, S, SS, CCC,
+ MemberContext, EnteringContext,
+ OPT, Mode == CTK_ErrorRecovery);
// Give the external sema source a chance to correct the typo.
TypoCorrection ExternalTypo;
@@ -4954,7 +5082,9 @@ FunctionCallFilterCCC::FunctionCallFilterCCC(Sema &SemaRef, unsigned NumArgs,
: NumArgs(NumArgs), HasExplicitTemplateArgs(HasExplicitTemplateArgs),
CurContext(SemaRef.CurContext), MemberFn(ME) {
WantTypeSpecifiers = false;
- WantFunctionLikeCasts = SemaRef.getLangOpts().CPlusPlus && NumArgs == 1;
+ WantFunctionLikeCasts = SemaRef.getLangOpts().CPlusPlus &&
+ !HasExplicitTemplateArgs && NumArgs == 1;
+ WantCXXNamedCasts = HasExplicitTemplateArgs && NumArgs == 1;
WantRemainingKeywords = false;
}
@@ -4983,6 +5113,13 @@ bool FunctionCallFilterCCC::ValidateCandidate(const TypoCorrection &candidate) {
}
}
+ // A typo for a function-style cast can look like a function call in C++.
+ if ((HasExplicitTemplateArgs ? getAsTypeTemplateDecl(ND) != nullptr
+ : isa<TypeDecl>(ND)) &&
+ CurContext->getParentASTContext().getLangOpts().CPlusPlus)
+ // Only a class or class template can take two or more arguments.
+ return NumArgs <= 1 || HasExplicitTemplateArgs || isa<CXXRecordDecl>(ND);
+
// Skip the current candidate if it is not a FunctionDecl or does not accept
// the current number of arguments.
if (!FD || !(FD->getNumParams() >= NumArgs &&
@@ -5032,7 +5169,8 @@ static NamedDecl *getDefinitionToImport(NamedDecl *D) {
if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D))
return PD->getDefinition();
if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D))
- return getDefinitionToImport(TD->getTemplatedDecl());
+ if (NamedDecl *TTD = TD->getTemplatedDecl())
+ return getDefinitionToImport(TTD);
return nullptr;
}
@@ -5052,17 +5190,18 @@ void Sema::diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
auto Merged = Context.getModulesWithMergedDefinition(Def);
OwningModules.insert(OwningModules.end(), Merged.begin(), Merged.end());
- diagnoseMissingImport(Loc, Decl, Decl->getLocation(), OwningModules, MIK,
+ diagnoseMissingImport(Loc, Def, Def->getLocation(), OwningModules, MIK,
Recover);
}
/// Get a "quoted.h" or <angled.h> include path to use in a diagnostic
/// suggesting the addition of a #include of the specified file.
static std::string getIncludeStringForHeader(Preprocessor &PP,
- const FileEntry *E) {
- bool IsSystem;
- auto Path =
- PP.getHeaderSearchInfo().suggestPathToFileForDiagnostics(E, &IsSystem);
+ const FileEntry *E,
+ llvm::StringRef IncludingFile) {
+ bool IsSystem = false;
+ auto Path = PP.getHeaderSearchInfo().suggestPathToFileForDiagnostics(
+ E, IncludingFile, &IsSystem);
return (IsSystem ? '<' : '"') + Path + (IsSystem ? '>' : '"');
}
@@ -5072,12 +5211,63 @@ void Sema::diagnoseMissingImport(SourceLocation UseLoc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover) {
assert(!Modules.empty());
+ auto NotePrevious = [&] {
+ unsigned DiagID;
+ switch (MIK) {
+ case MissingImportKind::Declaration:
+ DiagID = diag::note_previous_declaration;
+ break;
+ case MissingImportKind::Definition:
+ DiagID = diag::note_previous_definition;
+ break;
+ case MissingImportKind::DefaultArgument:
+ DiagID = diag::note_default_argument_declared_here;
+ break;
+ case MissingImportKind::ExplicitSpecialization:
+ DiagID = diag::note_explicit_specialization_declared_here;
+ break;
+ case MissingImportKind::PartialSpecialization:
+ DiagID = diag::note_partial_specialization_declared_here;
+ break;
+ }
+ Diag(DeclLoc, DiagID);
+ };
+
// Weed out duplicates from module list.
llvm::SmallVector<Module*, 8> UniqueModules;
llvm::SmallDenseSet<Module*, 8> UniqueModuleSet;
- for (auto *M : Modules)
+ for (auto *M : Modules) {
+ if (M->Kind == Module::GlobalModuleFragment)
+ continue;
if (UniqueModuleSet.insert(M).second)
UniqueModules.push_back(M);
+ }
+
+ llvm::StringRef IncludingFile;
+ if (const FileEntry *FE =
+ SourceMgr.getFileEntryForID(SourceMgr.getFileID(UseLoc)))
+ IncludingFile = FE->tryGetRealPathName();
+
+ if (UniqueModules.empty()) {
+ // All candidates were global module fragments. Try to suggest a #include.
+ const FileEntry *E =
+ PP.getModuleHeaderToIncludeForDiagnostics(UseLoc, Modules[0], DeclLoc);
+ // FIXME: Find a smart place to suggest inserting a #include, and add
+ // a FixItHint there.
+ Diag(UseLoc, diag::err_module_unimported_use_global_module_fragment)
+ << (int)MIK << Decl << !!E
+ << (E ? getIncludeStringForHeader(PP, E, IncludingFile) : "");
+ // Produce a "previous" note if it will point to a header rather than some
+ // random global module fragment.
+ // FIXME: Suppress the note backtrace even under
+ // -fdiagnostics-show-note-include-stack.
+ if (E)
+ NotePrevious();
+ if (Recover)
+ createImplicitModuleImportForErrorRecovery(UseLoc, Modules[0]);
+ return;
+ }
+
Modules = UniqueModules;
if (Modules.size() > 1) {
@@ -5102,33 +5292,15 @@ void Sema::diagnoseMissingImport(SourceLocation UseLoc, NamedDecl *Decl,
// FIXME: Find a smart place to suggest inserting a #include, and add
// a FixItHint there.
Diag(UseLoc, diag::err_module_unimported_use_header)
- << (int)MIK << Decl << Modules[0]->getFullModuleName()
- << getIncludeStringForHeader(PP, E);
+ << (int)MIK << Decl << Modules[0]->getFullModuleName()
+ << getIncludeStringForHeader(PP, E, IncludingFile);
} else {
// FIXME: Add a FixItHint that imports the corresponding module.
Diag(UseLoc, diag::err_module_unimported_use)
<< (int)MIK << Decl << Modules[0]->getFullModuleName();
}
- unsigned DiagID;
- switch (MIK) {
- case MissingImportKind::Declaration:
- DiagID = diag::note_previous_declaration;
- break;
- case MissingImportKind::Definition:
- DiagID = diag::note_previous_definition;
- break;
- case MissingImportKind::DefaultArgument:
- DiagID = diag::note_default_argument_declared_here;
- break;
- case MissingImportKind::ExplicitSpecialization:
- DiagID = diag::note_explicit_specialization_declared_here;
- break;
- case MissingImportKind::PartialSpecialization:
- DiagID = diag::note_partial_specialization_declared_here;
- break;
- }
- Diag(DeclLoc, DiagID);
+ NotePrevious();
// Try to recover by implicitly importing this module.
if (Recover)
diff --git a/lib/Sema/SemaModule.cpp b/lib/Sema/SemaModule.cpp
new file mode 100644
index 000000000000..10de0ca91221
--- /dev/null
+++ b/lib/Sema/SemaModule.cpp
@@ -0,0 +1,710 @@
+//===--- SemaModule.cpp - Semantic Analysis for Modules -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for modules (C++ modules syntax,
+// Objective-C modules syntax, and Clang header modules).
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTConsumer.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/SemaInternal.h"
+
+using namespace clang;
+using namespace sema;
+
+static void checkModuleImportContext(Sema &S, Module *M,
+ SourceLocation ImportLoc, DeclContext *DC,
+ bool FromInclude = false) {
+ SourceLocation ExternCLoc;
+
+ if (auto *LSD = dyn_cast<LinkageSpecDecl>(DC)) {
+ switch (LSD->getLanguage()) {
+ case LinkageSpecDecl::lang_c:
+ if (ExternCLoc.isInvalid())
+ ExternCLoc = LSD->getBeginLoc();
+ break;
+ case LinkageSpecDecl::lang_cxx:
+ break;
+ }
+ DC = LSD->getParent();
+ }
+
+ while (isa<LinkageSpecDecl>(DC) || isa<ExportDecl>(DC))
+ DC = DC->getParent();
+
+ if (!isa<TranslationUnitDecl>(DC)) {
+ S.Diag(ImportLoc, (FromInclude && S.isModuleVisible(M))
+ ? diag::ext_module_import_not_at_top_level_noop
+ : diag::err_module_import_not_at_top_level_fatal)
+ << M->getFullModuleName() << DC;
+ S.Diag(cast<Decl>(DC)->getBeginLoc(),
+ diag::note_module_import_not_at_top_level)
+ << DC;
+ } else if (!M->IsExternC && ExternCLoc.isValid()) {
+ S.Diag(ImportLoc, diag::ext_module_import_in_extern_c)
+ << M->getFullModuleName();
+ S.Diag(ExternCLoc, diag::note_extern_c_begins_here);
+ }
+}
+
+Sema::DeclGroupPtrTy
+Sema::ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc) {
+ if (!ModuleScopes.empty() &&
+ ModuleScopes.back().Module->Kind == Module::GlobalModuleFragment) {
+ // Under -std=c++2a -fmodules-ts, we can find an explicit 'module;' after
+ // already implicitly entering the global module fragment. That's OK.
+ assert(getLangOpts().CPlusPlusModules && getLangOpts().ModulesTS &&
+ "unexpectedly encountered multiple global module fragment decls");
+ ModuleScopes.back().BeginLoc = ModuleLoc;
+ return nullptr;
+ }
+
+ // We start in the global module; all those declarations are implicitly
+ // module-private (though they do not have module linkage).
+ auto &Map = PP.getHeaderSearchInfo().getModuleMap();
+ auto *GlobalModule = Map.createGlobalModuleFragmentForModuleUnit(ModuleLoc);
+ assert(GlobalModule && "module creation should not fail");
+
+ // Enter the scope of the global module.
+ ModuleScopes.push_back({});
+ ModuleScopes.back().BeginLoc = ModuleLoc;
+ ModuleScopes.back().Module = GlobalModule;
+ VisibleModules.setVisible(GlobalModule, ModuleLoc);
+
+ // All declarations created from now on are owned by the global module.
+ auto *TU = Context.getTranslationUnitDecl();
+ TU->setModuleOwnershipKind(Decl::ModuleOwnershipKind::Visible);
+ TU->setLocalOwningModule(GlobalModule);
+
+ // FIXME: Consider creating an explicit representation of this declaration.
+ return nullptr;
+}
+
+Sema::DeclGroupPtrTy
+Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
+ ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl) {
+ assert((getLangOpts().ModulesTS || getLangOpts().CPlusPlusModules) &&
+ "should only have module decl in Modules TS or C++20");
+
+ // A module implementation unit requires that we are not compiling a module
+ // of any kind. A module interface unit requires that we are not compiling a
+ // module map.
+ switch (getLangOpts().getCompilingModule()) {
+ case LangOptions::CMK_None:
+ // It's OK to compile a module interface as a normal translation unit.
+ break;
+
+ case LangOptions::CMK_ModuleInterface:
+ if (MDK != ModuleDeclKind::Implementation)
+ break;
+
+ // We were asked to compile a module interface unit but this is a module
+ // implementation unit. That indicates the 'export' is missing.
+ Diag(ModuleLoc, diag::err_module_interface_implementation_mismatch)
+ << FixItHint::CreateInsertion(ModuleLoc, "export ");
+ MDK = ModuleDeclKind::Interface;
+ break;
+
+ case LangOptions::CMK_ModuleMap:
+ Diag(ModuleLoc, diag::err_module_decl_in_module_map_module);
+ return nullptr;
+
+ case LangOptions::CMK_HeaderModule:
+ Diag(ModuleLoc, diag::err_module_decl_in_header_module);
+ return nullptr;
+ }
+
+ assert(ModuleScopes.size() <= 1 && "expected to be at global module scope");
+
+ // FIXME: Most of this work should be done by the preprocessor rather than
+ // here, in order to support macro import.
+
+ // Only one module-declaration is permitted per source file.
+ if (!ModuleScopes.empty() &&
+ ModuleScopes.back().Module->isModulePurview()) {
+ Diag(ModuleLoc, diag::err_module_redeclaration);
+ Diag(VisibleModules.getImportLoc(ModuleScopes.back().Module),
+ diag::note_prev_module_declaration);
+ return nullptr;
+ }
+
+ // Find the global module fragment we're adopting into this module, if any.
+ Module *GlobalModuleFragment = nullptr;
+ if (!ModuleScopes.empty() &&
+ ModuleScopes.back().Module->Kind == Module::GlobalModuleFragment)
+ GlobalModuleFragment = ModuleScopes.back().Module;
+
+ // In C++20, the module-declaration must be the first declaration if there
+ // is no global module fragment.
+ if (getLangOpts().CPlusPlusModules && !IsFirstDecl && !GlobalModuleFragment) {
+ Diag(ModuleLoc, diag::err_module_decl_not_at_start);
+ SourceLocation BeginLoc =
+ ModuleScopes.empty()
+ ? SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID())
+ : ModuleScopes.back().BeginLoc;
+ if (BeginLoc.isValid()) {
+ Diag(BeginLoc, diag::note_global_module_introducer_missing)
+ << FixItHint::CreateInsertion(BeginLoc, "module;\n");
+ }
+ }
+
+ // Flatten the dots in a module name. Unlike Clang's hierarchical module map
+ // modules, the dots here are just another character that can appear in a
+ // module name.
+ std::string ModuleName;
+ for (auto &Piece : Path) {
+ if (!ModuleName.empty())
+ ModuleName += ".";
+ ModuleName += Piece.first->getName();
+ }
+
+ // If a module name was explicitly specified on the command line, it must be
+ // correct.
+ if (!getLangOpts().CurrentModule.empty() &&
+ getLangOpts().CurrentModule != ModuleName) {
+ Diag(Path.front().second, diag::err_current_module_name_mismatch)
+ << SourceRange(Path.front().second, Path.back().second)
+ << getLangOpts().CurrentModule;
+ return nullptr;
+ }
+ const_cast<LangOptions&>(getLangOpts()).CurrentModule = ModuleName;
+
+ auto &Map = PP.getHeaderSearchInfo().getModuleMap();
+ Module *Mod;
+
+ switch (MDK) {
+ case ModuleDeclKind::Interface: {
+ // We can't have parsed or imported a definition of this module or parsed a
+ // module map defining it already.
+ if (auto *M = Map.findModule(ModuleName)) {
+ Diag(Path[0].second, diag::err_module_redefinition) << ModuleName;
+ if (M->DefinitionLoc.isValid())
+ Diag(M->DefinitionLoc, diag::note_prev_module_definition);
+ else if (const auto *FE = M->getASTFile())
+ Diag(M->DefinitionLoc, diag::note_prev_module_definition_from_ast_file)
+ << FE->getName();
+ Mod = M;
+ break;
+ }
+
+ // Create a Module for the module that we're defining.
+ Mod = Map.createModuleForInterfaceUnit(ModuleLoc, ModuleName,
+ GlobalModuleFragment);
+ assert(Mod && "module creation should not fail");
+ break;
+ }
+
+ case ModuleDeclKind::Implementation:
+ std::pair<IdentifierInfo *, SourceLocation> ModuleNameLoc(
+ PP.getIdentifierInfo(ModuleName), Path[0].second);
+ Mod = getModuleLoader().loadModule(ModuleLoc, {ModuleNameLoc},
+ Module::AllVisible,
+ /*IsInclusionDirective=*/false);
+ if (!Mod) {
+ Diag(ModuleLoc, diag::err_module_not_defined) << ModuleName;
+ // Create an empty module interface unit for error recovery.
+ Mod = Map.createModuleForInterfaceUnit(ModuleLoc, ModuleName,
+ GlobalModuleFragment);
+ }
+ break;
+ }
+
+ if (!GlobalModuleFragment) {
+ ModuleScopes.push_back({});
+ if (getLangOpts().ModulesLocalVisibility)
+ ModuleScopes.back().OuterVisibleModules = std::move(VisibleModules);
+ } else {
+ // We're done with the global module fragment now.
+ ActOnEndOfTranslationUnitFragment(TUFragmentKind::Global);
+ }
+
+ // Switch from the global module fragment (if any) to the named module.
+ ModuleScopes.back().BeginLoc = StartLoc;
+ ModuleScopes.back().Module = Mod;
+ ModuleScopes.back().ModuleInterface = MDK != ModuleDeclKind::Implementation;
+ VisibleModules.setVisible(Mod, ModuleLoc);
+
+ // From now on, we have an owning module for all declarations we see.
+ // However, those declarations are module-private unless explicitly
+ // exported.
+ auto *TU = Context.getTranslationUnitDecl();
+ TU->setModuleOwnershipKind(Decl::ModuleOwnershipKind::ModulePrivate);
+ TU->setLocalOwningModule(Mod);
+
+ // FIXME: Create a ModuleDecl.
+ return nullptr;
+}
+
+Sema::DeclGroupPtrTy
+Sema::ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
+ SourceLocation PrivateLoc) {
+ // C++20 [basic.link]/2:
+ // A private-module-fragment shall appear only in a primary module
+ // interface unit.
+ switch (ModuleScopes.empty() ? Module::GlobalModuleFragment
+ : ModuleScopes.back().Module->Kind) {
+ case Module::ModuleMapModule:
+ case Module::GlobalModuleFragment:
+ Diag(PrivateLoc, diag::err_private_module_fragment_not_module);
+ return nullptr;
+
+ case Module::PrivateModuleFragment:
+ Diag(PrivateLoc, diag::err_private_module_fragment_redefined);
+ Diag(ModuleScopes.back().BeginLoc, diag::note_previous_definition);
+ return nullptr;
+
+ case Module::ModuleInterfaceUnit:
+ break;
+ }
+
+ if (!ModuleScopes.back().ModuleInterface) {
+ Diag(PrivateLoc, diag::err_private_module_fragment_not_module_interface);
+ Diag(ModuleScopes.back().BeginLoc,
+ diag::note_not_module_interface_add_export)
+ << FixItHint::CreateInsertion(ModuleScopes.back().BeginLoc, "export ");
+ return nullptr;
+ }
+
+ // FIXME: Check this isn't a module interface partition.
+ // FIXME: Check that this translation unit does not import any partitions;
+ // such imports would violate [basic.link]/2's "shall be the only module unit"
+ // restriction.
+
+ // We've finished the public fragment of the translation unit.
+ ActOnEndOfTranslationUnitFragment(TUFragmentKind::Normal);
+
+ auto &Map = PP.getHeaderSearchInfo().getModuleMap();
+ Module *PrivateModuleFragment =
+ Map.createPrivateModuleFragmentForInterfaceUnit(
+ ModuleScopes.back().Module, PrivateLoc);
+ assert(PrivateModuleFragment && "module creation should not fail");
+
+ // Enter the scope of the private module fragment.
+ ModuleScopes.push_back({});
+ ModuleScopes.back().BeginLoc = ModuleLoc;
+ ModuleScopes.back().Module = PrivateModuleFragment;
+ ModuleScopes.back().ModuleInterface = true;
+ VisibleModules.setVisible(PrivateModuleFragment, ModuleLoc);
+
+ // All declarations created from now on are scoped to the private module
+ // fragment (and are neither visible nor reachable in importers of the module
+ // interface).
+ auto *TU = Context.getTranslationUnitDecl();
+ TU->setModuleOwnershipKind(Decl::ModuleOwnershipKind::ModulePrivate);
+ TU->setLocalOwningModule(PrivateModuleFragment);
+
+ // FIXME: Consider creating an explicit representation of this declaration.
+ return nullptr;
+}
+
+DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
+ SourceLocation ExportLoc,
+ SourceLocation ImportLoc,
+ ModuleIdPath Path) {
+ // Flatten the module path for a Modules TS module name.
+ std::pair<IdentifierInfo *, SourceLocation> ModuleNameLoc;
+ if (getLangOpts().ModulesTS) {
+ std::string ModuleName;
+ for (auto &Piece : Path) {
+ if (!ModuleName.empty())
+ ModuleName += ".";
+ ModuleName += Piece.first->getName();
+ }
+ ModuleNameLoc = {PP.getIdentifierInfo(ModuleName), Path[0].second};
+ Path = ModuleIdPath(ModuleNameLoc);
+ }
+
+ Module *Mod =
+ getModuleLoader().loadModule(ImportLoc, Path, Module::AllVisible,
+ /*IsInclusionDirective=*/false);
+ if (!Mod)
+ return true;
+
+ return ActOnModuleImport(StartLoc, ExportLoc, ImportLoc, Mod, Path);
+}
+
+/// Determine whether \p D is lexically within an export-declaration.
+static const ExportDecl *getEnclosingExportDecl(const Decl *D) {
+ for (auto *DC = D->getLexicalDeclContext(); DC; DC = DC->getLexicalParent())
+ if (auto *ED = dyn_cast<ExportDecl>(DC))
+ return ED;
+ return nullptr;
+}
+
+DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
+ SourceLocation ExportLoc,
+ SourceLocation ImportLoc,
+ Module *Mod, ModuleIdPath Path) {
+ VisibleModules.setVisible(Mod, ImportLoc);
+
+ checkModuleImportContext(*this, Mod, ImportLoc, CurContext);
+
+ // FIXME: we should support importing a submodule within a different submodule
+ // of the same top-level module. Until we do, make it an error rather than
+ // silently ignoring the import.
+ // Import-from-implementation is valid in the Modules TS. FIXME: Should we
+ // warn on a redundant import of the current module?
+ // FIXME: Import of a module from an implementation partition of the same
+ // module is permitted.
+ if (Mod->getTopLevelModuleName() == getLangOpts().CurrentModule &&
+ (getLangOpts().isCompilingModule() || !getLangOpts().ModulesTS)) {
+ Diag(ImportLoc, getLangOpts().isCompilingModule()
+ ? diag::err_module_self_import
+ : diag::err_module_import_in_implementation)
+ << Mod->getFullModuleName() << getLangOpts().CurrentModule;
+ }
+
+ SmallVector<SourceLocation, 2> IdentifierLocs;
+ Module *ModCheck = Mod;
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ // If we've run out of module parents, just drop the remaining identifiers.
+ // We need the length to be consistent.
+ if (!ModCheck)
+ break;
+ ModCheck = ModCheck->Parent;
+
+ IdentifierLocs.push_back(Path[I].second);
+ }
+
+ // If this was a header import, pad out with dummy locations.
+ // FIXME: Pass in and use the location of the header-name token in this case.
+ if (Path.empty()) {
+ for (; ModCheck; ModCheck = ModCheck->Parent) {
+ IdentifierLocs.push_back(SourceLocation());
+ }
+ }
+
+ ImportDecl *Import = ImportDecl::Create(Context, CurContext, StartLoc,
+ Mod, IdentifierLocs);
+ CurContext->addDecl(Import);
+
+ // Sequence initialization of the imported module before that of the current
+ // module, if any.
+ if (!ModuleScopes.empty())
+ Context.addModuleInitializer(ModuleScopes.back().Module, Import);
+
+ // Re-export the module if needed.
+ if (!ModuleScopes.empty() && ModuleScopes.back().ModuleInterface) {
+ if (ExportLoc.isValid() || getEnclosingExportDecl(Import))
+ getCurrentModule()->Exports.emplace_back(Mod, false);
+ } else if (ExportLoc.isValid()) {
+ Diag(ExportLoc, diag::err_export_not_in_module_interface);
+ }
+
+ return Import;
+}
+
+void Sema::ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
+ checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext, true);
+ BuildModuleInclude(DirectiveLoc, Mod);
+}
+
+void Sema::BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
+ // Determine whether we're in the #include buffer for a module. The #includes
+ // in that buffer do not qualify as module imports; they're just an
+ // implementation detail of us building the module.
+ //
+ // FIXME: Should we even get ActOnModuleInclude calls for those?
+ bool IsInModuleIncludes =
+ TUKind == TU_Module &&
+ getSourceManager().isWrittenInMainFile(DirectiveLoc);
+
+ bool ShouldAddImport = !IsInModuleIncludes;
+
+ // If this module import was due to an inclusion directive, create an
+ // implicit import declaration to capture it in the AST.
+ if (ShouldAddImport) {
+ TranslationUnitDecl *TU = getASTContext().getTranslationUnitDecl();
+ ImportDecl *ImportD = ImportDecl::CreateImplicit(getASTContext(), TU,
+ DirectiveLoc, Mod,
+ DirectiveLoc);
+ if (!ModuleScopes.empty())
+ Context.addModuleInitializer(ModuleScopes.back().Module, ImportD);
+ TU->addDecl(ImportD);
+ Consumer.HandleImplicitImportDecl(ImportD);
+ }
+
+ getModuleLoader().makeModuleVisible(Mod, Module::AllVisible, DirectiveLoc);
+ VisibleModules.setVisible(Mod, DirectiveLoc);
+}
+
+void Sema::ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod) {
+ checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext, true);
+
+ ModuleScopes.push_back({});
+ ModuleScopes.back().Module = Mod;
+ if (getLangOpts().ModulesLocalVisibility)
+ ModuleScopes.back().OuterVisibleModules = std::move(VisibleModules);
+
+ VisibleModules.setVisible(Mod, DirectiveLoc);
+
+ // The enclosing context is now part of this module.
+ // FIXME: Consider creating a child DeclContext to hold the entities
+ // lexically within the module.
+ if (getLangOpts().trackLocalOwningModule()) {
+ for (auto *DC = CurContext; DC; DC = DC->getLexicalParent()) {
+ cast<Decl>(DC)->setModuleOwnershipKind(
+ getLangOpts().ModulesLocalVisibility
+ ? Decl::ModuleOwnershipKind::VisibleWhenImported
+ : Decl::ModuleOwnershipKind::Visible);
+ cast<Decl>(DC)->setLocalOwningModule(Mod);
+ }
+ }
+}
+
+void Sema::ActOnModuleEnd(SourceLocation EomLoc, Module *Mod) {
+ if (getLangOpts().ModulesLocalVisibility) {
+ VisibleModules = std::move(ModuleScopes.back().OuterVisibleModules);
+ // Leaving a module hides namespace names, so our visible namespace cache
+ // is now out of date.
+ VisibleNamespaceCache.clear();
+ }
+
+ assert(!ModuleScopes.empty() && ModuleScopes.back().Module == Mod &&
+ "left the wrong module scope");
+ ModuleScopes.pop_back();
+
+ // We got to the end of processing a local module. Create an
+ // ImportDecl as we would for an imported module.
+ FileID File = getSourceManager().getFileID(EomLoc);
+ SourceLocation DirectiveLoc;
+ if (EomLoc == getSourceManager().getLocForEndOfFile(File)) {
+ // We reached the end of a #included module header. Use the #include loc.
+ assert(File != getSourceManager().getMainFileID() &&
+ "end of submodule in main source file");
+ DirectiveLoc = getSourceManager().getIncludeLoc(File);
+ } else {
+ // We reached an EOM pragma. Use the pragma location.
+ DirectiveLoc = EomLoc;
+ }
+ BuildModuleInclude(DirectiveLoc, Mod);
+
+ // Any further declarations are in whatever module we returned to.
+ if (getLangOpts().trackLocalOwningModule()) {
+ // The parser guarantees that this is the same context that we entered
+ // the module within.
+ for (auto *DC = CurContext; DC; DC = DC->getLexicalParent()) {
+ cast<Decl>(DC)->setLocalOwningModule(getCurrentModule());
+ if (!getCurrentModule())
+ cast<Decl>(DC)->setModuleOwnershipKind(
+ Decl::ModuleOwnershipKind::Unowned);
+ }
+ }
+}
+
+void Sema::createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
+ Module *Mod) {
+ // Bail if we're not allowed to implicitly import a module here.
+ if (isSFINAEContext() || !getLangOpts().ModulesErrorRecovery ||
+ VisibleModules.isVisible(Mod))
+ return;
+
+ // Create the implicit import declaration.
+ TranslationUnitDecl *TU = getASTContext().getTranslationUnitDecl();
+ ImportDecl *ImportD = ImportDecl::CreateImplicit(getASTContext(), TU,
+ Loc, Mod, Loc);
+ TU->addDecl(ImportD);
+ Consumer.HandleImplicitImportDecl(ImportD);
+
+ // Make the module visible.
+ getModuleLoader().makeModuleVisible(Mod, Module::AllVisible, Loc);
+ VisibleModules.setVisible(Mod, Loc);
+}
+
+/// We have parsed the start of an export declaration, including the '{'
+/// (if present).
+Decl *Sema::ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
+ SourceLocation LBraceLoc) {
+ ExportDecl *D = ExportDecl::Create(Context, CurContext, ExportLoc);
+
+ // Set this temporarily so we know the export-declaration was braced.
+ D->setRBraceLoc(LBraceLoc);
+
+ // C++2a [module.interface]p1:
+ // An export-declaration shall appear only [...] in the purview of a module
+ // interface unit. An export-declaration shall not appear directly or
+ // indirectly within [...] a private-module-fragment.
+ if (ModuleScopes.empty() || !ModuleScopes.back().Module->isModulePurview()) {
+ Diag(ExportLoc, diag::err_export_not_in_module_interface) << 0;
+ } else if (!ModuleScopes.back().ModuleInterface) {
+ Diag(ExportLoc, diag::err_export_not_in_module_interface) << 1;
+ Diag(ModuleScopes.back().BeginLoc,
+ diag::note_not_module_interface_add_export)
+ << FixItHint::CreateInsertion(ModuleScopes.back().BeginLoc, "export ");
+ } else if (ModuleScopes.back().Module->Kind ==
+ Module::PrivateModuleFragment) {
+ Diag(ExportLoc, diag::err_export_in_private_module_fragment);
+ Diag(ModuleScopes.back().BeginLoc, diag::note_private_module_fragment);
+ }
+
+ for (const DeclContext *DC = CurContext; DC; DC = DC->getLexicalParent()) {
+ if (const auto *ND = dyn_cast<NamespaceDecl>(DC)) {
+ // An export-declaration shall not appear directly or indirectly within
+ // an unnamed namespace [...]
+ if (ND->isAnonymousNamespace()) {
+ Diag(ExportLoc, diag::err_export_within_anonymous_namespace);
+ Diag(ND->getLocation(), diag::note_anonymous_namespace);
+ // Don't diagnose internal-linkage declarations in this region.
+ D->setInvalidDecl();
+ break;
+ }
+
+ // A declaration is exported if it is [...] a namespace-definition
+ // that contains an exported declaration.
+ //
+ // Defer exporting the namespace until after we leave it, in order to
+ // avoid marking all subsequent declarations in the namespace as exported.
+ if (!DeferredExportedNamespaces.insert(ND).second)
+ break;
+ }
+ }
+
+ // [...] its declaration or declaration-seq shall not contain an
+ // export-declaration.
+ if (auto *ED = getEnclosingExportDecl(D)) {
+ Diag(ExportLoc, diag::err_export_within_export);
+ if (ED->hasBraces())
+ Diag(ED->getLocation(), diag::note_export);
+ }
+
+ CurContext->addDecl(D);
+ PushDeclContext(S, D);
+ D->setModuleOwnershipKind(Decl::ModuleOwnershipKind::VisibleWhenImported);
+ return D;
+}
+
+static bool checkExportedDeclContext(Sema &S, DeclContext *DC,
+ SourceLocation BlockStart);
+
+namespace {
+enum class UnnamedDeclKind {
+ Empty,
+ StaticAssert,
+ Asm,
+ UsingDirective,
+ Context
+};
+}
+
+static llvm::Optional<UnnamedDeclKind> getUnnamedDeclKind(Decl *D) {
+ if (isa<EmptyDecl>(D))
+ return UnnamedDeclKind::Empty;
+ if (isa<StaticAssertDecl>(D))
+ return UnnamedDeclKind::StaticAssert;
+ if (isa<FileScopeAsmDecl>(D))
+ return UnnamedDeclKind::Asm;
+ if (isa<UsingDirectiveDecl>(D))
+ return UnnamedDeclKind::UsingDirective;
+ // Everything else either introduces one or more names or is ill-formed.
+ return llvm::None;
+}
+
+unsigned getUnnamedDeclDiag(UnnamedDeclKind UDK, bool InBlock) {
+ switch (UDK) {
+ case UnnamedDeclKind::Empty:
+ case UnnamedDeclKind::StaticAssert:
+ // Allow empty-declarations and static_asserts in an export block as an
+ // extension.
+ return InBlock ? diag::ext_export_no_name_block : diag::err_export_no_name;
+
+ case UnnamedDeclKind::UsingDirective:
+ // Allow exporting using-directives as an extension.
+ return diag::ext_export_using_directive;
+
+ case UnnamedDeclKind::Context:
+ // Allow exporting DeclContexts that transitively contain no declarations
+ // as an extension.
+ return diag::ext_export_no_names;
+
+ case UnnamedDeclKind::Asm:
+ return diag::err_export_no_name;
+ }
+ llvm_unreachable("unknown kind");
+}
+
+static void diagExportedUnnamedDecl(Sema &S, UnnamedDeclKind UDK, Decl *D,
+ SourceLocation BlockStart) {
+ S.Diag(D->getLocation(), getUnnamedDeclDiag(UDK, BlockStart.isValid()))
+ << (unsigned)UDK;
+ if (BlockStart.isValid())
+ S.Diag(BlockStart, diag::note_export);
+}
+
+/// Check that it's valid to export \p D.
+static bool checkExportedDecl(Sema &S, Decl *D, SourceLocation BlockStart) {
+ // C++2a [module.interface]p3:
+ // An exported declaration shall declare at least one name
+ if (auto UDK = getUnnamedDeclKind(D))
+ diagExportedUnnamedDecl(S, *UDK, D, BlockStart);
+
+ // [...] shall not declare a name with internal linkage.
+ if (auto *ND = dyn_cast<NamedDecl>(D)) {
+ // Don't diagnose anonymous union objects; we'll diagnose their members
+ // instead.
+ if (ND->getDeclName() && ND->getFormalLinkage() == InternalLinkage) {
+ S.Diag(ND->getLocation(), diag::err_export_internal) << ND;
+ if (BlockStart.isValid())
+ S.Diag(BlockStart, diag::note_export);
+ }
+ }
+
+ // C++2a [module.interface]p5:
+ // all entities to which all of the using-declarators ultimately refer
+ // shall have been introduced with a name having external linkage
+ if (auto *USD = dyn_cast<UsingShadowDecl>(D)) {
+ NamedDecl *Target = USD->getUnderlyingDecl();
+ if (Target->getFormalLinkage() == InternalLinkage) {
+ S.Diag(USD->getLocation(), diag::err_export_using_internal) << Target;
+ S.Diag(Target->getLocation(), diag::note_using_decl_target);
+ if (BlockStart.isValid())
+ S.Diag(BlockStart, diag::note_export);
+ }
+ }
+
+ // Recurse into namespace-scope DeclContexts. (Only namespace-scope
+ // declarations are exported.)
+ if (auto *DC = dyn_cast<DeclContext>(D))
+ if (DC->getRedeclContext()->isFileContext() && !isa<EnumDecl>(D))
+ return checkExportedDeclContext(S, DC, BlockStart);
+ return false;
+}
+
+/// Check that it's valid to export all the declarations in \p DC.
+static bool checkExportedDeclContext(Sema &S, DeclContext *DC,
+ SourceLocation BlockStart) {
+ bool AllUnnamed = true;
+ for (auto *D : DC->decls())
+ AllUnnamed &= checkExportedDecl(S, D, BlockStart);
+ return AllUnnamed;
+}
+
+/// Complete the definition of an export declaration.
+Decl *Sema::ActOnFinishExportDecl(Scope *S, Decl *D, SourceLocation RBraceLoc) {
+ auto *ED = cast<ExportDecl>(D);
+ if (RBraceLoc.isValid())
+ ED->setRBraceLoc(RBraceLoc);
+
+ PopDeclContext();
+
+ if (!D->isInvalidDecl()) {
+ SourceLocation BlockStart =
+ ED->hasBraces() ? ED->getBeginLoc() : SourceLocation();
+ for (auto *Child : ED->decls()) {
+ if (checkExportedDecl(*this, Child, BlockStart)) {
+ // If a top-level child is a linkage-spec declaration, it might contain
+ // no declarations (transitively), in which case it's ill-formed.
+ diagExportedUnnamedDecl(*this, UnnamedDeclKind::Context, Child,
+ BlockStart);
+ }
+ }
+ }
+
+ return D;
+}
diff --git a/lib/Sema/SemaObjCProperty.cpp b/lib/Sema/SemaObjCProperty.cpp
index 9412d0160048..e5c014501431 100644
--- a/lib/Sema/SemaObjCProperty.cpp
+++ b/lib/Sema/SemaObjCProperty.cpp
@@ -1,9 +1,8 @@
//===--- SemaObjCProperty.cpp - Semantic Analysis for ObjC @property ------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -1289,7 +1288,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
Ivar = ObjCIvarDecl::Create(Context, ClassImpDecl,
PropertyIvarLoc,PropertyIvarLoc, PropertyIvar,
- PropertyIvarType, /*Dinfo=*/nullptr,
+ PropertyIvarType, /*TInfo=*/nullptr,
ObjCIvarDecl::Private,
(Expr *)nullptr, true);
if (RequireNonAbstractType(PropertyIvarLoc,
@@ -1943,11 +1942,10 @@ static void DiagnoseUnimplementedAccessor(
llvm::SmallPtrSet<const ObjCMethodDecl *, 8> &SMap) {
// Check to see if we have a corresponding selector in SMap and with the
// right method type.
- auto I = std::find_if(SMap.begin(), SMap.end(),
- [&](const ObjCMethodDecl *x) {
- return x->getSelector() == Method &&
- x->isClassMethod() == Prop->isClassProperty();
- });
+ auto I = llvm::find_if(SMap, [&](const ObjCMethodDecl *x) {
+ return x->getSelector() == Method &&
+ x->isClassMethod() == Prop->isClassProperty();
+ });
// When reporting on missing property setter/getter implementation in
// categories, do not report when they are declared in primary class,
// class's protocol, or one of it super classes. This is because,
@@ -2281,9 +2279,18 @@ void Sema::DiagnoseMissingDesignatedInitOverrides(
I = DesignatedInits.begin(), E = DesignatedInits.end(); I != E; ++I) {
const ObjCMethodDecl *MD = *I;
if (!InitSelSet.count(MD->getSelector())) {
+ // Don't emit a diagnostic if the overriding method in the subclass is
+ // marked as unavailable.
bool Ignore = false;
if (auto *IMD = IFD->getInstanceMethod(MD->getSelector())) {
Ignore = IMD->isUnavailable();
+ } else {
+ // Check the methods declared in the class extensions too.
+ for (auto *Ext : IFD->visible_extensions())
+ if (auto *IMD = Ext->getInstanceMethod(MD->getSelector())) {
+ Ignore = IMD->isUnavailable();
+ break;
+ }
}
if (!Ignore) {
Diag(ImplD->getLocation(),
diff --git a/lib/Sema/SemaOpenMP.cpp b/lib/Sema/SemaOpenMP.cpp
index 36048a38b999..bd68011c18b2 100644
--- a/lib/Sema/SemaOpenMP.cpp
+++ b/lib/Sema/SemaOpenMP.cpp
@@ -1,9 +1,8 @@
//===--- SemaOpenMP.cpp - Semantic Analysis for OpenMP constructs ---------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
@@ -135,7 +134,7 @@ private:
/// get the data (loop counters etc.) about enclosing loop-based construct.
/// This data is required during codegen.
DoacrossDependMapTy DoacrossDepends;
- /// first argument (Expr *) contains optional argument of the
+ /// First argument (Expr *) contains optional argument of the
/// 'ordered' clause, the second one is true if the region has an 'ordered'
/// clause, false otherwise.
llvm::Optional<std::pair<const Expr *, OMPOrderedClause *>> OrderedRegion;
@@ -144,10 +143,14 @@ private:
bool NowaitRegion = false;
bool CancelRegion = false;
bool LoopStart = false;
+ bool BodyComplete = false;
SourceLocation InnerTeamsRegionLoc;
/// Reference to the taskgroup task_reduction reference expression.
Expr *TaskgroupReductionRef = nullptr;
llvm::DenseSet<QualType> MappedClassesQualTypes;
+ /// List of globals marked as declare target link in this target region
+ /// (isOpenMPTargetExecutionDirective(Directive) == true).
+ llvm::SmallVector<DeclRefExpr *, 4> DeclareTargetLinkVarDecls;
SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
Scope *CurScope, SourceLocation Loc)
: Directive(DKind), DirectiveName(Name), CurScope(CurScope),
@@ -170,26 +173,112 @@ private:
/// captured by reference.
bool ForceCaptureByReferenceInTargetExecutable = false;
CriticalsWithHintsTy Criticals;
+ unsigned IgnoredStackElements = 0;
- using iterator = StackTy::const_reverse_iterator;
+ /// Iterators over the stack iterate in order from innermost to outermost
+ /// directive.
+ using const_iterator = StackTy::const_reverse_iterator;
+ const_iterator begin() const {
+ return Stack.empty() ? const_iterator()
+ : Stack.back().first.rbegin() + IgnoredStackElements;
+ }
+ const_iterator end() const {
+ return Stack.empty() ? const_iterator() : Stack.back().first.rend();
+ }
+ using iterator = StackTy::reverse_iterator;
+ iterator begin() {
+ return Stack.empty() ? iterator()
+ : Stack.back().first.rbegin() + IgnoredStackElements;
+ }
+ iterator end() {
+ return Stack.empty() ? iterator() : Stack.back().first.rend();
+ }
- DSAVarData getDSA(iterator &Iter, ValueDecl *D) const;
-
- /// Checks if the variable is a local for OpenMP region.
- bool isOpenMPLocal(VarDecl *D, iterator Iter) const;
+ // Convenience operations to get at the elements of the stack.
bool isStackEmpty() const {
return Stack.empty() ||
Stack.back().second != CurrentNonCapturingFunctionScope ||
- Stack.back().first.empty();
+ Stack.back().first.size() <= IgnoredStackElements;
+ }
+ size_t getStackSize() const {
+ return isStackEmpty() ? 0
+ : Stack.back().first.size() - IgnoredStackElements;
+ }
+
+ SharingMapTy *getTopOfStackOrNull() {
+ size_t Size = getStackSize();
+ if (Size == 0)
+ return nullptr;
+ return &Stack.back().first[Size - 1];
}
+ const SharingMapTy *getTopOfStackOrNull() const {
+ return const_cast<DSAStackTy&>(*this).getTopOfStackOrNull();
+ }
+ SharingMapTy &getTopOfStack() {
+ assert(!isStackEmpty() && "no current directive");
+ return *getTopOfStackOrNull();
+ }
+ const SharingMapTy &getTopOfStack() const {
+ return const_cast<DSAStackTy&>(*this).getTopOfStack();
+ }
+
+ SharingMapTy *getSecondOnStackOrNull() {
+ size_t Size = getStackSize();
+ if (Size <= 1)
+ return nullptr;
+ return &Stack.back().first[Size - 2];
+ }
+ const SharingMapTy *getSecondOnStackOrNull() const {
+ return const_cast<DSAStackTy&>(*this).getSecondOnStackOrNull();
+ }
+
+ /// Get the stack element at a certain level (previously returned by
+ /// \c getNestingLevel).
+ ///
+ /// Note that nesting levels count from outermost to innermost, and this is
+ /// the reverse of our iteration order where new inner levels are pushed at
+ /// the front of the stack.
+ SharingMapTy &getStackElemAtLevel(unsigned Level) {
+ assert(Level < getStackSize() && "no such stack element");
+ return Stack.back().first[Level];
+ }
+ const SharingMapTy &getStackElemAtLevel(unsigned Level) const {
+ return const_cast<DSAStackTy&>(*this).getStackElemAtLevel(Level);
+ }
+
+ DSAVarData getDSA(const_iterator &Iter, ValueDecl *D) const;
+
+ /// Checks if the variable is a local for OpenMP region.
+ bool isOpenMPLocal(VarDecl *D, const_iterator Iter) const;
/// Vector of previously declared requires directives
SmallVector<const OMPRequiresDecl *, 2> RequiresDecls;
+ /// omp_allocator_handle_t type.
+ QualType OMPAllocatorHandleT;
+ /// Expression for the predefined allocators.
+ Expr *OMPPredefinedAllocators[OMPAllocateDeclAttr::OMPUserDefinedMemAlloc] = {
+ nullptr};
+ /// Vector of previously encountered target directives
+ SmallVector<SourceLocation, 2> TargetLocations;
public:
explicit DSAStackTy(Sema &S) : SemaRef(S) {}
+ /// Sets omp_allocator_handle_t type.
+ void setOMPAllocatorHandleT(QualType Ty) { OMPAllocatorHandleT = Ty; }
+ /// Gets omp_allocator_handle_t type.
+ QualType getOMPAllocatorHandleT() const { return OMPAllocatorHandleT; }
+ /// Sets the given default allocator.
+ void setAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
+ Expr *Allocator) {
+ OMPPredefinedAllocators[AllocatorKind] = Allocator;
+ }
+ /// Returns the specified default allocator.
+ Expr *getAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind) const {
+ return OMPPredefinedAllocators[AllocatorKind];
+ }
+
bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
OpenMPClauseKind getClauseParsingMode() const {
assert(isClauseParsingMode() && "Must be in clause parsing mode.");
@@ -197,6 +286,14 @@ public:
}
void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; }
+ bool isBodyComplete() const {
+ const SharingMapTy *Top = getTopOfStackOrNull();
+ return Top && Top->BodyComplete;
+ }
+ void setBodyComplete() {
+ getTopOfStack().BodyComplete = true;
+ }
+
bool isForceVarCapturing() const { return ForceCapturing; }
void setForceVarCapturing(bool V) { ForceCapturing = V; }
@@ -209,6 +306,8 @@ public:
void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
Scope *CurScope, SourceLocation Loc) {
+ assert(!IgnoredStackElements &&
+ "cannot change stack while ignoring elements");
if (Stack.empty() ||
Stack.back().second != CurrentNonCapturingFunctionScope)
Stack.emplace_back(StackTy(), CurrentNonCapturingFunctionScope);
@@ -217,46 +316,78 @@ public:
}
void pop() {
+ assert(!IgnoredStackElements &&
+ "cannot change stack while ignoring elements");
assert(!Stack.back().first.empty() &&
"Data-sharing attributes stack is empty!");
Stack.back().first.pop_back();
}
+ /// RAII object to temporarily leave the scope of a directive when we want to
+ /// logically operate in its parent.
+ class ParentDirectiveScope {
+ DSAStackTy &Self;
+ bool Active;
+ public:
+ ParentDirectiveScope(DSAStackTy &Self, bool Activate)
+ : Self(Self), Active(false) {
+ if (Activate)
+ enable();
+ }
+ ~ParentDirectiveScope() { disable(); }
+ void disable() {
+ if (Active) {
+ --Self.IgnoredStackElements;
+ Active = false;
+ }
+ }
+ void enable() {
+ if (!Active) {
+ ++Self.IgnoredStackElements;
+ Active = true;
+ }
+ }
+ };
+
/// Marks that we're started loop parsing.
void loopInit() {
assert(isOpenMPLoopDirective(getCurrentDirective()) &&
"Expected loop-based directive.");
- Stack.back().first.back().LoopStart = true;
+ getTopOfStack().LoopStart = true;
}
/// Start capturing of the variables in the loop context.
void loopStart() {
assert(isOpenMPLoopDirective(getCurrentDirective()) &&
"Expected loop-based directive.");
- Stack.back().first.back().LoopStart = false;
+ getTopOfStack().LoopStart = false;
}
/// true, if variables are captured, false otherwise.
bool isLoopStarted() const {
assert(isOpenMPLoopDirective(getCurrentDirective()) &&
"Expected loop-based directive.");
- return !Stack.back().first.back().LoopStart;
+ return !getTopOfStack().LoopStart;
}
/// Marks (or clears) declaration as possibly loop counter.
void resetPossibleLoopCounter(const Decl *D = nullptr) {
- Stack.back().first.back().PossiblyLoopCounter =
+ getTopOfStack().PossiblyLoopCounter =
D ? D->getCanonicalDecl() : D;
}
/// Gets the possible loop counter decl.
const Decl *getPossiblyLoopCunter() const {
- return Stack.back().first.back().PossiblyLoopCounter;
+ return getTopOfStack().PossiblyLoopCounter;
}
/// Start new OpenMP region stack in new non-capturing function.
void pushFunction() {
+ assert(!IgnoredStackElements &&
+ "cannot change stack while ignoring elements");
const FunctionScopeInfo *CurFnScope = SemaRef.getCurFunction();
assert(!isa<CapturingScopeInfo>(CurFnScope));
CurrentNonCapturingFunctionScope = CurFnScope;
}
/// Pop region stack for non-capturing function.
void popFunction(const FunctionScopeInfo *OldFSI) {
+ assert(!IgnoredStackElements &&
+ "cannot change stack while ignoring elements");
if (!Stack.empty() && Stack.back().second == OldFSI) {
assert(Stack.back().first.empty());
Stack.pop_back();
@@ -327,16 +458,16 @@ public:
Expr *&TaskgroupDescriptor) const;
/// Return reduction reference expression for the current taskgroup.
Expr *getTaskgroupReductionRef() const {
- assert(Stack.back().first.back().Directive == OMPD_taskgroup &&
+ assert(getTopOfStack().Directive == OMPD_taskgroup &&
"taskgroup reference expression requested for non taskgroup "
"directive.");
- return Stack.back().first.back().TaskgroupReductionRef;
+ return getTopOfStack().TaskgroupReductionRef;
}
/// Checks if the given \p VD declaration is actually a taskgroup reduction
/// descriptor variable at the \p Level of OpenMP regions.
bool isTaskgroupReductionRef(const ValueDecl *VD, unsigned Level) const {
- return Stack.back().first[Level].TaskgroupReductionRef &&
- cast<DeclRefExpr>(Stack.back().first[Level].TaskgroupReductionRef)
+ return getStackElemAtLevel(Level).TaskgroupReductionRef &&
+ cast<DeclRefExpr>(getStackElemAtLevel(Level).TaskgroupReductionRef)
->getDecl() == VD;
}
@@ -382,18 +513,18 @@ public:
/// Returns currently analyzed directive.
OpenMPDirectiveKind getCurrentDirective() const {
- return isStackEmpty() ? OMPD_unknown : Stack.back().first.back().Directive;
+ const SharingMapTy *Top = getTopOfStackOrNull();
+ return Top ? Top->Directive : OMPD_unknown;
}
/// Returns directive kind at specified level.
OpenMPDirectiveKind getDirective(unsigned Level) const {
assert(!isStackEmpty() && "No directive at specified level.");
- return Stack.back().first[Level].Directive;
+ return getStackElemAtLevel(Level).Directive;
}
/// Returns parent directive.
OpenMPDirectiveKind getParentDirective() const {
- if (isStackEmpty() || Stack.back().first.size() == 1)
- return OMPD_unknown;
- return std::next(Stack.back().first.rbegin())->Directive;
+ const SharingMapTy *Parent = getSecondOnStackOrNull();
+ return Parent ? Parent->Directive : OMPD_unknown;
}
/// Add requires decl to internal vector
@@ -401,6 +532,16 @@ public:
RequiresDecls.push_back(RD);
}
+ /// Checks if the defined 'requires' directive has specified type of clause.
+ template <typename ClauseType>
+ bool hasRequiresDeclWithClause() {
+ return llvm::any_of(RequiresDecls, [](const OMPRequiresDecl *D) {
+ return llvm::any_of(D->clauselists(), [](const OMPClause *C) {
+ return isa<ClauseType>(C);
+ });
+ });
+ }
+
/// Checks for a duplicate clause amongst previously declared requires
/// directives
bool hasDuplicateRequiresClause(ArrayRef<OMPClause *> ClauseList) const {
@@ -423,43 +564,50 @@ public:
return IsDuplicate;
}
+ /// Add location of previously encountered target to internal vector
+ void addTargetDirLocation(SourceLocation LocStart) {
+ TargetLocations.push_back(LocStart);
+ }
+
+ // Return previously encountered target region locations.
+ ArrayRef<SourceLocation> getEncounteredTargetLocs() const {
+ return TargetLocations;
+ }
+
/// Set default data sharing attribute to none.
void setDefaultDSANone(SourceLocation Loc) {
- assert(!isStackEmpty());
- Stack.back().first.back().DefaultAttr = DSA_none;
- Stack.back().first.back().DefaultAttrLoc = Loc;
+ getTopOfStack().DefaultAttr = DSA_none;
+ getTopOfStack().DefaultAttrLoc = Loc;
}
/// Set default data sharing attribute to shared.
void setDefaultDSAShared(SourceLocation Loc) {
- assert(!isStackEmpty());
- Stack.back().first.back().DefaultAttr = DSA_shared;
- Stack.back().first.back().DefaultAttrLoc = Loc;
+ getTopOfStack().DefaultAttr = DSA_shared;
+ getTopOfStack().DefaultAttrLoc = Loc;
}
/// Set default data mapping attribute to 'tofrom:scalar'.
void setDefaultDMAToFromScalar(SourceLocation Loc) {
- assert(!isStackEmpty());
- Stack.back().first.back().DefaultMapAttr = DMA_tofrom_scalar;
- Stack.back().first.back().DefaultMapAttrLoc = Loc;
+ getTopOfStack().DefaultMapAttr = DMA_tofrom_scalar;
+ getTopOfStack().DefaultMapAttrLoc = Loc;
}
DefaultDataSharingAttributes getDefaultDSA() const {
return isStackEmpty() ? DSA_unspecified
- : Stack.back().first.back().DefaultAttr;
+ : getTopOfStack().DefaultAttr;
}
SourceLocation getDefaultDSALocation() const {
return isStackEmpty() ? SourceLocation()
- : Stack.back().first.back().DefaultAttrLoc;
+ : getTopOfStack().DefaultAttrLoc;
}
DefaultMapAttributes getDefaultDMA() const {
return isStackEmpty() ? DMA_unspecified
- : Stack.back().first.back().DefaultMapAttr;
+ : getTopOfStack().DefaultMapAttr;
}
DefaultMapAttributes getDefaultDMAAtLevel(unsigned Level) const {
- return Stack.back().first[Level].DefaultMapAttr;
+ return getStackElemAtLevel(Level).DefaultMapAttr;
}
SourceLocation getDefaultDMALocation() const {
return isStackEmpty() ? SourceLocation()
- : Stack.back().first.back().DefaultMapAttrLoc;
+ : getTopOfStack().DefaultMapAttrLoc;
}
/// Checks if the specified variable is a threadprivate.
@@ -471,82 +619,77 @@ public:
/// Marks current region as ordered (it has an 'ordered' clause).
void setOrderedRegion(bool IsOrdered, const Expr *Param,
OMPOrderedClause *Clause) {
- assert(!isStackEmpty());
if (IsOrdered)
- Stack.back().first.back().OrderedRegion.emplace(Param, Clause);
+ getTopOfStack().OrderedRegion.emplace(Param, Clause);
else
- Stack.back().first.back().OrderedRegion.reset();
+ getTopOfStack().OrderedRegion.reset();
}
/// Returns true, if region is ordered (has associated 'ordered' clause),
/// false - otherwise.
bool isOrderedRegion() const {
- if (isStackEmpty())
- return false;
- return Stack.back().first.rbegin()->OrderedRegion.hasValue();
+ if (const SharingMapTy *Top = getTopOfStackOrNull())
+ return Top->OrderedRegion.hasValue();
+ return false;
}
/// Returns optional parameter for the ordered region.
std::pair<const Expr *, OMPOrderedClause *> getOrderedRegionParam() const {
- if (isStackEmpty() ||
- !Stack.back().first.rbegin()->OrderedRegion.hasValue())
- return std::make_pair(nullptr, nullptr);
- return Stack.back().first.rbegin()->OrderedRegion.getValue();
+ if (const SharingMapTy *Top = getTopOfStackOrNull())
+ if (Top->OrderedRegion.hasValue())
+ return Top->OrderedRegion.getValue();
+ return std::make_pair(nullptr, nullptr);
}
/// Returns true, if parent region is ordered (has associated
/// 'ordered' clause), false - otherwise.
bool isParentOrderedRegion() const {
- if (isStackEmpty() || Stack.back().first.size() == 1)
- return false;
- return std::next(Stack.back().first.rbegin())->OrderedRegion.hasValue();
+ if (const SharingMapTy *Parent = getSecondOnStackOrNull())
+ return Parent->OrderedRegion.hasValue();
+ return false;
}
/// Returns optional parameter for the ordered region.
std::pair<const Expr *, OMPOrderedClause *>
getParentOrderedRegionParam() const {
- if (isStackEmpty() || Stack.back().first.size() == 1 ||
- !std::next(Stack.back().first.rbegin())->OrderedRegion.hasValue())
- return std::make_pair(nullptr, nullptr);
- return std::next(Stack.back().first.rbegin())->OrderedRegion.getValue();
+ if (const SharingMapTy *Parent = getSecondOnStackOrNull())
+ if (Parent->OrderedRegion.hasValue())
+ return Parent->OrderedRegion.getValue();
+ return std::make_pair(nullptr, nullptr);
}
/// Marks current region as nowait (it has a 'nowait' clause).
void setNowaitRegion(bool IsNowait = true) {
- assert(!isStackEmpty());
- Stack.back().first.back().NowaitRegion = IsNowait;
+ getTopOfStack().NowaitRegion = IsNowait;
}
/// Returns true, if parent region is nowait (has associated
/// 'nowait' clause), false - otherwise.
bool isParentNowaitRegion() const {
- if (isStackEmpty() || Stack.back().first.size() == 1)
- return false;
- return std::next(Stack.back().first.rbegin())->NowaitRegion;
+ if (const SharingMapTy *Parent = getSecondOnStackOrNull())
+ return Parent->NowaitRegion;
+ return false;
}
/// Marks parent region as cancel region.
void setParentCancelRegion(bool Cancel = true) {
- if (!isStackEmpty() && Stack.back().first.size() > 1) {
- auto &StackElemRef = *std::next(Stack.back().first.rbegin());
- StackElemRef.CancelRegion |= StackElemRef.CancelRegion || Cancel;
- }
+ if (SharingMapTy *Parent = getSecondOnStackOrNull())
+ Parent->CancelRegion |= Cancel;
}
/// Return true if current region has inner cancel construct.
bool isCancelRegion() const {
- return isStackEmpty() ? false : Stack.back().first.back().CancelRegion;
+ const SharingMapTy *Top = getTopOfStackOrNull();
+ return Top ? Top->CancelRegion : false;
}
/// Set collapse value for the region.
void setAssociatedLoops(unsigned Val) {
- assert(!isStackEmpty());
- Stack.back().first.back().AssociatedLoops = Val;
+ getTopOfStack().AssociatedLoops = Val;
}
/// Return collapse value for region.
unsigned getAssociatedLoops() const {
- return isStackEmpty() ? 0 : Stack.back().first.back().AssociatedLoops;
+ const SharingMapTy *Top = getTopOfStackOrNull();
+ return Top ? Top->AssociatedLoops : 0;
}
/// Marks the current target region as one with a closely nested teams
/// region.
void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) {
- if (!isStackEmpty() && Stack.back().first.size() > 1) {
- std::next(Stack.back().first.rbegin())->InnerTeamsRegionLoc =
- TeamsRegionLoc;
- }
+ if (SharingMapTy *Parent = getSecondOnStackOrNull())
+ Parent->InnerTeamsRegionLoc = TeamsRegionLoc;
}
/// Returns true if the current region has a closely nested teams region.
bool hasInnerTeamsRegion() const {
@@ -554,16 +697,17 @@ public:
}
/// Returns location of the nested teams region (if any).
SourceLocation getInnerTeamsRegionLoc() const {
- return isStackEmpty() ? SourceLocation()
- : Stack.back().first.back().InnerTeamsRegionLoc;
+ const SharingMapTy *Top = getTopOfStackOrNull();
+ return Top ? Top->InnerTeamsRegionLoc : SourceLocation();
}
Scope *getCurScope() const {
- return isStackEmpty() ? nullptr : Stack.back().first.back().CurScope;
+ const SharingMapTy *Top = getTopOfStackOrNull();
+ return Top ? Top->CurScope : nullptr;
}
SourceLocation getConstructLoc() const {
- return isStackEmpty() ? SourceLocation()
- : Stack.back().first.back().ConstructLoc;
+ const SharingMapTy *Top = getTopOfStackOrNull();
+ return Top ? Top->ConstructLoc : SourceLocation();
}
/// Do the check specified in \a Check to all component lists and return true
@@ -576,8 +720,8 @@ public:
Check) const {
if (isStackEmpty())
return false;
- auto SI = Stack.back().first.rbegin();
- auto SE = Stack.back().first.rend();
+ auto SI = begin();
+ auto SE = end();
if (SI == SE)
return false;
@@ -606,17 +750,12 @@ public:
bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
OpenMPClauseKind)>
Check) const {
- if (isStackEmpty())
- return false;
-
- auto StartI = Stack.back().first.begin();
- auto EndI = Stack.back().first.end();
- if (std::distance(StartI, EndI) <= (int)Level)
+ if (getStackSize() <= Level)
return false;
- std::advance(StartI, Level);
- auto MI = StartI->MappedExprComponents.find(VD);
- if (MI != StartI->MappedExprComponents.end())
+ const SharingMapTy &StackElem = getStackElemAtLevel(Level);
+ auto MI = StackElem.MappedExprComponents.find(VD);
+ if (MI != StackElem.MappedExprComponents.end())
for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
MI->second.Components)
if (Check(L, MI->second.Kind))
@@ -630,10 +769,7 @@ public:
const ValueDecl *VD,
OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
OpenMPClauseKind WhereFoundClauseKind) {
- assert(!isStackEmpty() &&
- "Not expecting to retrieve components from a empty stack!");
- MappedExprComponentTy &MEC =
- Stack.back().first.back().MappedExprComponents[VD];
+ MappedExprComponentTy &MEC = getTopOfStack().MappedExprComponents[VD];
// Create new entry and append the new components there.
MEC.Components.resize(MEC.Components.size() + 1);
MEC.Components.back().append(Components.begin(), Components.end());
@@ -642,19 +778,17 @@ public:
unsigned getNestingLevel() const {
assert(!isStackEmpty());
- return Stack.back().first.size() - 1;
+ return getStackSize() - 1;
}
void addDoacrossDependClause(OMPDependClause *C,
const OperatorOffsetTy &OpsOffs) {
- assert(!isStackEmpty() && Stack.back().first.size() > 1);
- SharingMapTy &StackElem = *std::next(Stack.back().first.rbegin());
- assert(isOpenMPWorksharingDirective(StackElem.Directive));
- StackElem.DoacrossDepends.try_emplace(C, OpsOffs);
+ SharingMapTy *Parent = getSecondOnStackOrNull();
+ assert(Parent && isOpenMPWorksharingDirective(Parent->Directive));
+ Parent->DoacrossDepends.try_emplace(C, OpsOffs);
}
llvm::iterator_range<DoacrossDependMapTy::const_iterator>
getDoacrossDependClauses() const {
- assert(!isStackEmpty());
- const SharingMapTy &StackElem = Stack.back().first.back();
+ const SharingMapTy &StackElem = getTopOfStack();
if (isOpenMPWorksharingDirective(StackElem.Directive)) {
const DoacrossDependMapTy &Ref = StackElem.DoacrossDepends;
return llvm::make_range(Ref.begin(), Ref.end());
@@ -665,16 +799,36 @@ public:
// Store types of classes that have been explicitly mapped.
void addMappedClassesQualTypes(QualType QT) {
- SharingMapTy &StackElem = Stack.back().first.back();
+ SharingMapTy &StackElem = getTopOfStack();
StackElem.MappedClassesQualTypes.insert(QT);
}
// Return true if the class type was explicitly mapped before.
bool isClassPreviouslyMapped(QualType QT) const {
- const SharingMapTy &StackElem = Stack.back().first.back();
+ const SharingMapTy &StackElem = getTopOfStack();
return StackElem.MappedClassesQualTypes.count(QT) != 0;
}
+ /// Adds a 'declare target link' global to the parent target region.
+ void addToParentTargetRegionLinkGlobals(DeclRefExpr *E) {
+ assert(*OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
+ E->getDecl()) == OMPDeclareTargetDeclAttr::MT_Link &&
+ "Expected declare target link global.");
+ for (auto &Elem : *this) {
+ if (isOpenMPTargetExecutionDirective(Elem.Directive)) {
+ Elem.DeclareTargetLinkVarDecls.push_back(E);
+ return;
+ }
+ }
+ }
+
+ /// Returns the list of 'declare target link' globals if the current
+ /// directive is a target directive.
+ ArrayRef<DeclRefExpr *> getLinkGlobals() const {
+ assert(isOpenMPTargetExecutionDirective(getCurrentDirective()) &&
+ "Expected target executable directive.");
+ return getTopOfStack().DeclareTargetLinkVarDecls;
+ }
};
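A minimal sketch (not in this diff) of the situation the two members above serve: a 'declare target link' global referenced in a target region is recorded so that an implicit map can be generated for it; 'table' and 'sum_on_device' are made-up names:

  int table[4096];
  #pragma omp declare target link(table)

  int sum_on_device(int n) {
    int s = 0;
  #pragma omp target map(tofrom : s)
    {
      for (int i = 0; i < n; ++i)
        s += table[i];   // 'table' is recorded above and receives an implicit map
    }
    return s;
  }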
bool isImplicitTaskingRegion(OpenMPDirectiveKind DKind) {
@@ -682,7 +836,8 @@ bool isImplicitTaskingRegion(OpenMPDirectiveKind DKind) {
}
bool isImplicitOrExplicitTaskingRegion(OpenMPDirectiveKind DKind) {
- return isImplicitTaskingRegion(DKind) || isOpenMPTaskingDirective(DKind) || DKind == OMPD_unknown;
+ return isImplicitTaskingRegion(DKind) || isOpenMPTaskingDirective(DKind) ||
+ DKind == OMPD_unknown;
}
} // namespace
@@ -728,13 +883,13 @@ static ValueDecl *getCanonicalDecl(ValueDecl *D) {
getCanonicalDecl(const_cast<const ValueDecl *>(D)));
}
-DSAStackTy::DSAVarData DSAStackTy::getDSA(iterator &Iter,
+DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
ValueDecl *D) const {
D = getCanonicalDecl(D);
auto *VD = dyn_cast<VarDecl>(D);
const auto *FD = dyn_cast<FieldDecl>(D);
DSAVarData DVar;
- if (isStackEmpty() || Iter == Stack.back().first.rend()) {
+ if (Iter == end()) {
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a region but not in construct]
// File-scope or namespace-scope variables referenced in called routines
@@ -809,7 +964,7 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(iterator &Iter,
// bound to the current team is shared.
if (isOpenMPTaskingDirective(DVar.DKind)) {
DSAVarData DVarTemp;
- iterator I = Iter, E = Stack.back().first.rend();
+ const_iterator I = Iter, E = end();
do {
++I;
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables
@@ -841,7 +996,7 @@ const Expr *DSAStackTy::addUniqueAligned(const ValueDecl *D,
const Expr *NewDE) {
assert(!isStackEmpty() && "Data sharing attributes stack is empty");
D = getCanonicalDecl(D);
- SharingMapTy &StackElem = Stack.back().first.back();
+ SharingMapTy &StackElem = getTopOfStack();
auto It = StackElem.AlignedMap.find(D);
if (It == StackElem.AlignedMap.end()) {
assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
@@ -855,7 +1010,7 @@ const Expr *DSAStackTy::addUniqueAligned(const ValueDecl *D,
void DSAStackTy::addLoopControlVariable(const ValueDecl *D, VarDecl *Capture) {
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
D = getCanonicalDecl(D);
- SharingMapTy &StackElem = Stack.back().first.back();
+ SharingMapTy &StackElem = getTopOfStack();
StackElem.LCVMap.try_emplace(
D, LCDeclInfo(StackElem.LCVMap.size() + 1, Capture));
}
@@ -864,7 +1019,7 @@ const DSAStackTy::LCDeclInfo
DSAStackTy::isLoopControlVariable(const ValueDecl *D) const {
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
D = getCanonicalDecl(D);
- const SharingMapTy &StackElem = Stack.back().first.back();
+ const SharingMapTy &StackElem = getTopOfStack();
auto It = StackElem.LCVMap.find(D);
if (It != StackElem.LCVMap.end())
return It->second;
@@ -873,23 +1028,21 @@ DSAStackTy::isLoopControlVariable(const ValueDecl *D) const {
const DSAStackTy::LCDeclInfo
DSAStackTy::isParentLoopControlVariable(const ValueDecl *D) const {
- assert(!isStackEmpty() && Stack.back().first.size() > 1 &&
- "Data-sharing attributes stack is empty");
+ const SharingMapTy *Parent = getSecondOnStackOrNull();
+ assert(Parent && "Data-sharing attributes stack is empty");
D = getCanonicalDecl(D);
- const SharingMapTy &StackElem = *std::next(Stack.back().first.rbegin());
- auto It = StackElem.LCVMap.find(D);
- if (It != StackElem.LCVMap.end())
+ auto It = Parent->LCVMap.find(D);
+ if (It != Parent->LCVMap.end())
return It->second;
return {0, nullptr};
}
const ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) const {
- assert(!isStackEmpty() && Stack.back().first.size() > 1 &&
- "Data-sharing attributes stack is empty");
- const SharingMapTy &StackElem = *std::next(Stack.back().first.rbegin());
- if (StackElem.LCVMap.size() < I)
+ const SharingMapTy *Parent = getSecondOnStackOrNull();
+ assert(Parent && "Data-sharing attributes stack is empty");
+ if (Parent->LCVMap.size() < I)
return nullptr;
- for (const auto &Pair : StackElem.LCVMap)
+ for (const auto &Pair : Parent->LCVMap)
if (Pair.second.first == I)
return Pair.first;
return nullptr;
@@ -904,8 +1057,7 @@ void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
Data.RefExpr.setPointer(E);
Data.PrivateCopy = nullptr;
} else {
- assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
- DSAInfo &Data = Stack.back().first.back().SharingMap[D];
+ DSAInfo &Data = getTopOfStack().SharingMap[D];
assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) ||
(A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) ||
(A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) ||
@@ -920,8 +1072,7 @@ void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
Data.RefExpr.setPointerAndInt(E, IsLastprivate);
Data.PrivateCopy = PrivateCopy;
if (PrivateCopy) {
- DSAInfo &Data =
- Stack.back().first.back().SharingMap[PrivateCopy->getDecl()];
+ DSAInfo &Data = getTopOfStack().SharingMap[PrivateCopy->getDecl()];
Data.Attributes = A;
Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
Data.PrivateCopy = nullptr;
@@ -966,16 +1117,16 @@ void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
D = getCanonicalDecl(D);
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
assert(
- Stack.back().first.back().SharingMap[D].Attributes == OMPC_reduction &&
+ getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
"Additional reduction info may be specified only for reduction items.");
- ReductionData &ReductionData = Stack.back().first.back().ReductionMap[D];
+ ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
assert(ReductionData.ReductionRange.isInvalid() &&
- Stack.back().first.back().Directive == OMPD_taskgroup &&
+ getTopOfStack().Directive == OMPD_taskgroup &&
"Additional reduction info may be specified only once for reduction "
"items.");
ReductionData.set(BOK, SR);
Expr *&TaskgroupReductionRef =
- Stack.back().first.back().TaskgroupReductionRef;
+ getTopOfStack().TaskgroupReductionRef;
if (!TaskgroupReductionRef) {
VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
SemaRef.Context.VoidPtrTy, ".task_red.");
@@ -989,16 +1140,16 @@ void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
D = getCanonicalDecl(D);
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
assert(
- Stack.back().first.back().SharingMap[D].Attributes == OMPC_reduction &&
+ getTopOfStack().SharingMap[D].Attributes == OMPC_reduction &&
"Additional reduction info may be specified only for reduction items.");
- ReductionData &ReductionData = Stack.back().first.back().ReductionMap[D];
+ ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
assert(ReductionData.ReductionRange.isInvalid() &&
- Stack.back().first.back().Directive == OMPD_taskgroup &&
+ getTopOfStack().Directive == OMPD_taskgroup &&
"Additional reduction info may be specified only once for reduction "
"items.");
ReductionData.set(ReductionRef, SR);
Expr *&TaskgroupReductionRef =
- Stack.back().first.back().TaskgroupReductionRef;
+ getTopOfStack().TaskgroupReductionRef;
if (!TaskgroupReductionRef) {
VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
SemaRef.Context.VoidPtrTy, ".task_red.");
@@ -1012,11 +1163,7 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
Expr *&TaskgroupDescriptor) const {
D = getCanonicalDecl(D);
assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
- if (Stack.back().first.empty())
- return DSAVarData();
- for (iterator I = std::next(Stack.back().first.rbegin(), 1),
- E = Stack.back().first.rend();
- I != E; std::advance(I, 1)) {
+ for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
const DSAInfo &Data = I->SharingMap.lookup(D);
if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
continue;
@@ -1041,11 +1188,7 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
Expr *&TaskgroupDescriptor) const {
D = getCanonicalDecl(D);
assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
- if (Stack.back().first.empty())
- return DSAVarData();
- for (iterator I = std::next(Stack.back().first.rbegin(), 1),
- E = Stack.back().first.rend();
- I != E; std::advance(I, 1)) {
+ for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
const DSAInfo &Data = I->SharingMap.lookup(D);
if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
continue;
@@ -1065,21 +1208,17 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
return DSAVarData();
}
-bool DSAStackTy::isOpenMPLocal(VarDecl *D, iterator Iter) const {
+bool DSAStackTy::isOpenMPLocal(VarDecl *D, const_iterator I) const {
D = D->getCanonicalDecl();
- if (!isStackEmpty()) {
- iterator I = Iter, E = Stack.back().first.rend();
- Scope *TopScope = nullptr;
- while (I != E && !isImplicitOrExplicitTaskingRegion(I->Directive) &&
- !isOpenMPTargetExecutionDirective(I->Directive))
- ++I;
- if (I == E)
- return false;
- TopScope = I->CurScope ? I->CurScope->getParent() : nullptr;
- Scope *CurScope = getCurScope();
- while (CurScope != TopScope && !CurScope->isDeclScope(D))
- CurScope = CurScope->getParent();
- return CurScope != TopScope;
+ for (const_iterator E = end(); I != E; ++I) {
+ if (isImplicitOrExplicitTaskingRegion(I->Directive) ||
+ isOpenMPTargetExecutionDirective(I->Directive)) {
+ Scope *TopScope = I->CurScope ? I->CurScope->getParent() : nullptr;
+ Scope *CurScope = getCurScope();
+ while (CurScope && CurScope != TopScope && !CurScope->isDeclScope(D))
+ CurScope = CurScope->getParent();
+ return CurScope != TopScope;
+ }
}
return false;
}
@@ -1167,15 +1306,14 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
if (SemaRef.getLangOpts().OpenMPCUDAMode && VD &&
VD->isLocalVarDeclOrParm() && !isStackEmpty() &&
!isLoopControlVariable(D).first) {
- iterator IterTarget =
- std::find_if(Stack.back().first.rbegin(), Stack.back().first.rend(),
- [](const SharingMapTy &Data) {
- return isOpenMPTargetExecutionDirective(Data.Directive);
- });
- if (IterTarget != Stack.back().first.rend()) {
- iterator ParentIterTarget = std::next(IterTarget, 1);
- for (iterator Iter = Stack.back().first.rbegin();
- Iter != ParentIterTarget; std::advance(Iter, 1)) {
+ const_iterator IterTarget =
+ std::find_if(begin(), end(), [](const SharingMapTy &Data) {
+ return isOpenMPTargetExecutionDirective(Data.Directive);
+ });
+ if (IterTarget != end()) {
+ const_iterator ParentIterTarget = IterTarget + 1;
+ for (const_iterator Iter = begin();
+ Iter != ParentIterTarget; ++Iter) {
if (isOpenMPLocal(VD, Iter)) {
DVar.RefExpr =
buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
@@ -1184,7 +1322,7 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
return DVar;
}
}
- if (!isClauseParsingMode() || IterTarget != Stack.back().first.rbegin()) {
+ if (!isClauseParsingMode() || IterTarget != begin()) {
auto DSAIter = IterTarget->SharingMap.find(D);
if (DSAIter != IterTarget->SharingMap.end() &&
isOpenMPPrivate(DSAIter->getSecond().Attributes)) {
@@ -1192,7 +1330,7 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
DVar.CKind = OMPC_threadprivate;
return DVar;
}
- iterator End = Stack.back().first.rend();
+ const_iterator End = end();
if (!SemaRef.isOpenMPCapturedByRef(
D, std::distance(ParentIterTarget, End))) {
DVar.RefExpr =
@@ -1216,16 +1354,28 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
// in a Construct, C/C++, predetermined, p.7]
// Variables with static storage duration that are declared in a scope
// inside the construct are shared.
- auto &&MatchesAlways = [](OpenMPDirectiveKind) { return true; };
if (VD && VD->isStaticDataMember()) {
- DSAVarData DVarTemp = hasDSA(D, isOpenMPPrivate, MatchesAlways, FromParent);
- if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
+ // Check for explicitly specified attributes.
+ const_iterator I = begin();
+ const_iterator EndI = end();
+ if (FromParent && I != EndI)
+ ++I;
+ auto It = I->SharingMap.find(D);
+ if (It != I->SharingMap.end()) {
+ const DSAInfo &Data = It->getSecond();
+ DVar.RefExpr = Data.RefExpr.getPointer();
+ DVar.PrivateCopy = Data.PrivateCopy;
+ DVar.CKind = Data.Attributes;
+ DVar.ImplicitDSALoc = I->DefaultAttrLoc;
+ DVar.DKind = I->Directive;
return DVar;
+ }
DVar.CKind = OMPC_shared;
return DVar;
}
+ auto &&MatchesAlways = [](OpenMPDirectiveKind) { return true; };
// The predetermined shared attribute for const-qualified types having no
// mutable members was removed after OpenMP 3.1.
if (SemaRef.LangOpts.OpenMP <= 31) {
@@ -1252,10 +1402,10 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
// Explicitly specified attributes and local variables with predetermined
// attributes.
- iterator I = Stack.back().first.rbegin();
- iterator EndI = Stack.back().first.rend();
+ const_iterator I = begin();
+ const_iterator EndI = end();
if (FromParent && I != EndI)
- std::advance(I, 1);
+ ++I;
auto It = I->SharingMap.find(D);
if (It != I->SharingMap.end()) {
const DSAInfo &Data = It->getSecond();
@@ -1272,14 +1422,14 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
bool FromParent) const {
if (isStackEmpty()) {
- iterator I;
+ const_iterator I;
return getDSA(I, D);
}
D = getCanonicalDecl(D);
- iterator StartI = Stack.back().first.rbegin();
- iterator EndI = Stack.back().first.rend();
+ const_iterator StartI = begin();
+ const_iterator EndI = end();
if (FromParent && StartI != EndI)
- std::advance(StartI, 1);
+ ++StartI;
return getDSA(StartI, D);
}
@@ -1291,14 +1441,15 @@ DSAStackTy::hasDSA(ValueDecl *D,
if (isStackEmpty())
return {};
D = getCanonicalDecl(D);
- iterator I = Stack.back().first.rbegin();
- iterator EndI = Stack.back().first.rend();
+ const_iterator I = begin();
+ const_iterator EndI = end();
if (FromParent && I != EndI)
- std::advance(I, 1);
- for (; I != EndI; std::advance(I, 1)) {
- if (!DPred(I->Directive) && !isImplicitOrExplicitTaskingRegion(I->Directive))
+ ++I;
+ for (; I != EndI; ++I) {
+ if (!DPred(I->Directive) &&
+ !isImplicitOrExplicitTaskingRegion(I->Directive))
continue;
- iterator NewI = I;
+ const_iterator NewI = I;
DSAVarData DVar = getDSA(NewI, D);
if (I == NewI && CPred(DVar.CKind))
return DVar;
@@ -1313,13 +1464,13 @@ const DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
if (isStackEmpty())
return {};
D = getCanonicalDecl(D);
- iterator StartI = Stack.back().first.rbegin();
- iterator EndI = Stack.back().first.rend();
+ const_iterator StartI = begin();
+ const_iterator EndI = end();
if (FromParent && StartI != EndI)
- std::advance(StartI, 1);
+ ++StartI;
if (StartI == EndI || !DPred(StartI->Directive))
return {};
- iterator NewI = StartI;
+ const_iterator NewI = StartI;
DSAVarData DVar = getDSA(NewI, D);
return (NewI == StartI && CPred(DVar.CKind)) ? DVar : DSAVarData();
}
@@ -1327,23 +1478,19 @@ const DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
bool DSAStackTy::hasExplicitDSA(
const ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
unsigned Level, bool NotLastprivate) const {
- if (isStackEmpty())
+ if (getStackSize() <= Level)
return false;
D = getCanonicalDecl(D);
- auto StartI = Stack.back().first.begin();
- auto EndI = Stack.back().first.end();
- if (std::distance(StartI, EndI) <= (int)Level)
- return false;
- std::advance(StartI, Level);
- auto I = StartI->SharingMap.find(D);
- if ((I != StartI->SharingMap.end()) &&
- I->getSecond().RefExpr.getPointer() &&
- CPred(I->getSecond().Attributes) &&
- (!NotLastprivate || !I->getSecond().RefExpr.getInt()))
+ const SharingMapTy &StackElem = getStackElemAtLevel(Level);
+ auto I = StackElem.SharingMap.find(D);
+ if (I != StackElem.SharingMap.end() &&
+ I->getSecond().RefExpr.getPointer() &&
+ CPred(I->getSecond().Attributes) &&
+ (!NotLastprivate || !I->getSecond().RefExpr.getInt()))
return true;
// Check predetermined rules for the loop control variables.
- auto LI = StartI->LCVMap.find(D);
- if (LI != StartI->LCVMap.end())
+ auto LI = StackElem.LCVMap.find(D);
+ if (LI != StackElem.LCVMap.end())
return CPred(OMPC_private);
return false;
}
@@ -1351,14 +1498,10 @@ bool DSAStackTy::hasExplicitDSA(
bool DSAStackTy::hasExplicitDirective(
const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
unsigned Level) const {
- if (isStackEmpty())
- return false;
- auto StartI = Stack.back().first.begin();
- auto EndI = Stack.back().first.end();
- if (std::distance(StartI, EndI) <= (int)Level)
+ if (getStackSize() <= Level)
return false;
- std::advance(StartI, Level);
- return DPred(StartI->Directive);
+ const SharingMapTy &StackElem = getStackElemAtLevel(Level);
+ return DPred(StackElem.Directive);
}
bool DSAStackTy::hasDirective(
@@ -1367,13 +1510,9 @@ bool DSAStackTy::hasDirective(
DPred,
bool FromParent) const {
// We look only in the enclosing region.
- if (isStackEmpty())
- return false;
- auto StartI = std::next(Stack.back().first.rbegin());
- auto EndI = Stack.back().first.rend();
- if (FromParent && StartI != EndI)
- StartI = std::next(StartI);
- for (auto I = StartI, EE = EndI; I != EE; ++I) {
+ size_t Skip = FromParent ? 2 : 1;
+ for (const_iterator I = begin() + std::min(Skip, getStackSize()), E = end();
+ I != E; ++I) {
if (DPred(I->Directive, I->DirectiveName, I->ConstructLoc))
return true;
}
@@ -1394,6 +1533,71 @@ void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
DSAStack->popFunction(OldFSI);
}
+static bool isOpenMPDeviceDelayedContext(Sema &S) {
+ assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsDevice &&
+ "Expected OpenMP device compilation.");
+ return !S.isInOpenMPTargetExecutionDirective() &&
+ !S.isInOpenMPDeclareTargetContext();
+}
+
+/// Do we know that we will eventually codegen the given function?
+static bool isKnownEmitted(Sema &S, FunctionDecl *FD) {
+ assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsDevice &&
+ "Expected OpenMP device compilation.");
+ // Templates are emitted when they're instantiated.
+ if (FD->isDependentContext())
+ return false;
+
+ if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
+ FD->getCanonicalDecl()))
+ return true;
+
+ // Otherwise, the function is known-emitted if it's in our set of
+ // known-emitted functions.
+ return S.DeviceKnownEmittedFns.count(FD) > 0;
+}
+
+Sema::DeviceDiagBuilder Sema::diagIfOpenMPDeviceCode(SourceLocation Loc,
+ unsigned DiagID) {
+ assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
+ "Expected OpenMP device compilation.");
+ return DeviceDiagBuilder((isOpenMPDeviceDelayedContext(*this) &&
+ !isKnownEmitted(*this, getCurFunctionDecl()))
+ ? DeviceDiagBuilder::K_Deferred
+ : DeviceDiagBuilder::K_Immediate,
+ Loc, DiagID, getCurFunctionDecl(), *this);
+}
+
+void Sema::checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee) {
+ assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
+ "Expected OpenMP device compilation.");
+ assert(Callee && "Callee may not be null.");
+ FunctionDecl *Caller = getCurFunctionDecl();
+
+ // If the caller is known-emitted, mark the callee as known-emitted.
+ // Otherwise, mark the call in our call graph so we can traverse it later.
+ if (!isOpenMPDeviceDelayedContext(*this) ||
+ (Caller && isKnownEmitted(*this, Caller)))
+ markKnownEmitted(*this, Caller, Callee, Loc, isKnownEmitted);
+ else if (Caller)
+ DeviceCallGraph[Caller].insert({Callee, Loc});
+}
+
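Illustrative only (not part of this change) of how the known-emitted state propagates through checkOpenMPDeviceFunction; 'helper', 'device_entry' and 'host_only' are invented names:

  void helper();                     // defined elsewhere in the TU

  #pragma omp declare target
  void device_entry() { helper(); }  // caller is known-emitted for the device,
  #pragma omp end declare target     // so 'helper' is marked known-emitted too

  void host_only() { helper(); }     // caller not known-emitted: the edge is only
                                     // recorded in DeviceCallGraph for later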
+void Sema::checkOpenMPDeviceExpr(const Expr *E) {
+ assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
+ "OpenMP device compilation mode is expected.");
+ QualType Ty = E->getType();
+ if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
+ ((Ty->isFloat128Type() ||
+ (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128)) &&
+ !Context.getTargetInfo().hasFloat128Type()) ||
+ (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
+ !Context.getTargetInfo().hasInt128Type()))
+ targetDiag(E->getExprLoc(), diag::err_omp_unsupported_type)
+ << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
+ << Context.getTargetInfo().getTriple().str() << E->getSourceRange();
+}
+
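A hedged sketch of an expression this check rejects, assuming a device target without 128-bit floating-point support:

  void f() {
  #pragma omp target
    {
      __float128 q = 1;   // err_omp_unsupported_type on such devices
      (void)q;
    }
  }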
bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
@@ -1523,12 +1727,10 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const {
if (IsByRef && Ty.getNonReferenceType()->isScalarType()) {
IsByRef =
- ((DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
- !Ty->isAnyPointerType()) ||
- !DSAStack->hasExplicitDSA(
- D,
- [](OpenMPClauseKind K) -> bool { return K == OMPC_firstprivate; },
- Level, /*NotLastprivate=*/true)) &&
+ !DSAStack->hasExplicitDSA(
+ D,
+ [](OpenMPClauseKind K) -> bool { return K == OMPC_firstprivate; },
+ Level, /*NotLastprivate=*/true) &&
// If the variable is artificial and must be captured by value - try to
// capture by value.
!(isa<OMPCapturedExprDecl>(D) && !D->hasAttr<OMPCaptureNoInitAttr>() &&
@@ -1565,17 +1767,25 @@ bool Sema::isInOpenMPTargetExecutionDirective() const {
false);
}
-VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D) {
+VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
+ unsigned StopAt) {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
D = getCanonicalDecl(D);
+ // If we want to determine whether the variable should be captured from the
+ // perspective of the current capturing scope, and we've already left all the
+ // capturing scopes of the top directive on the stack, check from the
+ // perspective of its parent directive (if any) instead.
+ DSAStackTy::ParentDirectiveScope InParentDirectiveRAII(
+ *DSAStack, CheckScopeInfo && DSAStack->isBodyComplete());
+
// If we are attempting to capture a global variable in a directive with
// 'target' we return true so that this global is also mapped to the device.
//
auto *VD = dyn_cast<VarDecl>(D);
- if (VD && !VD->hasLocalStorage()) {
- if (isInOpenMPDeclareTargetContext() &&
- (getCurCapturedRegion() || getCurBlock() || getCurLambda())) {
+ if (VD && !VD->hasLocalStorage() &&
+ (getCurCapturedRegion() || getCurBlock() || getCurLambda())) {
+ if (isInOpenMPDeclareTargetContext()) {
// Try to mark variable as declare target if it is used in capturing
// regions.
if (!OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
@@ -1590,48 +1800,21 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D) {
return VD;
}
}
- // Capture variables captured by reference in lambdas for target-based
- // directives.
- if (VD && !DSAStack->isClauseParsingMode()) {
- if (const auto *RD = VD->getType()
- .getCanonicalType()
- .getNonReferenceType()
- ->getAsCXXRecordDecl()) {
- bool SavedForceCaptureByReferenceInTargetExecutable =
- DSAStack->isForceCaptureByReferenceInTargetExecutable();
- DSAStack->setForceCaptureByReferenceInTargetExecutable(/*V=*/true);
- if (RD->isLambda()) {
- llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
- FieldDecl *ThisCapture;
- RD->getCaptureFields(Captures, ThisCapture);
- for (const LambdaCapture &LC : RD->captures()) {
- if (LC.getCaptureKind() == LCK_ByRef) {
- VarDecl *VD = LC.getCapturedVar();
- DeclContext *VDC = VD->getDeclContext();
- if (!VDC->Encloses(CurContext))
- continue;
- DSAStackTy::DSAVarData DVarPrivate =
- DSAStack->getTopDSA(VD, /*FromParent=*/false);
- // Do not capture already captured variables.
- if (!OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD) &&
- DVarPrivate.CKind == OMPC_unknown &&
- !DSAStack->checkMappableExprComponentListsForDecl(
- D, /*CurrentRegionOnly=*/true,
- [](OMPClauseMappableExprCommon::
- MappableExprComponentListRef,
- OpenMPClauseKind) { return true; }))
- MarkVariableReferenced(LC.getLocation(), LC.getCapturedVar());
- } else if (LC.getCaptureKind() == LCK_This) {
- QualType ThisTy = getCurrentThisType();
- if (!ThisTy.isNull() &&
- Context.typesAreCompatible(ThisTy, ThisCapture->getType()))
- CheckCXXThisCapture(LC.getLocation());
- }
+
+ if (CheckScopeInfo) {
+ bool OpenMPFound = false;
+ for (unsigned I = StopAt + 1; I > 0; --I) {
+ FunctionScopeInfo *FSI = FunctionScopes[I - 1];
+ if (!isa<CapturingScopeInfo>(FSI))
+ return nullptr;
+ if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
+ if (RSI->CapRegionKind == CR_OpenMP) {
+ OpenMPFound = true;
+ break;
}
- }
- DSAStack->setForceCaptureByReferenceInTargetExecutable(
- SavedForceCaptureByReferenceInTargetExecutable);
}
+ if (!OpenMPFound)
+ return nullptr;
}
if (DSAStack->getCurrentDirective() != OMPD_unknown &&
@@ -1647,10 +1830,16 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D) {
DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
if (DVarPrivate.CKind != OMPC_unknown && isOpenMPPrivate(DVarPrivate.CKind))
return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
+ // Threadprivate variables must not be captured.
+ if (isOpenMPThreadPrivate(DVarPrivate.CKind))
+ return nullptr;
+ // The variable is not private, or it is the variable in a directive with a
+ // default(none) clause and is not used in any clause.
DVarPrivate = DSAStack->hasDSA(D, isOpenMPPrivate,
[](OpenMPDirectiveKind) { return true; },
DSAStack->isClauseParsingMode());
- if (DVarPrivate.CKind != OMPC_unknown)
+ if (DVarPrivate.CKind != OMPC_unknown ||
+ (VD && DSAStack->getDefaultDSA() == DSA_none))
return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
}
return nullptr;
@@ -1764,6 +1953,9 @@ void Sema::EndOpenMPClause() {
DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
}
+static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
+ ArrayRef<OMPClause *> Clauses);
+
void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
// OpenMP [2.14.3.5, Restrictions, C/C++, p.1]
// A variable of class type (or array thereof) that appears in a lastprivate
@@ -1794,8 +1986,10 @@ void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
*this, DE->getExprLoc(), Type.getUnqualifiedType(),
VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr, DRE);
ActOnUninitializedDecl(VDPrivate);
- if (VDPrivate->isInvalidDecl())
+ if (VDPrivate->isInvalidDecl()) {
+ PrivateCopies.push_back(nullptr);
continue;
+ }
PrivateCopies.push_back(buildDeclRefExpr(
*this, VDPrivate, DE->getType(), DE->getExprLoc()));
} else {
@@ -1804,11 +1998,12 @@ void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
PrivateCopies.push_back(nullptr);
}
}
- // Set initializers to private copies if no errors were found.
- if (PrivateCopies.size() == Clause->varlist_size())
- Clause->setPrivateCopies(PrivateCopies);
+ Clause->setPrivateCopies(PrivateCopies);
}
}
+ // Check allocate clauses.
+ if (!CurContext->isDependentContext())
+ checkAllocateClauses(*this, DSAStack, D->clauses());
}
DSAStack->pop();
@@ -1837,6 +2032,11 @@ public:
}
return false;
}
+
+ std::unique_ptr<CorrectionCandidateCallback> clone() override {
+ return llvm::make_unique<VarDeclFilterCCC>(*this);
+ }
+
};
class VarOrFuncDeclFilterCCC final : public CorrectionCandidateCallback {
@@ -1847,19 +2047,25 @@ public:
explicit VarOrFuncDeclFilterCCC(Sema &S) : SemaRef(S) {}
bool ValidateCandidate(const TypoCorrection &Candidate) override {
NamedDecl *ND = Candidate.getCorrectionDecl();
- if (ND && (isa<VarDecl>(ND) || isa<FunctionDecl>(ND))) {
+ if (ND && ((isa<VarDecl>(ND) && ND->getKind() == Decl::Var) ||
+ isa<FunctionDecl>(ND))) {
return SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
SemaRef.getCurScope());
}
return false;
}
+
+ std::unique_ptr<CorrectionCandidateCallback> clone() override {
+ return llvm::make_unique<VarOrFuncDeclFilterCCC>(*this);
+ }
};
} // namespace
ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
- const DeclarationNameInfo &Id) {
+ const DeclarationNameInfo &Id,
+ OpenMPDirectiveKind Kind) {
LookupResult Lookup(*this, Id, LookupOrdinaryName);
LookupParsedName(Lookup, CurScope, &ScopeSpec, true);
@@ -1868,9 +2074,10 @@ ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
VarDecl *VD;
if (!Lookup.isSingleResult()) {
- if (TypoCorrection Corrected = CorrectTypo(
- Id, LookupOrdinaryName, CurScope, nullptr,
- llvm::make_unique<VarDeclFilterCCC>(*this), CTK_ErrorRecovery)) {
+ VarDeclFilterCCC CCC(*this);
+ if (TypoCorrection Corrected =
+ CorrectTypo(Id, LookupOrdinaryName, CurScope, nullptr, CCC,
+ CTK_ErrorRecovery)) {
diagnoseTypo(Corrected,
PDiag(Lookup.empty()
? diag::err_undeclared_var_use_suggest
@@ -1892,9 +2099,9 @@ ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
// OpenMP [2.9.2, Syntax, C/C++]
// Variables must be file-scope, namespace-scope, or static block-scope.
- if (!VD->hasGlobalStorage()) {
+ if (Kind == OMPD_threadprivate && !VD->hasGlobalStorage()) {
Diag(Id.getLoc(), diag::err_omp_global_var_arg)
- << getOpenMPDirectiveName(OMPD_threadprivate) << !VD->isStaticLocal();
+ << getOpenMPDirectiveName(Kind) << !VD->isStaticLocal();
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
@@ -1911,7 +2118,7 @@ ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
if (CanonicalVD->getDeclContext()->isTranslationUnit() &&
!getCurLexicalContext()->isTranslationUnit()) {
Diag(Id.getLoc(), diag::err_omp_var_scope)
- << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
+ << getOpenMPDirectiveName(Kind) << VD;
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
@@ -1926,7 +2133,7 @@ ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
if (CanonicalVD->isStaticDataMember() &&
!CanonicalVD->getDeclContext()->Equals(getCurLexicalContext())) {
Diag(Id.getLoc(), diag::err_omp_var_scope)
- << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
+ << getOpenMPDirectiveName(Kind) << VD;
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
@@ -1942,7 +2149,7 @@ ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
(!getCurLexicalContext()->isFileContext() ||
!getCurLexicalContext()->Encloses(CanonicalVD->getDeclContext()))) {
Diag(Id.getLoc(), diag::err_omp_var_scope)
- << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
+ << getOpenMPDirectiveName(Kind) << VD;
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
@@ -1953,10 +2160,10 @@ ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
// OpenMP [2.9.2, Restrictions, C/C++, p.6]
// A threadprivate directive for static block-scope variables must appear
// in the scope of the variable and not in a nested scope.
- if (CanonicalVD->isStaticLocal() && CurScope &&
+ if (CanonicalVD->isLocalVarDecl() && CurScope &&
!isDeclInScope(ND, getCurLexicalContext(), CurScope)) {
Diag(Id.getLoc(), diag::err_omp_var_scope)
- << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
+ << getOpenMPDirectiveName(Kind) << VD;
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
@@ -1968,9 +2175,10 @@ ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
// OpenMP [2.9.2, Restrictions, C/C++, p.2-6]
// A threadprivate directive must lexically precede all references to any
// of the variables in its list.
- if (VD->isUsed() && !DSAStack->isThreadPrivate(VD)) {
+ if (Kind == OMPD_threadprivate && VD->isUsed() &&
+ !DSAStack->isThreadPrivate(VD)) {
Diag(Id.getLoc(), diag::err_omp_var_used)
- << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
+ << getOpenMPDirectiveName(Kind) << VD;
return ExprError();
}
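For reference (not part of the change), the reference-before-directive case rejected above, with made-up names:

  int counter = 0;
  void bump() { ++counter; }          // 'counter' is already referenced here...
  #pragma omp threadprivate(counter)  // ...so err_omp_var_used is emitted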
@@ -2102,6 +2310,167 @@ Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
return D;
}
+static OMPAllocateDeclAttr::AllocatorTypeTy
+getAllocatorKind(Sema &S, DSAStackTy *Stack, Expr *Allocator) {
+ if (!Allocator)
+ return OMPAllocateDeclAttr::OMPDefaultMemAlloc;
+ if (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
+ Allocator->isInstantiationDependent() ||
+ Allocator->containsUnexpandedParameterPack())
+ return OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
+ auto AllocatorKindRes = OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
+ const Expr *AE = Allocator->IgnoreParenImpCasts();
+ for (int I = OMPAllocateDeclAttr::OMPDefaultMemAlloc;
+ I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
+ auto AllocatorKind = static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(I);
+ const Expr *DefAllocator = Stack->getAllocator(AllocatorKind);
+ llvm::FoldingSetNodeID AEId, DAEId;
+ AE->Profile(AEId, S.getASTContext(), /*Canonical=*/true);
+ DefAllocator->Profile(DAEId, S.getASTContext(), /*Canonical=*/true);
+ if (AEId == DAEId) {
+ AllocatorKindRes = AllocatorKind;
+ break;
+ }
+ }
+ return AllocatorKindRes;
+}
+
+static bool checkPreviousOMPAllocateAttribute(
+ Sema &S, DSAStackTy *Stack, Expr *RefExpr, VarDecl *VD,
+ OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind, Expr *Allocator) {
+ if (!VD->hasAttr<OMPAllocateDeclAttr>())
+ return false;
+ const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
+ Expr *PrevAllocator = A->getAllocator();
+ OMPAllocateDeclAttr::AllocatorTypeTy PrevAllocatorKind =
+ getAllocatorKind(S, Stack, PrevAllocator);
+ bool AllocatorsMatch = AllocatorKind == PrevAllocatorKind;
+ if (AllocatorsMatch &&
+ AllocatorKind == OMPAllocateDeclAttr::OMPUserDefinedMemAlloc &&
+ Allocator && PrevAllocator) {
+ const Expr *AE = Allocator->IgnoreParenImpCasts();
+ const Expr *PAE = PrevAllocator->IgnoreParenImpCasts();
+ llvm::FoldingSetNodeID AEId, PAEId;
+ AE->Profile(AEId, S.Context, /*Canonical=*/true);
+ PAE->Profile(PAEId, S.Context, /*Canonical=*/true);
+ AllocatorsMatch = AEId == PAEId;
+ }
+ if (!AllocatorsMatch) {
+ SmallString<256> AllocatorBuffer;
+ llvm::raw_svector_ostream AllocatorStream(AllocatorBuffer);
+ if (Allocator)
+ Allocator->printPretty(AllocatorStream, nullptr, S.getPrintingPolicy());
+ SmallString<256> PrevAllocatorBuffer;
+ llvm::raw_svector_ostream PrevAllocatorStream(PrevAllocatorBuffer);
+ if (PrevAllocator)
+ PrevAllocator->printPretty(PrevAllocatorStream, nullptr,
+ S.getPrintingPolicy());
+
+ SourceLocation AllocatorLoc =
+ Allocator ? Allocator->getExprLoc() : RefExpr->getExprLoc();
+ SourceRange AllocatorRange =
+ Allocator ? Allocator->getSourceRange() : RefExpr->getSourceRange();
+ SourceLocation PrevAllocatorLoc =
+ PrevAllocator ? PrevAllocator->getExprLoc() : A->getLocation();
+ SourceRange PrevAllocatorRange =
+ PrevAllocator ? PrevAllocator->getSourceRange() : A->getRange();
+ S.Diag(AllocatorLoc, diag::warn_omp_used_different_allocator)
+ << (Allocator ? 1 : 0) << AllocatorStream.str()
+ << (PrevAllocator ? 1 : 0) << PrevAllocatorStream.str()
+ << AllocatorRange;
+ S.Diag(PrevAllocatorLoc, diag::note_omp_previous_allocator)
+ << PrevAllocatorRange;
+ return true;
+ }
+ return false;
+}
+
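A minimal sketch of the mismatch this helper warns about, assuming <omp.h> provides the predefined allocator handles:

  #include <omp.h>
  int x;
  #pragma omp allocate(x) allocator(omp_high_bw_mem_alloc)
  #pragma omp allocate(x) allocator(omp_low_lat_mem_alloc)  // warn_omp_used_different_allocator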
+static void
+applyOMPAllocateAttribute(Sema &S, VarDecl *VD,
+ OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
+ Expr *Allocator, SourceRange SR) {
+ if (VD->hasAttr<OMPAllocateDeclAttr>())
+ return;
+ if (Allocator &&
+ (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
+ Allocator->isInstantiationDependent() ||
+ Allocator->containsUnexpandedParameterPack()))
+ return;
+ auto *A = OMPAllocateDeclAttr::CreateImplicit(S.Context, AllocatorKind,
+ Allocator, SR);
+ VD->addAttr(A);
+ if (ASTMutationListener *ML = S.Context.getASTMutationListener())
+ ML->DeclarationMarkedOpenMPAllocate(VD, A);
+}
+
+Sema::DeclGroupPtrTy Sema::ActOnOpenMPAllocateDirective(
+ SourceLocation Loc, ArrayRef<Expr *> VarList,
+ ArrayRef<OMPClause *> Clauses, DeclContext *Owner) {
+ assert(Clauses.size() <= 1 && "Expected at most one clause.");
+ Expr *Allocator = nullptr;
+ if (Clauses.empty()) {
+ // OpenMP 5.0, 2.11.3 allocate Directive, Restrictions.
+ // allocate directives that appear in a target region must specify an
+ // allocator clause unless a requires directive with the dynamic_allocators
+ // clause is present in the same compilation unit.
+ if (LangOpts.OpenMPIsDevice &&
+ !DSAStack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>())
+ targetDiag(Loc, diag::err_expected_allocator_clause);
+ } else {
+ Allocator = cast<OMPAllocatorClause>(Clauses.back())->getAllocator();
+ }
+ OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
+ getAllocatorKind(*this, DSAStack, Allocator);
+ SmallVector<Expr *, 8> Vars;
+ for (Expr *RefExpr : VarList) {
+ auto *DE = cast<DeclRefExpr>(RefExpr);
+ auto *VD = cast<VarDecl>(DE->getDecl());
+
+ // Check if this is a TLS variable or global register.
+ if (VD->getTLSKind() != VarDecl::TLS_None ||
+ VD->hasAttr<OMPThreadPrivateDeclAttr>() ||
+ (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
+ !VD->isLocalVarDecl()))
+ continue;
+
+ // If the variable is used several times in the allocate directive, the same
+ // allocator must be used.
+ if (checkPreviousOMPAllocateAttribute(*this, DSAStack, RefExpr, VD,
+ AllocatorKind, Allocator))
+ continue;
+
+ // OpenMP, 2.11.3 allocate Directive, Restrictions, C / C++
+ // If a list item has static storage duration, the allocator expression in the
+ // allocator clause must be a constant expression that evaluates to one of
+ // the predefined memory allocator values.
+ if (Allocator && VD->hasGlobalStorage()) {
+ if (AllocatorKind == OMPAllocateDeclAttr::OMPUserDefinedMemAlloc) {
+ Diag(Allocator->getExprLoc(),
+ diag::err_omp_expected_predefined_allocator)
+ << Allocator->getSourceRange();
+ bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ continue;
+ }
+ }
+
+ Vars.push_back(RefExpr);
+ applyOMPAllocateAttribute(*this, VD, AllocatorKind, Allocator,
+ DE->getSourceRange());
+ }
+ if (Vars.empty())
+ return nullptr;
+ if (!Owner)
+ Owner = getCurLexicalContext();
+ auto *D = OMPAllocateDecl::Create(Context, Owner, Loc, Vars, Clauses);
+ D->setAccess(AS_public);
+ Owner->addDecl(D);
+ return DeclGroupPtrTy::make(DeclGroupRef(D));
+}
+
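Illustrative only: the static-storage restriction enforced above, assuming <omp.h> declares omp_allocator_handle_t and the predefined allocators; 'my_alloc', 'buf' and 'buf2' are invented names:

  #include <omp.h>
  omp_allocator_handle_t my_alloc;    // user-defined allocator handle
  static double buf[1024], buf2[1024];
  #pragma omp allocate(buf) allocator(omp_const_mem_alloc)  // ok: predefined allocator
  #pragma omp allocate(buf2) allocator(my_alloc)            // err_omp_expected_predefined_allocator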
Sema::DeclGroupPtrTy
Sema::ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList) {
@@ -2120,6 +2489,27 @@ Sema::ActOnOpenMPRequiresDirective(SourceLocation Loc,
OMPRequiresDecl *Sema::CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList) {
+ /// For target-specific clauses, the requires directive cannot be
+ /// specified after the handling of any of the target regions in the
+ /// current compilation unit.
+ ArrayRef<SourceLocation> TargetLocations =
+ DSAStack->getEncounteredTargetLocs();
+ if (!TargetLocations.empty()) {
+ for (const OMPClause *CNew : ClauseList) {
+ // Check if any of the requires clauses affect target regions.
+ if (isa<OMPUnifiedSharedMemoryClause>(CNew) ||
+ isa<OMPUnifiedAddressClause>(CNew) ||
+ isa<OMPReverseOffloadClause>(CNew) ||
+ isa<OMPDynamicAllocatorsClause>(CNew)) {
+ Diag(Loc, diag::err_omp_target_before_requires)
+ << getOpenMPClauseName(CNew->getClauseKind());
+ for (SourceLocation TargetLoc : TargetLocations) {
+ Diag(TargetLoc, diag::note_omp_requires_encountered_target);
+ }
+ }
+ }
+ }
+
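Not part of the diff; the ordering this rejects, sketched with an invented function name:

  void early() {
  #pragma omp target
    { }
  }
  #pragma omp requires unified_shared_memory  // err_omp_target_before_requires:
                                              // a target region was already seen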
if (!DSAStack->hasDuplicateRequiresClause(ClauseList))
return OMPRequiresDecl::Create(Context, getCurLexicalContext(), Loc,
ClauseList);
@@ -2198,24 +2588,7 @@ class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
// Check implicitly captured variables.
if (!S->hasAssociatedStmt() || !S->getAssociatedStmt())
return;
- for (const CapturedStmt::Capture &Cap :
- S->getInnermostCapturedStmt()->captures()) {
- if (!Cap.capturesVariable())
- continue;
- VarDecl *VD = Cap.getCapturedVar();
- // Do not try to map the variable if it or its sub-component was mapped
- // already.
- if (isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
- Stack->checkMappableExprComponentListsForDecl(
- VD, /*CurrentRegionOnly=*/true,
- [](OMPClauseMappableExprCommon::MappableExprComponentListRef,
- OpenMPClauseKind) { return true; }))
- continue;
- DeclRefExpr *DRE = buildDeclRefExpr(
- SemaRef, VD, VD->getType().getNonLValueExprType(SemaRef.Context),
- Cap.getLocation(), /*RefersToCapture=*/true);
- Visit(DRE);
- }
+ visitSubCaptures(S->getInnermostCapturedStmt());
}
public:
@@ -2224,9 +2597,20 @@ public:
E->containsUnexpandedParameterPack() || E->isInstantiationDependent())
return;
if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
+ // Check the data-sharing rules for the expressions in the clauses.
+ if (!CS) {
+ if (auto *CED = dyn_cast<OMPCapturedExprDecl>(VD))
+ if (!CED->hasAttr<OMPCaptureNoInitAttr>()) {
+ Visit(CED->getInit());
+ return;
+ }
+ } else if (VD->isImplicit() || isa<OMPCapturedExprDecl>(VD))
+ // Do not analyze internal variables and do not enclose them into
+ // implicit clauses.
+ return;
VD = VD->getCanonicalDecl();
// Skip internally declared variables.
- if (VD->hasLocalStorage() && !CS->capturesVariable(VD))
+ if (VD->hasLocalStorage() && CS && !CS->capturesVariable(VD))
return;
DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
@@ -2237,8 +2621,9 @@ public:
// Skip internally declared static variables.
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
- if (VD->hasGlobalStorage() && !CS->capturesVariable(VD) &&
- (!Res || *Res != OMPDeclareTargetDeclAttr::MT_Link))
+ if (VD->hasGlobalStorage() && CS && !CS->capturesVariable(VD) &&
+ (Stack->hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() ||
+ !Res || *Res != OMPDeclareTargetDeclAttr::MT_Link))
return;
SourceLocation ELoc = E->getExprLoc();
@@ -2315,8 +2700,18 @@ public:
// Define implicit data-sharing attributes for task.
DVar = Stack->getImplicitDSA(VD, /*FromParent=*/false);
if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
- !Stack->isLoopControlVariable(VD).first)
+ !Stack->isLoopControlVariable(VD).first) {
ImplicitFirstprivate.push_back(E);
+ return;
+ }
+
+ // Store implicitly used globals with declare target link for parent
+ // target.
+ if (!isOpenMPTargetExecutionDirective(DKind) && Res &&
+ *Res == OMPDeclareTargetDeclAttr::MT_Link) {
+ Stack->addToParentTargetRegionLinkGlobals(E);
+ return;
+ }
}
}
void VisitMemberExpr(MemberExpr *E) {
@@ -2464,6 +2859,25 @@ public:
}
}
+ void visitSubCaptures(CapturedStmt *S) {
+ for (const CapturedStmt::Capture &Cap : S->captures()) {
+ if (!Cap.capturesVariable() && !Cap.capturesVariableByCopy())
+ continue;
+ VarDecl *VD = Cap.getCapturedVar();
+ // Do not try to map the variable if it or its sub-component was mapped
+ // already.
+ if (isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
+ Stack->checkMappableExprComponentListsForDecl(
+ VD, /*CurrentRegionOnly=*/true,
+ [](OMPClauseMappableExprCommon::MappableExprComponentListRef,
+ OpenMPClauseKind) { return true; }))
+ continue;
+ DeclRefExpr *DRE = buildDeclRefExpr(
+ SemaRef, VD, VD->getType().getNonLValueExprType(SemaRef.Context),
+ Cap.getLocation(), /*RefersToCapture=*/true);
+ Visit(DRE);
+ }
+ }
bool isErrorFound() const { return ErrorFound; }
ArrayRef<Expr *> getImplicitFirstprivate() const {
return ImplicitFirstprivate;
@@ -2474,7 +2888,13 @@ public:
}
DSAAttrChecker(DSAStackTy *S, Sema &SemaRef, CapturedStmt *CS)
- : Stack(S), SemaRef(SemaRef), ErrorFound(false), CS(CS) {}
+ : Stack(S), SemaRef(SemaRef), ErrorFound(false), CS(CS) {
+ // Process declare target link variables for the target directives.
+ if (isOpenMPTargetExecutionDirective(S->getCurrentDirective())) {
+ for (DeclRefExpr *E : Stack->getLinkGlobals())
+ Visit(E);
+ }
+ }
};
} // namespace
@@ -2802,6 +3222,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
break;
}
case OMPD_threadprivate:
+ case OMPD_allocate:
case OMPD_taskyield:
case OMPD_barrier:
case OMPD_taskwait:
@@ -2809,6 +3230,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_cancel:
case OMPD_flush:
case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_target:
case OMPD_end_declare_target:
@@ -2915,6 +3337,46 @@ public:
};
} // namespace
+void Sema::tryCaptureOpenMPLambdas(ValueDecl *V) {
+ // Capture variables captured by reference in lambdas for target-based
+ // directives.
+ if (!CurContext->isDependentContext() &&
+ (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) ||
+ isOpenMPTargetDataManagementDirective(
+ DSAStack->getCurrentDirective()))) {
+ QualType Type = V->getType();
+ if (const auto *RD = Type.getCanonicalType()
+ .getNonReferenceType()
+ ->getAsCXXRecordDecl()) {
+ bool SavedForceCaptureByReferenceInTargetExecutable =
+ DSAStack->isForceCaptureByReferenceInTargetExecutable();
+ DSAStack->setForceCaptureByReferenceInTargetExecutable(
+ /*V=*/true);
+ if (RD->isLambda()) {
+ llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
+ FieldDecl *ThisCapture;
+ RD->getCaptureFields(Captures, ThisCapture);
+ for (const LambdaCapture &LC : RD->captures()) {
+ if (LC.getCaptureKind() == LCK_ByRef) {
+ VarDecl *VD = LC.getCapturedVar();
+ DeclContext *VDC = VD->getDeclContext();
+ if (!VDC->Encloses(CurContext))
+ continue;
+ MarkVariableReferenced(LC.getLocation(), VD);
+ } else if (LC.getCaptureKind() == LCK_This) {
+ QualType ThisTy = getCurrentThisType();
+ if (!ThisTy.isNull() &&
+ Context.typesAreCompatible(ThisTy, ThisCapture->getType()))
+ CheckCXXThisCapture(LC.getLocation());
+ }
+ }
+ }
+ DSAStack->setForceCaptureByReferenceInTargetExecutable(
+ SavedForceCaptureByReferenceInTargetExecutable);
+ }
+ }
+}
+
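A sketch (not in this commit) of the lambda case handled above; 'run', 'body' and 'n' are illustrative names:

  void run(int &n) {
    auto body = [&n] { n += 1; };
  #pragma omp target
    body();   // the lambda's by-reference capture of 'n' is marked referenced
              // so it is captured (and mapped) for the target region as well
  }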
StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
ArrayRef<OMPClause *> Clauses) {
bool ErrorFound = false;
@@ -3004,6 +3466,7 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
return StmtError();
}
StmtResult SR = S;
+ unsigned CompletedRegions = 0;
for (OpenMPDirectiveKind ThisCaptureRegion : llvm::reverse(CaptureRegions)) {
// Mark all variables in private list clauses as used in inner region.
// Required for proper codegen of combined directives.
@@ -3025,6 +3488,8 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
}
}
}
+ if (++CompletedRegions == CaptureRegions.size())
+ DSAStack->setBodyComplete();
SR = ActOnCapturedRegionEnd(SR.get());
}
return SR;
@@ -3346,6 +3811,169 @@ static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
return ErrorFound;
}
+static std::pair<ValueDecl *, bool>
+getPrivateItem(Sema &S, Expr *&RefExpr, SourceLocation &ELoc,
+ SourceRange &ERange, bool AllowArraySection = false) {
+ if (RefExpr->isTypeDependent() || RefExpr->isValueDependent() ||
+ RefExpr->containsUnexpandedParameterPack())
+ return std::make_pair(nullptr, true);
+
+ // OpenMP [3.1, C/C++]
+ // A list item is a variable name.
+ // OpenMP [2.9.3.3, Restrictions, p.1]
+ // A variable that is part of another variable (as an array or
+ // structure element) cannot appear in a private clause.
+ RefExpr = RefExpr->IgnoreParens();
+ enum {
+ NoArrayExpr = -1,
+ ArraySubscript = 0,
+ OMPArraySection = 1
+ } IsArrayExpr = NoArrayExpr;
+ if (AllowArraySection) {
+ if (auto *ASE = dyn_cast_or_null<ArraySubscriptExpr>(RefExpr)) {
+ Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
+ while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
+ Base = TempASE->getBase()->IgnoreParenImpCasts();
+ RefExpr = Base;
+ IsArrayExpr = ArraySubscript;
+ } else if (auto *OASE = dyn_cast_or_null<OMPArraySectionExpr>(RefExpr)) {
+ Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
+ while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
+ Base = TempOASE->getBase()->IgnoreParenImpCasts();
+ while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
+ Base = TempASE->getBase()->IgnoreParenImpCasts();
+ RefExpr = Base;
+ IsArrayExpr = OMPArraySection;
+ }
+ }
+ ELoc = RefExpr->getExprLoc();
+ ERange = RefExpr->getSourceRange();
+ RefExpr = RefExpr->IgnoreParenImpCasts();
+ auto *DE = dyn_cast_or_null<DeclRefExpr>(RefExpr);
+ auto *ME = dyn_cast_or_null<MemberExpr>(RefExpr);
+ if ((!DE || !isa<VarDecl>(DE->getDecl())) &&
+ (S.getCurrentThisType().isNull() || !ME ||
+ !isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()) ||
+ !isa<FieldDecl>(ME->getMemberDecl()))) {
+ if (IsArrayExpr != NoArrayExpr) {
+ S.Diag(ELoc, diag::err_omp_expected_base_var_name) << IsArrayExpr
+ << ERange;
+ } else {
+ S.Diag(ELoc,
+ AllowArraySection
+ ? diag::err_omp_expected_var_name_member_expr_or_array_item
+ : diag::err_omp_expected_var_name_member_expr)
+ << (S.getCurrentThisType().isNull() ? 0 : 1) << ERange;
+ }
+ return std::make_pair(nullptr, false);
+ }
+ return std::make_pair(
+ getCanonicalDecl(DE ? DE->getDecl() : ME->getMemberDecl()), false);
+}
+
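For illustration (not part of the change), the kinds of list items this helper accepts or rejects:

  struct S { int a; } s;
  int x;
  void f() {
  #pragma omp parallel private(x)    // a plain variable name: accepted
    { }
  #pragma omp parallel private(s.a)  // part of another variable:
    { }                              // err_omp_expected_var_name_member_expr
  }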
+static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
+ ArrayRef<OMPClause *> Clauses) {
+ assert(!S.CurContext->isDependentContext() &&
+ "Expected non-dependent context.");
+ auto AllocateRange =
+ llvm::make_filter_range(Clauses, OMPAllocateClause::classof);
+ llvm::DenseMap<CanonicalDeclPtr<Decl>, CanonicalDeclPtr<VarDecl>>
+ DeclToCopy;
+ auto PrivateRange = llvm::make_filter_range(Clauses, [](const OMPClause *C) {
+ return isOpenMPPrivate(C->getClauseKind());
+ });
+ for (OMPClause *Cl : PrivateRange) {
+ MutableArrayRef<Expr *>::iterator I, It, Et;
+ if (Cl->getClauseKind() == OMPC_private) {
+ auto *PC = cast<OMPPrivateClause>(Cl);
+ I = PC->private_copies().begin();
+ It = PC->varlist_begin();
+ Et = PC->varlist_end();
+ } else if (Cl->getClauseKind() == OMPC_firstprivate) {
+ auto *PC = cast<OMPFirstprivateClause>(Cl);
+ I = PC->private_copies().begin();
+ It = PC->varlist_begin();
+ Et = PC->varlist_end();
+ } else if (Cl->getClauseKind() == OMPC_lastprivate) {
+ auto *PC = cast<OMPLastprivateClause>(Cl);
+ I = PC->private_copies().begin();
+ It = PC->varlist_begin();
+ Et = PC->varlist_end();
+ } else if (Cl->getClauseKind() == OMPC_linear) {
+ auto *PC = cast<OMPLinearClause>(Cl);
+ I = PC->privates().begin();
+ It = PC->varlist_begin();
+ Et = PC->varlist_end();
+ } else if (Cl->getClauseKind() == OMPC_reduction) {
+ auto *PC = cast<OMPReductionClause>(Cl);
+ I = PC->privates().begin();
+ It = PC->varlist_begin();
+ Et = PC->varlist_end();
+ } else if (Cl->getClauseKind() == OMPC_task_reduction) {
+ auto *PC = cast<OMPTaskReductionClause>(Cl);
+ I = PC->privates().begin();
+ It = PC->varlist_begin();
+ Et = PC->varlist_end();
+ } else if (Cl->getClauseKind() == OMPC_in_reduction) {
+ auto *PC = cast<OMPInReductionClause>(Cl);
+ I = PC->privates().begin();
+ It = PC->varlist_begin();
+ Et = PC->varlist_end();
+ } else {
+ llvm_unreachable("Expected private clause.");
+ }
+ for (Expr *E : llvm::make_range(It, Et)) {
+ if (!*I) {
+ ++I;
+ continue;
+ }
+ SourceLocation ELoc;
+ SourceRange ERange;
+ Expr *SimpleRefExpr = E;
+ auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange,
+ /*AllowArraySection=*/true);
+ DeclToCopy.try_emplace(Res.first,
+ cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()));
+ ++I;
+ }
+ }
+ for (OMPClause *C : AllocateRange) {
+ auto *AC = cast<OMPAllocateClause>(C);
+ OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
+ getAllocatorKind(S, Stack, AC->getAllocator());
+ // OpenMP, 2.11.4 allocate Clause, Restrictions.
+ // For task, taskloop or target directives, allocation requests to memory
+ // allocators with the trait access set to thread result in unspecified
+ // behavior.
+ if (AllocatorKind == OMPAllocateDeclAttr::OMPThreadMemAlloc &&
+ (isOpenMPTaskingDirective(Stack->getCurrentDirective()) ||
+ isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()))) {
+ S.Diag(AC->getAllocator()->getExprLoc(),
+ diag::warn_omp_allocate_thread_on_task_target_directive)
+ << getOpenMPDirectiveName(Stack->getCurrentDirective());
+ }
+ for (Expr *E : AC->varlists()) {
+ SourceLocation ELoc;
+ SourceRange ERange;
+ Expr *SimpleRefExpr = E;
+ auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange);
+ ValueDecl *VD = Res.first;
+ DSAStackTy::DSAVarData Data = Stack->getTopDSA(VD, /*FromParent=*/false);
+ if (!isOpenMPPrivate(Data.CKind)) {
+ S.Diag(E->getExprLoc(),
+ diag::err_omp_expected_private_copy_for_allocate);
+ continue;
+ }
+ VarDecl *PrivateVD = DeclToCopy[VD];
+ if (checkPreviousOMPAllocateAttribute(S, Stack, E, PrivateVD,
+ AllocatorKind, AC->getAllocator()))
+ continue;
+ applyOMPAllocateAttribute(S, PrivateVD, AllocatorKind, AC->getAllocator(),
+ E->getSourceRange());
+ }
+ }
+}
+
StmtResult Sema::ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
@@ -3371,6 +3999,17 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
while (--ThisCaptureLevel >= 0)
S = cast<CapturedStmt>(S)->getCapturedStmt();
DSAChecker.Visit(S);
+ if (!isOpenMPTargetDataManagementDirective(Kind) &&
+ !isOpenMPTaskingDirective(Kind)) {
+ // Visit subcaptures to generate implicit clauses for captured vars.
+ auto *CS = cast<CapturedStmt>(AStmt);
+ SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
+ getOpenMPCaptureRegions(CaptureRegions, Kind);
+ // Ignore outer tasking regions for target directives.
+ if (CaptureRegions.size() > 1 && CaptureRegions.front() == OMPD_task)
+ CS = cast<CapturedStmt>(CS->getCapturedStmt());
+ DSAChecker.visitSubCaptures(CS);
+ }
if (DSAChecker.isErrorFound())
return StmtError();
// Generate list of implicitly defined firstprivate variables.
@@ -3401,11 +4040,12 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
}
}
if (!ImplicitMaps.empty()) {
+ CXXScopeSpec MapperIdScopeSpec;
+ DeclarationNameInfo MapperId;
if (OMPClause *Implicit = ActOnOpenMPMapClause(
- llvm::None, llvm::None, OMPC_MAP_tofrom,
- /*IsMapTypeImplicit=*/true, SourceLocation(), SourceLocation(),
- ImplicitMaps, SourceLocation(), SourceLocation(),
- SourceLocation())) {
+ llvm::None, llvm::None, MapperIdScopeSpec, MapperId,
+ OMPC_MAP_tofrom, /*IsMapTypeImplicit=*/true, SourceLocation(),
+ SourceLocation(), ImplicitMaps, OMPVarListLocTy())) {
ClausesWithImplicit.emplace_back(Implicit);
ErrorFound |=
cast<OMPMapClause>(Implicit)->varlist_size() != ImplicitMaps.size();
@@ -3656,7 +4296,9 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_threadprivate:
+ case OMPD_allocate:
case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_requires:
llvm_unreachable("OpenMP Directive is not allowed");
@@ -3664,11 +4306,99 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
llvm_unreachable("Unknown OpenMP directive");
}
+ ErrorFound = Res.isInvalid() || ErrorFound;
+
+ // Check variables in the clauses if default(none) was specified.
+ if (DSAStack->getDefaultDSA() == DSA_none) {
+ DSAAttrChecker DSAChecker(DSAStack, *this, nullptr);
+ for (OMPClause *C : Clauses) {
+ switch (C->getClauseKind()) {
+ case OMPC_num_threads:
+ case OMPC_dist_schedule:
+ // Do not analyze if there is no parent teams directive.
+ if (isOpenMPTeamsDirective(DSAStack->getCurrentDirective()))
+ break;
+ continue;
+ case OMPC_if:
+ if (isOpenMPTeamsDirective(DSAStack->getCurrentDirective()) &&
+ cast<OMPIfClause>(C)->getNameModifier() != OMPD_target)
+ break;
+ continue;
+ case OMPC_schedule:
+ break;
+ case OMPC_ordered:
+ case OMPC_device:
+ case OMPC_num_teams:
+ case OMPC_thread_limit:
+ case OMPC_priority:
+ case OMPC_grainsize:
+ case OMPC_num_tasks:
+ case OMPC_hint:
+ case OMPC_collapse:
+ case OMPC_safelen:
+ case OMPC_simdlen:
+ case OMPC_final:
+ case OMPC_default:
+ case OMPC_proc_bind:
+ case OMPC_private:
+ case OMPC_firstprivate:
+ case OMPC_lastprivate:
+ case OMPC_shared:
+ case OMPC_reduction:
+ case OMPC_task_reduction:
+ case OMPC_in_reduction:
+ case OMPC_linear:
+ case OMPC_aligned:
+ case OMPC_copyin:
+ case OMPC_copyprivate:
+ case OMPC_nowait:
+ case OMPC_untied:
+ case OMPC_mergeable:
+ case OMPC_allocate:
+ case OMPC_read:
+ case OMPC_write:
+ case OMPC_update:
+ case OMPC_capture:
+ case OMPC_seq_cst:
+ case OMPC_depend:
+ case OMPC_threads:
+ case OMPC_simd:
+ case OMPC_map:
+ case OMPC_nogroup:
+ case OMPC_defaultmap:
+ case OMPC_to:
+ case OMPC_from:
+ case OMPC_use_device_ptr:
+ case OMPC_is_device_ptr:
+ continue;
+ case OMPC_allocator:
+ case OMPC_flush:
+ case OMPC_threadprivate:
+ case OMPC_uniform:
+ case OMPC_unknown:
+ case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
+ case OMPC_atomic_default_mem_order:
+ llvm_unreachable("Unexpected clause");
+ }
+ for (Stmt *CC : C->children()) {
+ if (CC)
+ DSAChecker.Visit(CC);
+ }
+ }
+ for (auto &P : DSAChecker.getVarsWithInheritedDSA())
+ VarsWithInheritedDSA[P.getFirst()] = P.getSecond();
+ }
for (const auto &P : VarsWithInheritedDSA) {
+ if (P.getFirst()->isImplicit() || isa<OMPCapturedExprDecl>(P.getFirst()))
+ continue;
+ ErrorFound = true;
Diag(P.second->getExprLoc(), diag::err_omp_no_dsa_for_variable)
<< P.first << P.second->getSourceRange();
+ Diag(DSAStack->getDefaultDSALocation(), diag::note_omp_default_dsa_none);
}
- ErrorFound = !VarsWithInheritedDSA.empty() || ErrorFound;
if (!AllowedNameModifiers.empty())
ErrorFound = checkIfClauses(*this, Kind, Clauses, AllowedNameModifiers) ||
@@ -3676,6 +4406,23 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
if (ErrorFound)
return StmtError();
+
+ if (!(Res.getAs<OMPExecutableDirective>()->isStandaloneDirective())) {
+ Res.getAs<OMPExecutableDirective>()
+ ->getStructuredBlock()
+ ->setIsOMPStructuredBlock(true);
+ }
+
+ if (!CurContext->isDependentContext() &&
+ isOpenMPTargetExecutionDirective(Kind) &&
+ !(DSAStack->hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() ||
+ DSAStack->hasRequiresDeclWithClause<OMPUnifiedAddressClause>() ||
+ DSAStack->hasRequiresDeclWithClause<OMPReverseOffloadClause>() ||
+ DSAStack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>())) {
+ // Register the target directive location with the DSA stack.
+ DSAStack->addTargetDirLocation(StartLoc);
+ }
+
return Res;
}
@@ -3953,6 +4700,8 @@ namespace {
class OpenMPIterationSpaceChecker {
/// Reference to Sema.
Sema &SemaRef;
+ /// Data-sharing stack.
+ DSAStackTy &Stack;
/// A location for diagnostics (when there is no some better location).
SourceLocation DefaultLoc;
/// A location for diagnostics (when increment is not compatible).
@@ -3984,10 +4733,22 @@ class OpenMPIterationSpaceChecker {
bool TestIsStrictOp = false;
/// This flag is true when step is subtracted on each iteration.
bool SubtractStep = false;
+ /// The outer loop counter this loop depends on (if any).
+ const ValueDecl *DepDecl = nullptr;
+ /// Contains the number of the loop (starting from 1) on which the loop
+ /// counter init expression of this loop depends.
+ Optional<unsigned> InitDependOnLC;
+ /// Contains the number of the loop (starting from 1) on which the loop
+ /// counter condition expression of this loop depends.
+ Optional<unsigned> CondDependOnLC;
+ /// Checks if the provided statement depends on the loop counter.
+ Optional<unsigned> doesDependOnLoopCounter(const Stmt *S, bool IsInitializer);
public:
- OpenMPIterationSpaceChecker(Sema &SemaRef, SourceLocation DefaultLoc)
- : SemaRef(SemaRef), DefaultLoc(DefaultLoc), ConditionLoc(DefaultLoc) {}
+ OpenMPIterationSpaceChecker(Sema &SemaRef, DSAStackTy &Stack,
+ SourceLocation DefaultLoc)
+ : SemaRef(SemaRef), Stack(Stack), DefaultLoc(DefaultLoc),
+ ConditionLoc(DefaultLoc) {}
/// Check init-expr for canonical loop form and save loop counter
/// variable - #Var and its initialization value - #LB.
bool checkAndSetInit(Stmt *S, bool EmitDiags = true);
@@ -4009,6 +4770,8 @@ public:
SourceRange getIncrementSrcRange() const { return IncrementSrcRange; }
/// True if the step should be subtracted.
bool shouldSubtractStep() const { return SubtractStep; }
+ /// True if the compare operator is strict (<, > or !=).
+ bool isStrictTestOp() const { return TestIsStrictOp; }
/// Build the expression to calculate the number of iterations.
Expr *buildNumIterations(
Scope *S, const bool LimitedType,
@@ -4043,7 +4806,8 @@ private:
/// expression.
bool checkAndSetIncRHS(Expr *RHS);
/// Helper to set loop counter variable and its initializer.
- bool setLCDeclAndLB(ValueDecl *NewLCDecl, Expr *NewDeclRefExpr, Expr *NewLB);
+ bool setLCDeclAndLB(ValueDecl *NewLCDecl, Expr *NewDeclRefExpr, Expr *NewLB,
+ bool EmitDiags);
/// Helper to set upper bound.
bool setUB(Expr *NewUB, llvm::Optional<bool> LessOp, bool StrictOp,
SourceRange SR, SourceLocation SL);
@@ -4063,7 +4827,7 @@ bool OpenMPIterationSpaceChecker::dependent() const {
bool OpenMPIterationSpaceChecker::setLCDeclAndLB(ValueDecl *NewLCDecl,
Expr *NewLCRefExpr,
- Expr *NewLB) {
+ Expr *NewLB, bool EmitDiags) {
// State consistency checking to ensure correct usage.
assert(LCDecl == nullptr && LB == nullptr && LCRef == nullptr &&
UB == nullptr && Step == nullptr && !TestIsLessOp && !TestIsStrictOp);
@@ -4078,10 +4842,13 @@ bool OpenMPIterationSpaceChecker::setLCDeclAndLB(ValueDecl *NewLCDecl,
CE->getNumArgs() > 0 && CE->getArg(0) != nullptr)
NewLB = CE->getArg(0)->IgnoreParenImpCasts();
LB = NewLB;
+ if (EmitDiags)
+ InitDependOnLC = doesDependOnLoopCounter(LB, /*IsInitializer=*/true);
return false;
}
-bool OpenMPIterationSpaceChecker::setUB(Expr *NewUB, llvm::Optional<bool> LessOp,
+bool OpenMPIterationSpaceChecker::setUB(Expr *NewUB,
+ llvm::Optional<bool> LessOp,
bool StrictOp, SourceRange SR,
SourceLocation SL) {
// State consistency checking to ensure correct usage.
@@ -4095,6 +4862,7 @@ bool OpenMPIterationSpaceChecker::setUB(Expr *NewUB, llvm::Optional<bool> LessOp
TestIsStrictOp = StrictOp;
ConditionSrcRange = SR;
ConditionLoc = SL;
+ CondDependOnLC = doesDependOnLoopCounter(UB, /*IsInitializer=*/false);
return false;
}
@@ -4160,6 +4928,110 @@ bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) {
return false;
}
+namespace {
+/// Checker for non-rectangular loops. Checks if the initializer or condition
+/// expression references the loop counter variable.
+class LoopCounterRefChecker final
+ : public ConstStmtVisitor<LoopCounterRefChecker, bool> {
+ Sema &SemaRef;
+ DSAStackTy &Stack;
+ const ValueDecl *CurLCDecl = nullptr;
+ const ValueDecl *DepDecl = nullptr;
+ const ValueDecl *PrevDepDecl = nullptr;
+ bool IsInitializer = true;
+ unsigned BaseLoopId = 0;
+ bool checkDecl(const Expr *E, const ValueDecl *VD) {
+ if (getCanonicalDecl(VD) == getCanonicalDecl(CurLCDecl)) {
+ SemaRef.Diag(E->getExprLoc(), diag::err_omp_stmt_depends_on_loop_counter)
+ << (IsInitializer ? 0 : 1);
+ return false;
+ }
+ const auto &&Data = Stack.isLoopControlVariable(VD);
+ // OpenMP, 2.9.1 Canonical Loop Form, Restrictions.
+ // The loop iterator on which we depend may not have a random access
+ // iterator type.
+ if (Data.first && VD->getType()->isRecordType()) {
+ SmallString<128> Name;
+ llvm::raw_svector_ostream OS(Name);
+ VD->getNameForDiagnostic(OS, SemaRef.getPrintingPolicy(),
+ /*Qualified=*/true);
+ SemaRef.Diag(E->getExprLoc(),
+ diag::err_omp_wrong_dependency_iterator_type)
+ << OS.str();
+ SemaRef.Diag(VD->getLocation(), diag::note_previous_decl) << VD;
+ return false;
+ }
+ if (Data.first &&
+ (DepDecl || (PrevDepDecl &&
+ getCanonicalDecl(VD) != getCanonicalDecl(PrevDepDecl)))) {
+ if (!DepDecl && PrevDepDecl)
+ DepDecl = PrevDepDecl;
+ SmallString<128> Name;
+ llvm::raw_svector_ostream OS(Name);
+ DepDecl->getNameForDiagnostic(OS, SemaRef.getPrintingPolicy(),
+ /*Qualified=*/true);
+ SemaRef.Diag(E->getExprLoc(),
+ diag::err_omp_invariant_or_linear_dependency)
+ << OS.str();
+ return false;
+ }
+ if (Data.first) {
+ DepDecl = VD;
+ BaseLoopId = Data.first;
+ }
+ return Data.first;
+ }
+
+public:
+ bool VisitDeclRefExpr(const DeclRefExpr *E) {
+ const ValueDecl *VD = E->getDecl();
+ if (isa<VarDecl>(VD))
+ return checkDecl(E, VD);
+ return false;
+ }
+ bool VisitMemberExpr(const MemberExpr *E) {
+ if (isa<CXXThisExpr>(E->getBase()->IgnoreParens())) {
+ const ValueDecl *VD = E->getMemberDecl();
+ if (isa<VarDecl>(VD) || isa<FieldDecl>(VD))
+ return checkDecl(E, VD);
+ }
+ return false;
+ }
+ bool VisitStmt(const Stmt *S) {
+ bool Res = true;
+ for (const Stmt *Child : S->children())
+ Res = Child && Visit(Child) && Res;
+ return Res;
+ }
+ explicit LoopCounterRefChecker(Sema &SemaRef, DSAStackTy &Stack,
+ const ValueDecl *CurLCDecl, bool IsInitializer,
+ const ValueDecl *PrevDepDecl = nullptr)
+ : SemaRef(SemaRef), Stack(Stack), CurLCDecl(CurLCDecl),
+ PrevDepDecl(PrevDepDecl), IsInitializer(IsInitializer) {}
+ unsigned getBaseLoopId() const {
+ assert(CurLCDecl && "Expected loop dependency.");
+ return BaseLoopId;
+ }
+ const ValueDecl *getDepDecl() const {
+ assert(CurLCDecl && "Expected loop dependency.");
+ return DepDecl;
+ }
+};
+} // namespace
+
+Optional<unsigned>
+OpenMPIterationSpaceChecker::doesDependOnLoopCounter(const Stmt *S,
+ bool IsInitializer) {
+ // Check for the non-rectangular loops.
+ LoopCounterRefChecker LoopStmtChecker(SemaRef, Stack, LCDecl, IsInitializer,
+ DepDecl);
+ if (LoopStmtChecker.Visit(S)) {
+ DepDecl = LoopStmtChecker.getDepDecl();
+ return LoopStmtChecker.getBaseLoopId();
+ }
+ return llvm::None;
+}
+
bool OpenMPIterationSpaceChecker::checkAndSetInit(Stmt *S, bool EmitDiags) {
// Check init-expr for canonical loop form and save loop counter
// variable - #Var and its initialization value - #LB.
@@ -4188,13 +5060,15 @@ bool OpenMPIterationSpaceChecker::checkAndSetInit(Stmt *S, bool EmitDiags) {
if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
if (auto *CED = dyn_cast<OMPCapturedExprDecl>(DRE->getDecl()))
if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
- return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
- return setLCDeclAndLB(DRE->getDecl(), DRE, BO->getRHS());
+ return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS(),
+ EmitDiags);
+ return setLCDeclAndLB(DRE->getDecl(), DRE, BO->getRHS(), EmitDiags);
}
if (auto *ME = dyn_cast<MemberExpr>(LHS)) {
if (ME->isArrow() &&
isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
- return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
+ return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS(),
+ EmitDiags);
}
}
} else if (auto *DS = dyn_cast<DeclStmt>(S)) {
@@ -4211,7 +5085,7 @@ bool OpenMPIterationSpaceChecker::checkAndSetInit(Stmt *S, bool EmitDiags) {
buildDeclRefExpr(SemaRef, Var,
Var->getType().getNonReferenceType(),
DS->getBeginLoc()),
- Var->getInit());
+ Var->getInit(), EmitDiags);
}
}
}
@@ -4221,13 +5095,15 @@ bool OpenMPIterationSpaceChecker::checkAndSetInit(Stmt *S, bool EmitDiags) {
if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
if (auto *CED = dyn_cast<OMPCapturedExprDecl>(DRE->getDecl()))
if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
- return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
- return setLCDeclAndLB(DRE->getDecl(), DRE, CE->getArg(1));
+ return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS(),
+ EmitDiags);
+ return setLCDeclAndLB(DRE->getDecl(), DRE, CE->getArg(1), EmitDiags);
}
if (auto *ME = dyn_cast<MemberExpr>(LHS)) {
if (ME->isArrow() &&
isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
- return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
+ return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS(),
+ EmitDiags);
}
}
}
@@ -4581,7 +5457,7 @@ Expr *OpenMPIterationSpaceChecker::buildPreCond(
/*AllowExplicit=*/true);
}
SemaRef.getDiagnostics().setSuppressAllDiagnostics(Suppress);
- // Otherwise use original loop conditon and evaluate it in runtime.
+ // Otherwise use original loop condition and evaluate it in runtime.
return CondExpr.isUsable() ? CondExpr.get() : Cond;
}
@@ -4602,8 +5478,7 @@ DeclRefExpr *OpenMPIterationSpaceChecker::buildCounterVar(
Captures.insert(std::make_pair(LCRef, Ref));
return Ref;
}
- return buildDeclRefExpr(SemaRef, VD, VD->getType().getNonReferenceType(),
- DefaultLoc);
+ return cast<DeclRefExpr>(LCRef);
}
Expr *OpenMPIterationSpaceChecker::buildPrivateCounterVar() const {
@@ -4648,10 +5523,12 @@ Expr *OpenMPIterationSpaceChecker::buildOrderedLoopData(
if (VarType->isIntegerType() || VarType->isPointerType() ||
SemaRef.getLangOpts().CPlusPlus) {
// Upper - Lower
- Expr *Upper =
- TestIsLessOp.getValue() ? Cnt : tryBuildCapture(SemaRef, UB, Captures).get();
- Expr *Lower =
- TestIsLessOp.getValue() ? tryBuildCapture(SemaRef, LB, Captures).get() : Cnt;
+ Expr *Upper = TestIsLessOp.getValue()
+ ? Cnt
+ : tryBuildCapture(SemaRef, UB, Captures).get();
+ Expr *Lower = TestIsLessOp.getValue()
+ ? tryBuildCapture(SemaRef, LB, Captures).get()
+ : Cnt;
if (!Upper || !Lower)
return nullptr;
@@ -4687,6 +5564,9 @@ Expr *OpenMPIterationSpaceChecker::buildOrderedLoopData(
/// Iteration space of a single for loop.
struct LoopIterationSpace final {
+ /// True if the condition operator is the strict compare operator (<, > or
+ /// !=).
+ bool IsStrictCompare = false;
/// Condition of the loop.
Expr *PreCond = nullptr;
/// This expression calculates the number of iterations in the loop.
@@ -4720,7 +5600,7 @@ void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
if (AssociatedLoops > 0 &&
isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
DSAStack->loopStart();
- OpenMPIterationSpaceChecker ISC(*this, ForLoc);
+ OpenMPIterationSpaceChecker ISC(*this, *DSAStack, ForLoc);
if (!ISC.checkAndSetInit(Init, /*EmitDiags=*/false)) {
if (ValueDecl *D = ISC.getLoopDecl()) {
auto *VD = dyn_cast<VarDecl>(D);
@@ -4786,7 +5666,7 @@ static bool checkOpenMPIterationSpace(
}
assert(For->getBody());
- OpenMPIterationSpaceChecker ISC(SemaRef, For->getForLoc());
+ OpenMPIterationSpaceChecker ISC(SemaRef, DSA, For->getForLoc());
// Check init.
Stmt *Init = For->getInit();
@@ -4840,12 +5720,14 @@ static bool checkOpenMPIterationSpace(
? ((NestedLoopCount == 1) ? OMPC_linear : OMPC_lastprivate)
: OMPC_private;
if (((isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
- DVar.CKind != PredeterminedCKind) ||
+ DVar.CKind != PredeterminedCKind && DVar.RefExpr &&
+ (SemaRef.getLangOpts().OpenMP <= 45 ||
+ (DVar.CKind != OMPC_lastprivate && DVar.CKind != OMPC_private))) ||
((isOpenMPWorksharingDirective(DKind) || DKind == OMPD_taskloop ||
isOpenMPDistributeDirective(DKind)) &&
!isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
DVar.CKind != OMPC_private && DVar.CKind != OMPC_lastprivate)) &&
- (DVar.CKind != OMPC_private || DVar.RefExpr != nullptr)) {
+ (DVar.CKind != OMPC_private || DVar.RefExpr)) {
SemaRef.Diag(Init->getBeginLoc(), diag::err_omp_loop_var_dsa)
<< getOpenMPClauseName(DVar.CKind) << getOpenMPDirectiveName(DKind)
<< getOpenMPClauseName(PredeterminedCKind);
@@ -4859,10 +5741,7 @@ static bool checkOpenMPIterationSpace(
// lastprivate (for simd directives with several collapsed or ordered
// loops).
if (DVar.CKind == OMPC_unknown)
- DVar = DSA.hasDSA(LCDecl, isOpenMPPrivate,
- [](OpenMPDirectiveKind) -> bool { return true; },
- /*FromParent=*/false);
- DSA.addDSA(LCDecl, LoopDeclRefExpr, PredeterminedCKind);
+ DSA.addDSA(LCDecl, LoopDeclRefExpr, PredeterminedCKind);
}
assert(isOpenMPLoopDirective(DKind) && "DSA for non-loop vars");
@@ -4893,6 +5772,7 @@ static bool checkOpenMPIterationSpace(
ResultIterSpace.CondSrcRange = ISC.getConditionSrcRange();
ResultIterSpace.IncSrcRange = ISC.getIncrementSrcRange();
ResultIterSpace.Subtract = ISC.shouldSubtractStep();
+ ResultIterSpace.IsStrictCompare = ISC.isStrictTestOp();
HasErrors |= (ResultIterSpace.PreCond == nullptr ||
ResultIterSpace.NumIterations == nullptr ||
@@ -5117,14 +5997,21 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
if (CollapseLoopCountExpr) {
// Found 'collapse' clause - calculate collapse number.
Expr::EvalResult Result;
- if (CollapseLoopCountExpr->EvaluateAsInt(Result, SemaRef.getASTContext()))
+ if (!CollapseLoopCountExpr->isValueDependent() &&
+ CollapseLoopCountExpr->EvaluateAsInt(Result, SemaRef.getASTContext())) {
NestedLoopCount = Result.Val.getInt().getLimitedValue();
+ } else {
+ Built.clear(/*Size=*/1);
+ return 1;
+ }
}
unsigned OrderedLoopCount = 1;
if (OrderedLoopCountExpr) {
// Found 'ordered' clause - calculate collapse number.
Expr::EvalResult EVResult;
- if (OrderedLoopCountExpr->EvaluateAsInt(EVResult, SemaRef.getASTContext())) {
+ if (!OrderedLoopCountExpr->isValueDependent() &&
+ OrderedLoopCountExpr->EvaluateAsInt(EVResult,
+ SemaRef.getASTContext())) {
llvm::APSInt Result = EVResult.Val.getInt();
if (Result.getLimitedValue() < NestedLoopCount) {
SemaRef.Diag(OrderedLoopCountExpr->getExprLoc(),
@@ -5135,13 +6022,16 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
<< CollapseLoopCountExpr->getSourceRange();
}
OrderedLoopCount = Result.getLimitedValue();
+ } else {
+ Built.clear(/*Size=*/1);
+ return 1;
}
}
// This is helper routine for loop directives (e.g., 'for', 'simd',
// 'for simd', etc.).
llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
- SmallVector<LoopIterationSpace, 4> IterSpaces;
- IterSpaces.resize(std::max(OrderedLoopCount, NestedLoopCount));
+ SmallVector<LoopIterationSpace, 4> IterSpaces(
+ std::max(OrderedLoopCount, NestedLoopCount));
Stmt *CurStmt = AStmt->IgnoreContainers(/* IgnoreCaptured */ true);
for (unsigned Cnt = 0; Cnt < NestedLoopCount; ++Cnt) {
if (checkOpenMPIterationSpace(
@@ -5447,25 +6337,55 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
}
}
- // Loop condition (IV < NumIterations) or (IV <= UB) for worksharing loops.
+ bool UseStrictCompare =
+ RealVType->hasUnsignedIntegerRepresentation() &&
+ llvm::all_of(IterSpaces, [](const LoopIterationSpace &LIS) {
+ return LIS.IsStrictCompare;
+ });
+ // Loop condition (IV < NumIterations) or (IV <= UB or IV < UB + 1 (for
+ // unsigned IV)) for worksharing loops.
SourceLocation CondLoc = AStmt->getBeginLoc();
+ Expr *BoundUB = UB.get();
+ if (UseStrictCompare) {
+ BoundUB =
+ SemaRef
+ .BuildBinOp(CurScope, CondLoc, BO_Add, BoundUB,
+ SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get())
+ .get();
+ BoundUB =
+ SemaRef.ActOnFinishFullExpr(BoundUB, /*DiscardedValue*/ false).get();
+ }
ExprResult Cond =
(isOpenMPWorksharingDirective(DKind) ||
isOpenMPTaskLoopDirective(DKind) || isOpenMPDistributeDirective(DKind))
- ? SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), UB.get())
+ ? SemaRef.BuildBinOp(CurScope, CondLoc,
+ UseStrictCompare ? BO_LT : BO_LE, IV.get(),
+ BoundUB)
: SemaRef.BuildBinOp(CurScope, CondLoc, BO_LT, IV.get(),
NumIterations.get());
ExprResult CombDistCond;
if (isOpenMPLoopBoundSharingDirective(DKind)) {
- CombDistCond =
- SemaRef.BuildBinOp(
- CurScope, CondLoc, BO_LT, IV.get(), NumIterations.get());
+ CombDistCond = SemaRef.BuildBinOp(CurScope, CondLoc, BO_LT, IV.get(),
+ NumIterations.get());
}
ExprResult CombCond;
if (isOpenMPLoopBoundSharingDirective(DKind)) {
+ Expr *BoundCombUB = CombUB.get();
+ if (UseStrictCompare) {
+ BoundCombUB =
+ SemaRef
+ .BuildBinOp(
+ CurScope, CondLoc, BO_Add, BoundCombUB,
+ SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get())
+ .get();
+ BoundCombUB =
+ SemaRef.ActOnFinishFullExpr(BoundCombUB, /*DiscardedValue*/ false)
+ .get();
+ }
CombCond =
- SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), CombUB.get());
+ SemaRef.BuildBinOp(CurScope, CondLoc, UseStrictCompare ? BO_LT : BO_LE,
+ IV.get(), BoundCombUB);
}
// Loop increment (IV = IV + 1)
SourceLocation IncLoc = AStmt->getBeginLoc();
@@ -5542,7 +6462,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
SourceLocation DistIncLoc = AStmt->getBeginLoc();
ExprResult DistCond, DistInc, PrevEUB, ParForInDistCond;
if (isOpenMPLoopBoundSharingDirective(DKind)) {
- DistCond = SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), UB.get());
+ DistCond = SemaRef.BuildBinOp(
+ CurScope, CondLoc, UseStrictCompare ? BO_LT : BO_LE, IV.get(), BoundUB);
assert(DistCond.isUsable() && "distribute cond expr was not built");
DistInc =
@@ -5566,10 +6487,24 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
PrevEUB =
SemaRef.ActOnFinishFullExpr(PrevEUB.get(), /*DiscardedValue*/ false);
- // Build IV <= PrevUB to be used in parallel for is in combination with
- // a distribute directive with schedule(static, 1)
+ // Build IV <= PrevUB or IV < PrevUB + 1 (for unsigned IV) to be used in
+ // parallel for in combination with a distribute directive with
+ // schedule(static, 1)
+ Expr *BoundPrevUB = PrevUB.get();
+ if (UseStrictCompare) {
+ BoundPrevUB =
+ SemaRef
+ .BuildBinOp(
+ CurScope, CondLoc, BO_Add, BoundPrevUB,
+ SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get())
+ .get();
+ BoundPrevUB =
+ SemaRef.ActOnFinishFullExpr(BoundPrevUB, /*DiscardedValue*/ false)
+ .get();
+ }
ParForInDistCond =
- SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), PrevUB.get());
+ SemaRef.BuildBinOp(CurScope, CondLoc, UseStrictCompare ? BO_LT : BO_LE,
+ IV.get(), BoundPrevUB);
}
// Build updates and final values of the loop counters.
@@ -7015,7 +7950,9 @@ StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
auto I = CS->body_begin();
while (I != CS->body_end()) {
const auto *OED = dyn_cast<OMPExecutableDirective>(*I);
- if (!OED || !isOpenMPTeamsDirective(OED->getDirectiveKind())) {
+ if (!OED || !isOpenMPTeamsDirective(OED->getDirectiveKind()) ||
+ OMPTeamsFound) {
+
OMPTeamsFound = false;
break;
}
@@ -8235,6 +9172,9 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_simdlen:
Res = ActOnOpenMPSimdlenClause(Expr, StartLoc, LParenLoc, EndLoc);
break;
+ case OMPC_allocator:
+ Res = ActOnOpenMPAllocatorClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
case OMPC_collapse:
Res = ActOnOpenMPCollapseClause(Expr, StartLoc, LParenLoc, EndLoc);
break;
@@ -8281,6 +9221,7 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_untied:
case OMPC_mergeable:
case OMPC_threadprivate:
+ case OMPC_allocate:
case OMPC_flush:
case OMPC_read:
case OMPC_write:
@@ -8365,12 +9306,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
// Do not capture if-clause expressions.
break;
case OMPD_threadprivate:
+ case OMPD_allocate:
case OMPD_taskyield:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_target:
case OMPD_end_declare_target:
@@ -8431,12 +9374,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_threadprivate:
+ case OMPD_allocate:
case OMPD_taskyield:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_target:
case OMPD_end_declare_target:
@@ -8498,12 +9443,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
case OMPD_threadprivate:
+ case OMPD_allocate:
case OMPD_taskyield:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_target:
case OMPD_end_declare_target:
@@ -8562,12 +9509,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
case OMPD_threadprivate:
+ case OMPD_allocate:
case OMPD_taskyield:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_target:
case OMPD_end_declare_target:
@@ -8627,12 +9576,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_parallel:
case OMPD_parallel_sections:
case OMPD_threadprivate:
+ case OMPD_allocate:
case OMPD_taskyield:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_target:
case OMPD_end_declare_target:
@@ -8691,12 +9642,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_parallel:
case OMPD_parallel_sections:
case OMPD_threadprivate:
+ case OMPD_allocate:
case OMPD_taskyield:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_target:
case OMPD_end_declare_target:
@@ -8754,12 +9707,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
case OMPD_threadprivate:
+ case OMPD_allocate:
case OMPD_taskyield:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_target:
case OMPD_end_declare_target:
@@ -8793,6 +9748,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPC_final:
case OMPC_safelen:
case OMPC_simdlen:
+ case OMPC_allocator:
case OMPC_collapse:
case OMPC_private:
case OMPC_shared:
@@ -8804,6 +9760,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPC_untied:
case OMPC_mergeable:
case OMPC_threadprivate:
+ case OMPC_allocate:
case OMPC_flush:
case OMPC_read:
case OMPC_write:
@@ -9042,6 +9999,71 @@ OMPClause *Sema::ActOnOpenMPSimdlenClause(Expr *Len, SourceLocation StartLoc,
OMPSimdlenClause(Simdlen.get(), StartLoc, LParenLoc, EndLoc);
}
+/// Tries to find omp_allocator_handle_t type.
+static bool findOMPAllocatorHandleT(Sema &S, SourceLocation Loc,
+ DSAStackTy *Stack) {
+ QualType OMPAllocatorHandleT = Stack->getOMPAllocatorHandleT();
+ if (!OMPAllocatorHandleT.isNull())
+ return true;
+ // Build the predefined allocator expressions.
+ bool ErrorFound = false;
+ for (int I = OMPAllocateDeclAttr::OMPDefaultMemAlloc;
+ I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
+ auto AllocatorKind = static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(I);
+ StringRef Allocator =
+ OMPAllocateDeclAttr::ConvertAllocatorTypeTyToStr(AllocatorKind);
+ DeclarationName AllocatorName = &S.getASTContext().Idents.get(Allocator);
+ auto *VD = dyn_cast_or_null<ValueDecl>(
+ S.LookupSingleName(S.TUScope, AllocatorName, Loc, Sema::LookupAnyName));
+ if (!VD) {
+ ErrorFound = true;
+ break;
+ }
+ QualType AllocatorType =
+ VD->getType().getNonLValueExprType(S.getASTContext());
+ ExprResult Res = S.BuildDeclRefExpr(VD, AllocatorType, VK_LValue, Loc);
+ if (!Res.isUsable()) {
+ ErrorFound = true;
+ break;
+ }
+ if (OMPAllocatorHandleT.isNull())
+ OMPAllocatorHandleT = AllocatorType;
+ if (!S.getASTContext().hasSameType(OMPAllocatorHandleT, AllocatorType)) {
+ ErrorFound = true;
+ break;
+ }
+ Stack->setAllocator(AllocatorKind, Res.get());
+ }
+ if (ErrorFound) {
+ S.Diag(Loc, diag::err_implied_omp_allocator_handle_t_not_found);
+ return false;
+ }
+ OMPAllocatorHandleT.addConst();
+ Stack->setOMPAllocatorHandleT(OMPAllocatorHandleT);
+ return true;
+}
+
+OMPClause *Sema::ActOnOpenMPAllocatorClause(Expr *A, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ // OpenMP [2.11.3, allocate Directive, Description]
+ // allocator is an expression of omp_allocator_handle_t type.
+ if (!findOMPAllocatorHandleT(*this, A->getExprLoc(), DSAStack))
+ return nullptr;
+
+ ExprResult Allocator = DefaultLvalueConversion(A);
+ if (Allocator.isInvalid())
+ return nullptr;
+ Allocator = PerformImplicitConversion(Allocator.get(),
+ DSAStack->getOMPAllocatorHandleT(),
+ Sema::AA_Initializing,
+ /*AllowExplicit=*/true);
+ if (Allocator.isInvalid())
+ return nullptr;
+ return new (Context)
+ OMPAllocatorClause(Allocator.get(), StartLoc, LParenLoc, EndLoc);
+}
+
OMPClause *Sema::ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
@@ -9109,6 +10131,7 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
case OMPC_num_threads:
case OMPC_safelen:
case OMPC_simdlen:
+ case OMPC_allocator:
case OMPC_collapse:
case OMPC_schedule:
case OMPC_private:
@@ -9127,6 +10150,7 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
case OMPC_untied:
case OMPC_mergeable:
case OMPC_threadprivate:
+ case OMPC_allocate:
case OMPC_flush:
case OMPC_read:
case OMPC_write:
@@ -9285,6 +10309,7 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
case OMPC_num_threads:
case OMPC_safelen:
case OMPC_simdlen:
+ case OMPC_allocator:
case OMPC_collapse:
case OMPC_default:
case OMPC_proc_bind:
@@ -9304,6 +10329,7 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
case OMPC_untied:
case OMPC_mergeable:
case OMPC_threadprivate:
+ case OMPC_allocate:
case OMPC_flush:
case OMPC_read:
case OMPC_write:
@@ -9505,6 +10531,7 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_num_threads:
case OMPC_safelen:
case OMPC_simdlen:
+ case OMPC_allocator:
case OMPC_collapse:
case OMPC_schedule:
case OMPC_private:
@@ -9521,6 +10548,7 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_default:
case OMPC_proc_bind:
case OMPC_threadprivate:
+ case OMPC_allocate:
case OMPC_flush:
case OMPC_depend:
case OMPC_device:
@@ -9623,14 +10651,16 @@ OMPClause *Sema::ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
OMPClause *Sema::ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> VarList, Expr *TailExpr,
- SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc,
- SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
- const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
+ const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
+ CXXScopeSpec &ReductionOrMapperIdScopeSpec,
+ DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
- ArrayRef<SourceLocation> MapTypeModifiersLoc,
- OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
- SourceLocation DepLinMapLoc) {
+ ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
+ bool IsMapTypeImplicit, SourceLocation DepLinMapLoc) {
+ SourceLocation StartLoc = Locs.StartLoc;
+ SourceLocation LParenLoc = Locs.LParenLoc;
+ SourceLocation EndLoc = Locs.EndLoc;
OMPClause *Res = nullptr;
switch (Kind) {
case OMPC_private:
@@ -9647,17 +10677,18 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
break;
case OMPC_reduction:
Res = ActOnOpenMPReductionClause(VarList, StartLoc, LParenLoc, ColonLoc,
- EndLoc, ReductionIdScopeSpec, ReductionId);
+ EndLoc, ReductionOrMapperIdScopeSpec,
+ ReductionOrMapperId);
break;
case OMPC_task_reduction:
Res = ActOnOpenMPTaskReductionClause(VarList, StartLoc, LParenLoc, ColonLoc,
- EndLoc, ReductionIdScopeSpec,
- ReductionId);
+ EndLoc, ReductionOrMapperIdScopeSpec,
+ ReductionOrMapperId);
break;
case OMPC_in_reduction:
- Res =
- ActOnOpenMPInReductionClause(VarList, StartLoc, LParenLoc, ColonLoc,
- EndLoc, ReductionIdScopeSpec, ReductionId);
+ Res = ActOnOpenMPInReductionClause(VarList, StartLoc, LParenLoc, ColonLoc,
+ EndLoc, ReductionOrMapperIdScopeSpec,
+ ReductionOrMapperId);
break;
case OMPC_linear:
Res = ActOnOpenMPLinearClause(VarList, TailExpr, StartLoc, LParenLoc,
@@ -9681,27 +10712,35 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
StartLoc, LParenLoc, EndLoc);
break;
case OMPC_map:
- Res = ActOnOpenMPMapClause(MapTypeModifiers, MapTypeModifiersLoc, MapType,
- IsMapTypeImplicit, DepLinMapLoc, ColonLoc,
- VarList, StartLoc, LParenLoc, EndLoc);
+ Res = ActOnOpenMPMapClause(MapTypeModifiers, MapTypeModifiersLoc,
+ ReductionOrMapperIdScopeSpec,
+ ReductionOrMapperId, MapType, IsMapTypeImplicit,
+ DepLinMapLoc, ColonLoc, VarList, Locs);
break;
case OMPC_to:
- Res = ActOnOpenMPToClause(VarList, StartLoc, LParenLoc, EndLoc);
+ Res = ActOnOpenMPToClause(VarList, ReductionOrMapperIdScopeSpec,
+ ReductionOrMapperId, Locs);
break;
case OMPC_from:
- Res = ActOnOpenMPFromClause(VarList, StartLoc, LParenLoc, EndLoc);
+ Res = ActOnOpenMPFromClause(VarList, ReductionOrMapperIdScopeSpec,
+ ReductionOrMapperId, Locs);
break;
case OMPC_use_device_ptr:
- Res = ActOnOpenMPUseDevicePtrClause(VarList, StartLoc, LParenLoc, EndLoc);
+ Res = ActOnOpenMPUseDevicePtrClause(VarList, Locs);
break;
case OMPC_is_device_ptr:
- Res = ActOnOpenMPIsDevicePtrClause(VarList, StartLoc, LParenLoc, EndLoc);
+ Res = ActOnOpenMPIsDevicePtrClause(VarList, Locs);
+ break;
+ case OMPC_allocate:
+ Res = ActOnOpenMPAllocateClause(TailExpr, VarList, StartLoc, LParenLoc,
+ ColonLoc, EndLoc);
break;
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
case OMPC_safelen:
case OMPC_simdlen:
+ case OMPC_allocator:
case OMPC_collapse:
case OMPC_default:
case OMPC_proc_bind:
@@ -9759,66 +10798,6 @@ ExprResult Sema::getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
return Res;
}
-static std::pair<ValueDecl *, bool>
-getPrivateItem(Sema &S, Expr *&RefExpr, SourceLocation &ELoc,
- SourceRange &ERange, bool AllowArraySection = false) {
- if (RefExpr->isTypeDependent() || RefExpr->isValueDependent() ||
- RefExpr->containsUnexpandedParameterPack())
- return std::make_pair(nullptr, true);
-
- // OpenMP [3.1, C/C++]
- // A list item is a variable name.
- // OpenMP [2.9.3.3, Restrictions, p.1]
- // A variable that is part of another variable (as an array or
- // structure element) cannot appear in a private clause.
- RefExpr = RefExpr->IgnoreParens();
- enum {
- NoArrayExpr = -1,
- ArraySubscript = 0,
- OMPArraySection = 1
- } IsArrayExpr = NoArrayExpr;
- if (AllowArraySection) {
- if (auto *ASE = dyn_cast_or_null<ArraySubscriptExpr>(RefExpr)) {
- Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
- while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
- Base = TempASE->getBase()->IgnoreParenImpCasts();
- RefExpr = Base;
- IsArrayExpr = ArraySubscript;
- } else if (auto *OASE = dyn_cast_or_null<OMPArraySectionExpr>(RefExpr)) {
- Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
- while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
- Base = TempOASE->getBase()->IgnoreParenImpCasts();
- while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
- Base = TempASE->getBase()->IgnoreParenImpCasts();
- RefExpr = Base;
- IsArrayExpr = OMPArraySection;
- }
- }
- ELoc = RefExpr->getExprLoc();
- ERange = RefExpr->getSourceRange();
- RefExpr = RefExpr->IgnoreParenImpCasts();
- auto *DE = dyn_cast_or_null<DeclRefExpr>(RefExpr);
- auto *ME = dyn_cast_or_null<MemberExpr>(RefExpr);
- if ((!DE || !isa<VarDecl>(DE->getDecl())) &&
- (S.getCurrentThisType().isNull() || !ME ||
- !isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()) ||
- !isa<FieldDecl>(ME->getMemberDecl()))) {
- if (IsArrayExpr != NoArrayExpr) {
- S.Diag(ELoc, diag::err_omp_expected_base_var_name) << IsArrayExpr
- << ERange;
- } else {
- S.Diag(ELoc,
- AllowArraySection
- ? diag::err_omp_expected_var_name_member_expr_or_array_item
- : diag::err_omp_expected_var_name_member_expr)
- << (S.getCurrentThisType().isNull() ? 0 : 1) << ERange;
- }
- return std::make_pair(nullptr, false);
- }
- return std::make_pair(
- getCanonicalDecl(DE ? DE->getDecl() : ME->getMemberDecl()), false);
-}
-
OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
@@ -10511,8 +11490,8 @@ public:
} // namespace
template <typename T, typename U>
-static T filterLookupForUDR(SmallVectorImpl<U> &Lookups,
- const llvm::function_ref<T(ValueDecl *)> Gen) {
+static T filterLookupForUDReductionAndMapper(
+ SmallVectorImpl<U> &Lookups, const llvm::function_ref<T(ValueDecl *)> Gen) {
for (U &Set : Lookups) {
for (auto *D : Set) {
if (T Res = Gen(cast<ValueDecl>(D)))
@@ -10539,7 +11518,7 @@ static NamedDecl *findAcceptableDecl(Sema &SemaRef, NamedDecl *D) {
}
static void
-argumentDependentLookup(Sema &SemaRef, const DeclarationNameInfo &ReductionId,
+argumentDependentLookup(Sema &SemaRef, const DeclarationNameInfo &Id,
SourceLocation Loc, QualType Ty,
SmallVectorImpl<UnresolvedSet<8>> &Lookups) {
// Find all of the associated namespaces and classes based on the
@@ -10573,13 +11552,14 @@ argumentDependentLookup(Sema &SemaRef, const DeclarationNameInfo &ReductionId,
// associated classes are visible within their respective
// namespaces even if they are not visible during an ordinary
// lookup (11.4).
- DeclContext::lookup_result R = NS->lookup(ReductionId.getName());
+ DeclContext::lookup_result R = NS->lookup(Id.getName());
for (auto *D : R) {
auto *Underlying = D;
if (auto *USD = dyn_cast<UsingShadowDecl>(D))
Underlying = USD->getTargetDecl();
- if (!isa<OMPDeclareReductionDecl>(Underlying))
+ if (!isa<OMPDeclareReductionDecl>(Underlying) &&
+ !isa<OMPDeclareMapperDecl>(Underlying))
continue;
if (!SemaRef.isVisible(D)) {
@@ -10624,7 +11604,7 @@ buildDeclareReductionRef(Sema &SemaRef, SourceLocation Loc, SourceRange Range,
for (NamedDecl *D : ULE->decls()) {
if (D == PrevD)
Lookups.push_back(UnresolvedSet<8>());
- else if (auto *DRD = cast<OMPDeclareReductionDecl>(D))
+ else if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(D))
Lookups.back().addDecl(DRD);
PrevD = D;
}
@@ -10632,7 +11612,7 @@ buildDeclareReductionRef(Sema &SemaRef, SourceLocation Loc, SourceRange Range,
if (SemaRef.CurContext->isDependentContext() || Ty->isDependentType() ||
Ty->isInstantiationDependentType() ||
Ty->containsUnexpandedParameterPack() ||
- filterLookupForUDR<bool>(Lookups, [](ValueDecl *D) {
+ filterLookupForUDReductionAndMapper<bool>(Lookups, [](ValueDecl *D) {
return !D->isInvalidDecl() &&
(D->getType()->isDependentType() ||
D->getType()->isInstantiationDependentType() ||
@@ -10680,33 +11660,38 @@ buildDeclareReductionRef(Sema &SemaRef, SourceLocation Loc, SourceRange Range,
}
}
// Perform ADL.
- argumentDependentLookup(SemaRef, ReductionId, Loc, Ty, Lookups);
- if (auto *VD = filterLookupForUDR<ValueDecl *>(
+ if (SemaRef.getLangOpts().CPlusPlus)
+ argumentDependentLookup(SemaRef, ReductionId, Loc, Ty, Lookups);
+ if (auto *VD = filterLookupForUDReductionAndMapper<ValueDecl *>(
Lookups, [&SemaRef, Ty](ValueDecl *D) -> ValueDecl * {
if (!D->isInvalidDecl() &&
SemaRef.Context.hasSameType(D->getType(), Ty))
return D;
return nullptr;
}))
- return SemaRef.BuildDeclRefExpr(VD, Ty, VK_LValue, Loc);
- if (auto *VD = filterLookupForUDR<ValueDecl *>(
- Lookups, [&SemaRef, Ty, Loc](ValueDecl *D) -> ValueDecl * {
- if (!D->isInvalidDecl() &&
- SemaRef.IsDerivedFrom(Loc, Ty, D->getType()) &&
- !Ty.isMoreQualifiedThan(D->getType()))
- return D;
- return nullptr;
- })) {
- CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
- /*DetectVirtual=*/false);
- if (SemaRef.IsDerivedFrom(Loc, Ty, VD->getType(), Paths)) {
- if (!Paths.isAmbiguous(SemaRef.Context.getCanonicalType(
- VD->getType().getUnqualifiedType()))) {
- if (SemaRef.CheckBaseClassAccess(Loc, VD->getType(), Ty, Paths.front(),
- /*DiagID=*/0) !=
- Sema::AR_inaccessible) {
- SemaRef.BuildBasePathArray(Paths, BasePath);
- return SemaRef.BuildDeclRefExpr(VD, Ty, VK_LValue, Loc);
+ return SemaRef.BuildDeclRefExpr(VD, VD->getType().getNonReferenceType(),
+ VK_LValue, Loc);
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ if (auto *VD = filterLookupForUDReductionAndMapper<ValueDecl *>(
+ Lookups, [&SemaRef, Ty, Loc](ValueDecl *D) -> ValueDecl * {
+ if (!D->isInvalidDecl() &&
+ SemaRef.IsDerivedFrom(Loc, Ty, D->getType()) &&
+ !Ty.isMoreQualifiedThan(D->getType()))
+ return D;
+ return nullptr;
+ })) {
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+ if (SemaRef.IsDerivedFrom(Loc, Ty, VD->getType(), Paths)) {
+ if (!Paths.isAmbiguous(SemaRef.Context.getCanonicalType(
+ VD->getType().getUnqualifiedType()))) {
+ if (SemaRef.CheckBaseClassAccess(
+ Loc, VD->getType(), Ty, Paths.front(),
+ /*DiagID=*/0) != Sema::AR_inaccessible) {
+ SemaRef.BuildBasePathArray(Paths, BasePath);
+ return SemaRef.BuildDeclRefExpr(
+ VD, VD->getType().getNonReferenceType(), VK_LValue, Loc);
+ }
}
}
}
@@ -11156,10 +12141,14 @@ static bool actOnOMPReductionKindClause(
if ((OASE && !ConstantLengthOASE) ||
(!OASE && !ASE &&
D->getType().getNonReferenceType()->isVariablyModifiedType())) {
- if (!Context.getTargetInfo().isVLASupported() &&
- S.shouldDiagnoseTargetSupportFromOpenMP()) {
- S.Diag(ELoc, diag::err_omp_reduction_vla_unsupported) << !!OASE;
- S.Diag(ELoc, diag::note_vla_unsupported);
+ if (!Context.getTargetInfo().isVLASupported()) {
+ if (isOpenMPTargetExecutionDirective(Stack->getCurrentDirective())) {
+ S.Diag(ELoc, diag::err_omp_reduction_vla_unsupported) << !!OASE;
+ S.Diag(ELoc, diag::note_vla_unsupported);
+ } else {
+ S.targetDiag(ELoc, diag::err_omp_reduction_vla_unsupported) << !!OASE;
+ S.targetDiag(ELoc, diag::note_vla_unsupported);
+ }
continue;
}
// For arrays/array sections only:
@@ -11300,7 +12289,8 @@ static bool actOnOMPReductionKindClause(
S.ActOnUninitializedDecl(RHSVD);
if (RHSVD->isInvalidDecl())
continue;
- if (!RHSVD->hasInit() && DeclareReductionRef.isUnset()) {
+ if (!RHSVD->hasInit() &&
+ (DeclareReductionRef.isUnset() || !S.LangOpts.CPlusPlus)) {
S.Diag(ELoc, diag::err_omp_reduction_id_not_compatible)
<< Type << ReductionIdRange;
bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
@@ -12885,6 +13875,110 @@ static bool checkMapConflicts(
return FoundError;
}
+// Look up the user-defined mapper given the mapper name and mapped type, and
+// build a reference to it.
+static ExprResult buildUserDefinedMapperRef(Sema &SemaRef, Scope *S,
+ CXXScopeSpec &MapperIdScopeSpec,
+ const DeclarationNameInfo &MapperId,
+ QualType Type,
+ Expr *UnresolvedMapper) {
+ if (MapperIdScopeSpec.isInvalid())
+ return ExprError();
+ // Find all user-defined mappers with the given MapperId.
+ SmallVector<UnresolvedSet<8>, 4> Lookups;
+ LookupResult Lookup(SemaRef, MapperId, Sema::LookupOMPMapperName);
+ Lookup.suppressDiagnostics();
+ if (S) {
+ while (S && SemaRef.LookupParsedName(Lookup, S, &MapperIdScopeSpec)) {
+ NamedDecl *D = Lookup.getRepresentativeDecl();
+ while (S && !S->isDeclScope(D))
+ S = S->getParent();
+ if (S)
+ S = S->getParent();
+ Lookups.emplace_back();
+ Lookups.back().append(Lookup.begin(), Lookup.end());
+ Lookup.clear();
+ }
+ } else if (auto *ULE = cast_or_null<UnresolvedLookupExpr>(UnresolvedMapper)) {
+ // Extract the user-defined mappers with the given MapperId.
+ Lookups.push_back(UnresolvedSet<8>());
+ for (NamedDecl *D : ULE->decls()) {
+ auto *DMD = cast<OMPDeclareMapperDecl>(D);
+ assert(DMD && "Expect valid OMPDeclareMapperDecl during instantiation.");
+ Lookups.back().addDecl(DMD);
+ }
+ }
+ // Defer the lookup for dependent types. The results will be passed through
+ // UnresolvedMapper on instantiation.
+ if (SemaRef.CurContext->isDependentContext() || Type->isDependentType() ||
+ Type->isInstantiationDependentType() ||
+ Type->containsUnexpandedParameterPack() ||
+ filterLookupForUDReductionAndMapper<bool>(Lookups, [](ValueDecl *D) {
+ return !D->isInvalidDecl() &&
+ (D->getType()->isDependentType() ||
+ D->getType()->isInstantiationDependentType() ||
+ D->getType()->containsUnexpandedParameterPack());
+ })) {
+ UnresolvedSet<8> URS;
+ for (const UnresolvedSet<8> &Set : Lookups) {
+ if (Set.empty())
+ continue;
+ URS.append(Set.begin(), Set.end());
+ }
+ return UnresolvedLookupExpr::Create(
+ SemaRef.Context, /*NamingClass=*/nullptr,
+ MapperIdScopeSpec.getWithLocInContext(SemaRef.Context), MapperId,
+ /*ADL=*/false, /*Overloaded=*/true, URS.begin(), URS.end());
+ }
+ // [OpenMP 5.0], 2.19.7.3 declare mapper Directive, Restrictions
+ // The type must be of struct, union or class type in C and C++
+ if (!Type->isStructureOrClassType() && !Type->isUnionType())
+ return ExprEmpty();
+ SourceLocation Loc = MapperId.getLoc();
+ // Perform argument dependent lookup.
+ if (SemaRef.getLangOpts().CPlusPlus && !MapperIdScopeSpec.isSet())
+ argumentDependentLookup(SemaRef, MapperId, Loc, Type, Lookups);
+ // Return the first user-defined mapper with the desired type.
+ if (auto *VD = filterLookupForUDReductionAndMapper<ValueDecl *>(
+ Lookups, [&SemaRef, Type](ValueDecl *D) -> ValueDecl * {
+ if (!D->isInvalidDecl() &&
+ SemaRef.Context.hasSameType(D->getType(), Type))
+ return D;
+ return nullptr;
+ }))
+ return SemaRef.BuildDeclRefExpr(VD, Type, VK_LValue, Loc);
+ // Find the first user-defined mapper with a type derived from the desired
+ // type.
+ if (auto *VD = filterLookupForUDReductionAndMapper<ValueDecl *>(
+ Lookups, [&SemaRef, Type, Loc](ValueDecl *D) -> ValueDecl * {
+ if (!D->isInvalidDecl() &&
+ SemaRef.IsDerivedFrom(Loc, Type, D->getType()) &&
+ !Type.isMoreQualifiedThan(D->getType()))
+ return D;
+ return nullptr;
+ })) {
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+ if (SemaRef.IsDerivedFrom(Loc, Type, VD->getType(), Paths)) {
+ if (!Paths.isAmbiguous(SemaRef.Context.getCanonicalType(
+ VD->getType().getUnqualifiedType()))) {
+ if (SemaRef.CheckBaseClassAccess(
+ Loc, VD->getType(), Type, Paths.front(),
+ /*DiagID=*/0) != Sema::AR_inaccessible) {
+ return SemaRef.BuildDeclRefExpr(VD, Type, VK_LValue, Loc);
+ }
+ }
+ }
+ }
+ // Report error if a mapper is specified, but cannot be found.
+ if (MapperIdScopeSpec.isSet() || MapperId.getAsString() != "default") {
+ SemaRef.Diag(Loc, diag::err_omp_invalid_mapper)
+ << Type << MapperId.getName();
+ return ExprError();
+ }
+ return ExprEmpty();
+}
+
namespace {
// Utility struct that gathers all the related lists associated with a mappable
// expression.
@@ -12897,6 +13991,8 @@ struct MappableVarListInfo {
OMPClauseMappableExprCommon::MappableExprComponentLists VarComponents;
// The base declaration of the variable.
SmallVector<ValueDecl *, 16> VarBaseDeclarations;
+ // The reference to the user-defined mapper associated with every expression.
+ SmallVector<Expr *, 16> UDMapperList;
MappableVarListInfo(ArrayRef<Expr *> VarList) : VarList(VarList) {
// We have a list of components and base declarations for each entry in the
@@ -12908,20 +14004,37 @@ struct MappableVarListInfo {
}
// Check the validity of the provided variable list for the provided clause kind
-// \a CKind. In the check process the valid expressions, and mappable expression
-// components and variables are extracted and used to fill \a Vars,
-// \a ClauseComponents, and \a ClauseBaseDeclarations. \a MapType and
-// \a IsMapTypeImplicit are expected to be valid if the clause kind is 'map'.
-static void
-checkMappableExpressionList(Sema &SemaRef, DSAStackTy *DSAS,
- OpenMPClauseKind CKind, MappableVarListInfo &MVLI,
- SourceLocation StartLoc,
- OpenMPMapClauseKind MapType = OMPC_MAP_unknown,
- bool IsMapTypeImplicit = false) {
+// \a CKind. In the check process the valid expressions, mappable expression
+// components, variables, and user-defined mappers are extracted and used to
+// fill \a ProcessedVarList, \a VarComponents, \a VarBaseDeclarations, and \a
+// UDMapperList in MVLI. \a MapType, \a IsMapTypeImplicit, \a MapperIdScopeSpec,
+// and \a MapperId are expected to be valid if the clause kind is 'map'.
+static void checkMappableExpressionList(
+ Sema &SemaRef, DSAStackTy *DSAS, OpenMPClauseKind CKind,
+ MappableVarListInfo &MVLI, SourceLocation StartLoc,
+ CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo MapperId,
+ ArrayRef<Expr *> UnresolvedMappers,
+ OpenMPMapClauseKind MapType = OMPC_MAP_unknown,
+ bool IsMapTypeImplicit = false) {
// We only expect mappable expressions in 'to', 'from', and 'map' clauses.
assert((CKind == OMPC_map || CKind == OMPC_to || CKind == OMPC_from) &&
"Unexpected clause kind with mappable expressions!");
+ // If the identifier of user-defined mapper is not specified, it is "default".
+ // We do not change the actual name in this clause to distinguish whether a
+ // mapper is specified explicitly, i.e., it is not explicitly specified when
+ // MapperId.getName() is empty.
+ if (!MapperId.getName() || MapperId.getName().isEmpty()) {
+ auto &DeclNames = SemaRef.getASTContext().DeclarationNames;
+ MapperId.setName(DeclNames.getIdentifier(
+ &SemaRef.getASTContext().Idents.get("default")));
+ }
+
+ // Iterators to find the current unresolved mapper expression.
+ auto UMIt = UnresolvedMappers.begin(), UMEnd = UnresolvedMappers.end();
+ bool UpdateUMIt = false;
+ Expr *UnresolvedMapper = nullptr;
+
// Keep track of the mappable components and base declarations in this clause.
// Each entry in the list is going to have a list of components associated. We
// record each set of the components so that we can build the clause later on.
@@ -12932,11 +14045,29 @@ checkMappableExpressionList(Sema &SemaRef, DSAStackTy *DSAS,
assert(RE && "Null expr in omp to/from/map clause");
SourceLocation ELoc = RE->getExprLoc();
+ // Find the current unresolved mapper expression.
+ if (UpdateUMIt && UMIt != UMEnd) {
+ UMIt++;
+ assert(
+ UMIt != UMEnd &&
+ "Expect the size of UnresolvedMappers to match with that of VarList");
+ }
+ UpdateUMIt = true;
+ if (UMIt != UMEnd)
+ UnresolvedMapper = *UMIt;
+
const Expr *VE = RE->IgnoreParenLValueCasts();
if (VE->isValueDependent() || VE->isTypeDependent() ||
VE->isInstantiationDependent() ||
VE->containsUnexpandedParameterPack()) {
+ // Try to find the associated user-defined mapper.
+ ExprResult ER = buildUserDefinedMapperRef(
+ SemaRef, DSAS->getCurScope(), MapperIdScopeSpec, MapperId,
+ VE->getType().getCanonicalType(), UnresolvedMapper);
+ if (ER.isInvalid())
+ continue;
+ MVLI.UDMapperList.push_back(ER.get());
// We can only analyze this information once the missing information is
// resolved.
MVLI.ProcessedVarList.push_back(RE);
@@ -12968,6 +14099,13 @@ checkMappableExpressionList(Sema &SemaRef, DSAStackTy *DSAS,
if (const auto *TE = dyn_cast<CXXThisExpr>(BE)) {
// Add store "this" pointer to class in DSAStackTy for future checking
DSAS->addMappedClassesQualTypes(TE->getType());
+ // Try to find the associated user-defined mapper.
+ ExprResult ER = buildUserDefinedMapperRef(
+ SemaRef, DSAS->getCurScope(), MapperIdScopeSpec, MapperId,
+ VE->getType().getCanonicalType(), UnresolvedMapper);
+ if (ER.isInvalid())
+ continue;
+ MVLI.UDMapperList.push_back(ER.get());
// Skip restriction checking for variable or field declarations
MVLI.ProcessedVarList.push_back(RE);
MVLI.VarComponents.resize(MVLI.VarComponents.size() + 1);
@@ -13086,6 +14224,14 @@ checkMappableExpressionList(Sema &SemaRef, DSAStackTy *DSAS,
}
}
+ // Try to find the associated user-defined mapper.
+ ExprResult ER = buildUserDefinedMapperRef(
+ SemaRef, DSAS->getCurScope(), MapperIdScopeSpec, MapperId,
+ Type.getCanonicalType(), UnresolvedMapper);
+ if (ER.isInvalid())
+ continue;
+ MVLI.UDMapperList.push_back(ER.get());
+
// Save the current expression.
MVLI.ProcessedVarList.push_back(RE);
@@ -13105,19 +14251,16 @@ checkMappableExpressionList(Sema &SemaRef, DSAStackTy *DSAS,
}
}
-OMPClause *
-Sema::ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
- ArrayRef<SourceLocation> MapTypeModifiersLoc,
- OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
- SourceLocation MapLoc, SourceLocation ColonLoc,
- ArrayRef<Expr *> VarList, SourceLocation StartLoc,
- SourceLocation LParenLoc, SourceLocation EndLoc) {
- MappableVarListInfo MVLI(VarList);
- checkMappableExpressionList(*this, DSAStack, OMPC_map, MVLI, StartLoc,
- MapType, IsMapTypeImplicit);
-
- OpenMPMapModifierKind Modifiers[] = { OMPC_MAP_MODIFIER_unknown,
- OMPC_MAP_MODIFIER_unknown };
+OMPClause *Sema::ActOnOpenMPMapClause(
+ ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
+ ArrayRef<SourceLocation> MapTypeModifiersLoc,
+ CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId,
+ OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc,
+ SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers) {
+ OpenMPMapModifierKind Modifiers[] = {OMPC_MAP_MODIFIER_unknown,
+ OMPC_MAP_MODIFIER_unknown,
+ OMPC_MAP_MODIFIER_unknown};
SourceLocation ModifiersLoc[OMPMapClause::NumberOfModifiers];
// Process map-type-modifiers, flag errors for duplicate modifiers.
@@ -13135,12 +14278,18 @@ Sema::ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
++Count;
}
+ MappableVarListInfo MVLI(VarList);
+ checkMappableExpressionList(*this, DSAStack, OMPC_map, MVLI, Locs.StartLoc,
+ MapperIdScopeSpec, MapperId, UnresolvedMappers,
+ MapType, IsMapTypeImplicit);
+
// We need to produce a map clause even if we don't have variables so that
// other diagnostics related with non-existing map clauses are accurate.
- return OMPMapClause::Create(Context, StartLoc, LParenLoc, EndLoc,
- MVLI.ProcessedVarList, MVLI.VarBaseDeclarations,
- MVLI.VarComponents, Modifiers, ModifiersLoc,
- MapType, IsMapTypeImplicit, MapLoc);
+ return OMPMapClause::Create(Context, Locs, MVLI.ProcessedVarList,
+ MVLI.VarBaseDeclarations, MVLI.VarComponents,
+ MVLI.UDMapperList, Modifiers, ModifiersLoc,
+ MapperIdScopeSpec.getWithLocInContext(Context),
+ MapperId, MapType, IsMapTypeImplicit, MapLoc);
}
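A minimal sketch (illustrative only, not part of this diff; the type vec, the mapper name id, and the function scale are assumptions) of the source construct the new MapperIdScopeSpec/MapperId parameters describe: an OpenMP 5.0 map clause that names a user-defined mapper through the mapper map-type-modifier.

  struct vec { int len; double *data; };
  #pragma omp declare mapper(id : vec v) map(v, v.data[0:v.len])

  void scale(vec &v) {
    // 'mapper(id)' is what buildUserDefinedMapperRef resolves while
    // checkMappableExpressionList processes the map clause above.
    #pragma omp target map(mapper(id), tofrom: v)
    for (int i = 0; i < v.len; ++i)
      v.data[i] *= 2.0;
  }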
QualType Sema::ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
@@ -13400,6 +14549,143 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareReductionDirectiveEnd(
return DeclReductions;
}
+TypeResult Sema::ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D) {
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType T = TInfo->getType();
+ if (D.isInvalidType())
+ return true;
+
+ if (getLangOpts().CPlusPlus) {
+ // Check that there are no default arguments (C++ only).
+ CheckExtraCXXDefaultArguments(D);
+ }
+
+ return CreateParsedType(T, TInfo);
+}
+
+QualType Sema::ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
+ TypeResult ParsedType) {
+ assert(ParsedType.isUsable() && "Expect usable parsed mapper type");
+
+ QualType MapperType = GetTypeFromParser(ParsedType.get());
+ assert(!MapperType.isNull() && "Expect valid mapper type");
+
+ // [OpenMP 5.0], 2.19.7.3 declare mapper Directive, Restrictions
+ // The type must be of struct, union or class type in C and C++
+ if (!MapperType->isStructureOrClassType() && !MapperType->isUnionType()) {
+ Diag(TyLoc, diag::err_omp_mapper_wrong_type);
+ return QualType();
+ }
+ return MapperType;
+}
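Hypothetical inputs (not from this patch) for the restriction checked above: the mapped type must be a structure, union, or class, so a scalar type is rejected with err_omp_mapper_wrong_type.

  struct S { int a, b; };
  #pragma omp declare mapper(S s) map(s.a, s.b)   // OK: struct type
  // #pragma omp declare mapper(int i) map(i)     // error: type must be a struct, union or class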
+
+OMPDeclareMapperDecl *Sema::ActOnOpenMPDeclareMapperDirectiveStart(
+ Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
+ SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
+ Decl *PrevDeclInScope) {
+ LookupResult Lookup(*this, Name, SourceLocation(), LookupOMPMapperName,
+ forRedeclarationInCurContext());
+ // [OpenMP 5.0], 2.19.7.3 declare mapper Directive, Restrictions
+ // A mapper-identifier may not be redeclared in the current scope for the
+ // same type or for a type that is compatible according to the base language
+ // rules.
+ llvm::DenseMap<QualType, SourceLocation> PreviousRedeclTypes;
+ OMPDeclareMapperDecl *PrevDMD = nullptr;
+ bool InCompoundScope = true;
+ if (S != nullptr) {
+ // Find previous declaration with the same name not referenced in other
+ // declarations.
+ FunctionScopeInfo *ParentFn = getEnclosingFunction();
+ InCompoundScope =
+ (ParentFn != nullptr) && !ParentFn->CompoundScopes.empty();
+ LookupName(Lookup, S);
+ FilterLookupForScope(Lookup, DC, S, /*ConsiderLinkage=*/false,
+ /*AllowInlineNamespace=*/false);
+ llvm::DenseMap<OMPDeclareMapperDecl *, bool> UsedAsPrevious;
+ LookupResult::Filter Filter = Lookup.makeFilter();
+ while (Filter.hasNext()) {
+ auto *PrevDecl = cast<OMPDeclareMapperDecl>(Filter.next());
+ if (InCompoundScope) {
+ auto I = UsedAsPrevious.find(PrevDecl);
+ if (I == UsedAsPrevious.end())
+ UsedAsPrevious[PrevDecl] = false;
+ if (OMPDeclareMapperDecl *D = PrevDecl->getPrevDeclInScope())
+ UsedAsPrevious[D] = true;
+ }
+ PreviousRedeclTypes[PrevDecl->getType().getCanonicalType()] =
+ PrevDecl->getLocation();
+ }
+ Filter.done();
+ if (InCompoundScope) {
+ for (const auto &PrevData : UsedAsPrevious) {
+ if (!PrevData.second) {
+ PrevDMD = PrevData.first;
+ break;
+ }
+ }
+ }
+ } else if (PrevDeclInScope) {
+ auto *PrevDMDInScope = PrevDMD =
+ cast<OMPDeclareMapperDecl>(PrevDeclInScope);
+ do {
+ PreviousRedeclTypes[PrevDMDInScope->getType().getCanonicalType()] =
+ PrevDMDInScope->getLocation();
+ PrevDMDInScope = PrevDMDInScope->getPrevDeclInScope();
+ } while (PrevDMDInScope != nullptr);
+ }
+ const auto I = PreviousRedeclTypes.find(MapperType.getCanonicalType());
+ bool Invalid = false;
+ if (I != PreviousRedeclTypes.end()) {
+ Diag(StartLoc, diag::err_omp_declare_mapper_redefinition)
+ << MapperType << Name;
+ Diag(I->second, diag::note_previous_definition);
+ Invalid = true;
+ }
+ auto *DMD = OMPDeclareMapperDecl::Create(Context, DC, StartLoc, Name,
+ MapperType, VN, PrevDMD);
+ DC->addDecl(DMD);
+ DMD->setAccess(AS);
+ if (Invalid)
+ DMD->setInvalidDecl();
+
+ // Enter new function scope.
+ PushFunctionScope();
+ setFunctionHasBranchProtectedScope();
+
+ CurContext = DMD;
+
+ return DMD;
+}
+
+void Sema::ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
+ Scope *S,
+ QualType MapperType,
+ SourceLocation StartLoc,
+ DeclarationName VN) {
+ VarDecl *VD = buildVarDecl(*this, StartLoc, MapperType, VN.getAsString());
+ if (S)
+ PushOnScopeChains(VD, S);
+ else
+ DMD->addDecl(VD);
+ Expr *MapperVarRefExpr = buildDeclRefExpr(*this, VD, MapperType, StartLoc);
+ DMD->setMapperVarRef(MapperVarRefExpr);
+}
+
+Sema::DeclGroupPtrTy
+Sema::ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
+ ArrayRef<OMPClause *> ClauseList) {
+ PopDeclContext();
+ PopFunctionScopeInfo();
+
+ if (D) {
+ if (S)
+ PushOnScopeChains(D, S, /*AddToContext=*/false);
+ D->CreateClauses(Context, ClauseList);
+ }
+
+ return DeclGroupPtrTy::make(DeclGroupRef(D));
+}
+
OMPClause *Sema::ActOnOpenMPNumTeamsClause(Expr *NumTeams,
SourceLocation StartLoc,
SourceLocation LParenLoc,
@@ -13632,9 +14918,9 @@ void Sema::ActOnOpenMPDeclareTargetName(Scope *CurScope,
Lookup.suppressDiagnostics();
if (!Lookup.isSingleResult()) {
+ VarOrFuncDeclFilterCCC CCC(*this);
if (TypoCorrection Corrected =
- CorrectTypo(Id, LookupOrdinaryName, CurScope, nullptr,
- llvm::make_unique<VarOrFuncDeclFilterCCC>(*this),
+ CorrectTypo(Id, LookupOrdinaryName, CurScope, nullptr, CCC,
CTK_ErrorRecovery)) {
diagnoseTypo(Corrected, PDiag(diag::err_undeclared_var_use_suggest)
<< Id.getName());
@@ -13744,37 +15030,41 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
}
OMPClause *Sema::ActOnOpenMPToClause(ArrayRef<Expr *> VarList,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+ CXXScopeSpec &MapperIdScopeSpec,
+ DeclarationNameInfo &MapperId,
+ const OMPVarListLocTy &Locs,
+ ArrayRef<Expr *> UnresolvedMappers) {
MappableVarListInfo MVLI(VarList);
- checkMappableExpressionList(*this, DSAStack, OMPC_to, MVLI, StartLoc);
+ checkMappableExpressionList(*this, DSAStack, OMPC_to, MVLI, Locs.StartLoc,
+ MapperIdScopeSpec, MapperId, UnresolvedMappers);
if (MVLI.ProcessedVarList.empty())
return nullptr;
- return OMPToClause::Create(Context, StartLoc, LParenLoc, EndLoc,
- MVLI.ProcessedVarList, MVLI.VarBaseDeclarations,
- MVLI.VarComponents);
+ return OMPToClause::Create(
+ Context, Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations,
+ MVLI.VarComponents, MVLI.UDMapperList,
+ MapperIdScopeSpec.getWithLocInContext(Context), MapperId);
}
OMPClause *Sema::ActOnOpenMPFromClause(ArrayRef<Expr *> VarList,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+ CXXScopeSpec &MapperIdScopeSpec,
+ DeclarationNameInfo &MapperId,
+ const OMPVarListLocTy &Locs,
+ ArrayRef<Expr *> UnresolvedMappers) {
MappableVarListInfo MVLI(VarList);
- checkMappableExpressionList(*this, DSAStack, OMPC_from, MVLI, StartLoc);
+ checkMappableExpressionList(*this, DSAStack, OMPC_from, MVLI, Locs.StartLoc,
+ MapperIdScopeSpec, MapperId, UnresolvedMappers);
if (MVLI.ProcessedVarList.empty())
return nullptr;
- return OMPFromClause::Create(Context, StartLoc, LParenLoc, EndLoc,
- MVLI.ProcessedVarList, MVLI.VarBaseDeclarations,
- MVLI.VarComponents);
+ return OMPFromClause::Create(
+ Context, Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations,
+ MVLI.VarComponents, MVLI.UDMapperList,
+ MapperIdScopeSpec.getWithLocInContext(Context), MapperId);
}
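Another illustrative use (assumed, not part of this diff) of the mapper arguments now threaded through ActOnOpenMPToClause and ActOnOpenMPFromClause: OpenMP 5.0 also permits a mapper modifier on the motion clauses of target update, reusing a declared mapper such as the hypothetical id above.

  void refresh(vec &v) {
    #pragma omp target update to(mapper(id): v)    // motion clause naming mapper 'id'
    #pragma omp target update from(mapper(id): v)
  }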
OMPClause *Sema::ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+ const OMPVarListLocTy &Locs) {
MappableVarListInfo MVLI(VarList);
SmallVector<Expr *, 8> PrivateCopies;
SmallVector<Expr *, 8> Inits;
@@ -13854,14 +15144,12 @@ OMPClause *Sema::ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
return nullptr;
return OMPUseDevicePtrClause::Create(
- Context, StartLoc, LParenLoc, EndLoc, MVLI.ProcessedVarList,
- PrivateCopies, Inits, MVLI.VarBaseDeclarations, MVLI.VarComponents);
+ Context, Locs, MVLI.ProcessedVarList, PrivateCopies, Inits,
+ MVLI.VarBaseDeclarations, MVLI.VarComponents);
}
OMPClause *Sema::ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+ const OMPVarListLocTy &Locs) {
MappableVarListInfo MVLI(VarList);
for (Expr *RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP is_device_ptr clause.");
@@ -13937,7 +15225,68 @@ OMPClause *Sema::ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
if (MVLI.ProcessedVarList.empty())
return nullptr;
- return OMPIsDevicePtrClause::Create(
- Context, StartLoc, LParenLoc, EndLoc, MVLI.ProcessedVarList,
- MVLI.VarBaseDeclarations, MVLI.VarComponents);
+ return OMPIsDevicePtrClause::Create(Context, Locs, MVLI.ProcessedVarList,
+ MVLI.VarBaseDeclarations,
+ MVLI.VarComponents);
+}
+
+OMPClause *Sema::ActOnOpenMPAllocateClause(
+ Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc,
+ SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc) {
+ if (Allocator) {
+ // OpenMP [2.11.4 allocate Clause, Description]
+ // allocator is an expression of omp_allocator_handle_t type.
+ if (!findOMPAllocatorHandleT(*this, Allocator->getExprLoc(), DSAStack))
+ return nullptr;
+
+ ExprResult AllocatorRes = DefaultLvalueConversion(Allocator);
+ if (AllocatorRes.isInvalid())
+ return nullptr;
+ AllocatorRes = PerformImplicitConversion(AllocatorRes.get(),
+ DSAStack->getOMPAllocatorHandleT(),
+ Sema::AA_Initializing,
+ /*AllowExplicit=*/true);
+ if (AllocatorRes.isInvalid())
+ return nullptr;
+ Allocator = AllocatorRes.get();
+ } else {
+ // OpenMP 5.0, 2.11.4 allocate Clause, Restrictions.
+ // allocate clauses that appear on a target construct or on constructs in a
+ // target region must specify an allocator expression unless a requires
+ // directive with the dynamic_allocators clause is present in the same
+ // compilation unit.
+ if (LangOpts.OpenMPIsDevice &&
+ !DSAStack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>())
+ targetDiag(StartLoc, diag::err_expected_allocator_expression);
+ }
+ // Analyze and build list of variables.
+ SmallVector<Expr *, 8> Vars;
+ for (Expr *RefExpr : VarList) {
+    assert(RefExpr && "NULL expr in OpenMP allocate clause.");
+ SourceLocation ELoc;
+ SourceRange ERange;
+ Expr *SimpleRefExpr = RefExpr;
+ auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
+ if (Res.second) {
+ // It will be analyzed later.
+ Vars.push_back(RefExpr);
+ }
+ ValueDecl *D = Res.first;
+ if (!D)
+ continue;
+
+ auto *VD = dyn_cast<VarDecl>(D);
+ DeclRefExpr *Ref = nullptr;
+ if (!VD && !CurContext->isDependentContext())
+ Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/false);
+ Vars.push_back((VD || CurContext->isDependentContext())
+ ? RefExpr->IgnoreParens()
+ : Ref);
+ }
+
+ if (Vars.empty())
+ return nullptr;
+
+ return OMPAllocateClause::Create(Context, StartLoc, LParenLoc, Allocator,
+ ColonLoc, EndLoc, Vars);
}
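A hedged sketch (not from this diff; work, buf, and the chosen predefined allocator are assumptions) of the allocate clause forms handled above: the allocator expression must have omp_allocator_handle_t type, and in device code an allocator is required unless a requires directive with dynamic_allocators is visible.

  #include <omp.h>

  void work(int n) {
    int buf[64];
    // omp_high_bw_mem_alloc is one of the predefined omp_allocator_handle_t values.
    #pragma omp parallel private(buf) allocate(omp_high_bw_mem_alloc : buf)
    {
      buf[0] = n;
    }
  }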
diff --git a/lib/Sema/SemaOverload.cpp b/lib/Sema/SemaOverload.cpp
index 52be0598fbc0..f632a4d3bd1a 100644
--- a/lib/Sema/SemaOverload.cpp
+++ b/lib/Sema/SemaOverload.cpp
@@ -1,9 +1,8 @@
//===--- SemaOverload.cpp - C++ Overloading -------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -1057,6 +1056,7 @@ Sema::CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &Old,
// third bullet. If the type of the friend is dependent, skip this lookup
// until instantiation.
if (New->getFriendObjectKind() && New->getQualifier() &&
+ !New->getDescribedFunctionTemplate() &&
!New->getDependentSpecializationInfo() &&
!New->getType()->isDependentType()) {
LookupResult TemplateSpecResult(LookupResult::Temporary, Old);
@@ -1172,16 +1172,14 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
// function yet (because we haven't yet resolved whether this is a static
// or non-static member function). Add it now, on the assumption that this
// is a redeclaration of OldMethod.
- // FIXME: OpenCL: Need to consider address spaces
- unsigned OldQuals = OldMethod->getTypeQualifiers().getCVRUQualifiers();
- unsigned NewQuals = NewMethod->getTypeQualifiers().getCVRUQualifiers();
+ auto OldQuals = OldMethod->getMethodQualifiers();
+ auto NewQuals = NewMethod->getMethodQualifiers();
if (!getLangOpts().CPlusPlus14 && NewMethod->isConstexpr() &&
!isa<CXXConstructorDecl>(NewMethod))
- NewQuals |= Qualifiers::Const;
-
+ NewQuals.addConst();
// We do not allow overloading based off of '__restrict'.
- OldQuals &= ~Qualifiers::Restrict;
- NewQuals &= ~Qualifiers::Restrict;
+ OldQuals.removeRestrict();
+ NewQuals.removeRestrict();
if (OldQuals != NewQuals)
return true;
}
@@ -1232,24 +1230,6 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
return false;
}
-/// Checks availability of the function depending on the current
-/// function context. Inside an unavailable function, unavailability is ignored.
-///
-/// \returns true if \arg FD is unavailable and current context is inside
-/// an available function, false otherwise.
-bool Sema::isFunctionConsideredUnavailable(FunctionDecl *FD) {
- if (!FD->isUnavailable())
- return false;
-
- // Walk up the context of the caller.
- Decl *C = cast<Decl>(CurContext);
- do {
- if (C->isUnavailable())
- return false;
- } while ((C = cast_or_null<Decl>(C->getDeclContext())));
- return true;
-}
-
/// Tries a user-defined conversion from From to ToType.
///
/// Produces an implicit conversion sequence for when a standard conversion
@@ -1871,6 +1851,10 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
(From->EvaluateKnownConstInt(S.getASTContext()) == 0)) {
SCS.Second = ICK_Zero_Queue_Conversion;
FromType = ToType;
+ } else if (ToType->isSamplerT() &&
+ From->isIntegerConstantExpr(S.getASTContext())) {
+ SCS.Second = ICK_Compatible_Conversion;
+ FromType = ToType;
} else {
// No second conversion required.
SCS.Second = ICK_Identity;
@@ -1970,7 +1954,7 @@ IsTransparentUnionStandardConversion(Sema &S, Expr* From,
// It's compatible if the expression matches any of the fields.
for (const auto *it : UD->fields()) {
if (IsStandardConversion(S, From, it->getType(), InOverloadResolution, SCS,
- CStyle, /*ObjCWritebackConversion=*/false)) {
+ CStyle, /*AllowObjCWritebackConversion=*/false)) {
ToType = it->getType();
return true;
}
@@ -2547,7 +2531,7 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
// function types are obviously different.
if (FromFunctionType->getNumParams() != ToFunctionType->getNumParams() ||
FromFunctionType->isVariadic() != ToFunctionType->isVariadic() ||
- FromFunctionType->getTypeQuals() != ToFunctionType->getTypeQuals())
+ FromFunctionType->getMethodQuals() != ToFunctionType->getMethodQuals())
return false;
bool HasObjCConversion = false;
@@ -2854,9 +2838,9 @@ void Sema::HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
return;
}
- if (FromFunction->getTypeQuals() != ToFunction->getTypeQuals()) {
- PDiag << ft_qualifer_mismatch << ToFunction->getTypeQuals()
- << FromFunction->getTypeQuals();
+ if (FromFunction->getMethodQuals() != ToFunction->getMethodQuals()) {
+ PDiag << ft_qualifer_mismatch << ToFunction->getMethodQuals()
+ << FromFunction->getMethodQuals();
return;
}
@@ -3262,10 +3246,13 @@ IsInitializerListConstructorConversion(Sema &S, Expr *From, QualType ToType,
if (Info.ConstructorTmpl)
S.AddTemplateOverloadCandidate(Info.ConstructorTmpl, Info.FoundDecl,
/*ExplicitArgs*/ nullptr, From,
- CandidateSet, SuppressUserConversions);
+ CandidateSet, SuppressUserConversions,
+ /*PartialOverloading*/ false,
+ AllowExplicit);
else
S.AddOverloadCandidate(Info.Constructor, Info.FoundDecl, From,
- CandidateSet, SuppressUserConversions);
+ CandidateSet, SuppressUserConversions,
+ /*PartialOverloading*/ false, AllowExplicit);
}
}
@@ -3392,13 +3379,15 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
S.AddTemplateOverloadCandidate(
Info.ConstructorTmpl, Info.FoundDecl,
/*ExplicitArgs*/ nullptr, llvm::makeArrayRef(Args, NumArgs),
- CandidateSet, SuppressUserConversions);
+ CandidateSet, SuppressUserConversions,
+ /*PartialOverloading*/ false, AllowExplicit);
else
// Allow one user-defined conversion when user specifies a
// From->ToType conversion via a static cast (c-style, etc.).
S.AddOverloadCandidate(Info.Constructor, Info.FoundDecl,
llvm::makeArrayRef(Args, NumArgs),
- CandidateSet, SuppressUserConversions);
+ CandidateSet, SuppressUserConversions,
+ /*PartialOverloading*/ false, AllowExplicit);
}
}
}
@@ -3430,14 +3419,13 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
if (AllowExplicit || !Conv->isExplicit()) {
if (ConvTemplate)
- S.AddTemplateConversionCandidate(ConvTemplate, FoundDecl,
- ActingContext, From, ToType,
- CandidateSet,
- AllowObjCConversionOnExplicit);
+ S.AddTemplateConversionCandidate(
+ ConvTemplate, FoundDecl, ActingContext, From, ToType,
+ CandidateSet, AllowObjCConversionOnExplicit, AllowExplicit);
else
- S.AddConversionCandidate(Conv, FoundDecl, ActingContext,
- From, ToType, CandidateSet,
- AllowObjCConversionOnExplicit);
+ S.AddConversionCandidate(
+ Conv, FoundDecl, ActingContext, From, ToType, CandidateSet,
+ AllowObjCConversionOnExplicit, AllowExplicit);
}
}
}
@@ -3525,18 +3513,25 @@ Sema::DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType) {
OverloadingResult OvResult =
IsUserDefinedConversion(*this, From, ToType, ICS.UserDefined,
CandidateSet, false, false);
+
+ if (!(OvResult == OR_Ambiguous ||
+ (OvResult == OR_No_Viable_Function && !CandidateSet.empty())))
+ return false;
+
+ auto Cands = CandidateSet.CompleteCandidates(*this, OCD_AllCandidates, From);
if (OvResult == OR_Ambiguous)
Diag(From->getBeginLoc(), diag::err_typecheck_ambiguous_condition)
<< From->getType() << ToType << From->getSourceRange();
- else if (OvResult == OR_No_Viable_Function && !CandidateSet.empty()) {
+ else { // OR_No_Viable_Function && !CandidateSet.empty()
if (!RequireCompleteType(From->getBeginLoc(), ToType,
diag::err_typecheck_nonviable_condition_incomplete,
From->getType(), From->getSourceRange()))
Diag(From->getBeginLoc(), diag::err_typecheck_nonviable_condition)
<< false << From->getType() << From->getSourceRange() << ToType;
- } else
- return false;
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, From);
+ }
+
+ CandidateSet.NoteCandidates(
+ *this, From, Cands);
return true;
}
@@ -4019,9 +4014,12 @@ CompareQualificationConversions(Sema &S,
// to unwrap. This essentially mimics what
// IsQualificationConversion does, but here we're checking for a
// strict subset of qualifiers.
- if (T1.getCVRQualifiers() == T2.getCVRQualifiers())
+ if (T1.getQualifiers().withoutObjCLifetime() ==
+ T2.getQualifiers().withoutObjCLifetime())
// The qualifiers are the same, so this doesn't tell us anything
// about how the sequences rank.
+ // ObjC ownership quals are omitted above as they interfere with
+ // the ARC overload rule.
;
else if (T2.isMoreQualifiedThan(T1)) {
// T1 has fewer qualifiers, so it could be the better sequence.
@@ -4455,13 +4453,13 @@ FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
}
if (ConvTemplate)
- S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(), ActingDC,
- Init, DeclType, CandidateSet,
- /*AllowObjCConversionOnExplicit=*/false);
+ S.AddTemplateConversionCandidate(
+ ConvTemplate, I.getPair(), ActingDC, Init, DeclType, CandidateSet,
+ /*AllowObjCConversionOnExplicit=*/false, AllowExplicit);
else
- S.AddConversionCandidate(Conv, I.getPair(), ActingDC, Init,
- DeclType, CandidateSet,
- /*AllowObjCConversionOnExplicit=*/false);
+ S.AddConversionCandidate(
+ Conv, I.getPair(), ActingDC, Init, DeclType, CandidateSet,
+ /*AllowObjCConversionOnExplicit=*/false, AllowExplicit);
}
bool HadMultipleCandidates = (CandidateSet.size() > 1);
@@ -5095,12 +5093,10 @@ TryObjectArgumentInitialization(Sema &S, SourceLocation Loc, QualType FromType,
QualType ClassType = S.Context.getTypeDeclType(ActingContext);
// [class.dtor]p2: A destructor can be invoked for a const, volatile or
// const volatile object.
- Qualifiers Quals;
+ Qualifiers Quals = Method->getMethodQualifiers();
if (isa<CXXDestructorDecl>(Method)) {
Quals.addConst();
Quals.addVolatile();
- } else {
- Quals = Method->getTypeQualifiers();
}
QualType ImplicitParamType = S.Context.getQualifiedType(ClassType, Quals);
@@ -5148,6 +5144,16 @@ TryObjectArgumentInitialization(Sema &S, SourceLocation Loc, QualType FromType,
return ICS;
}
+ if (FromTypeCanon.getQualifiers().hasAddressSpace()) {
+ Qualifiers QualsImplicitParamType = ImplicitParamType.getQualifiers();
+ Qualifiers QualsFromType = FromTypeCanon.getQualifiers();
+ if (!QualsImplicitParamType.isAddressSpaceSupersetOf(QualsFromType)) {
+ ICS.setBad(BadConversionSequence::bad_qualifiers,
+ FromType, ImplicitParamType);
+ return ICS;
+ }
+ }
+
// Check that we have either the same type or a derived type. It
// affects the conversion rank.
QualType ClassTypeCanon = S.Context.getCanonicalType(ClassType);
@@ -5286,12 +5292,12 @@ Sema::PerformObjectArgumentInitialization(Expr *From,
}
if (!Context.hasSameType(From->getType(), DestType)) {
- if (From->getType().getAddressSpace() != DestType.getAddressSpace())
- From = ImpCastExprToType(From, DestType, CK_AddressSpaceConversion,
- From->getValueKind()).get();
+ CastKind CK;
+ if (FromRecordType.getAddressSpace() != DestType.getAddressSpace())
+ CK = CK_AddressSpaceConversion;
else
- From = ImpCastExprToType(From, DestType, CK_NoOp,
- From->getValueKind()).get();
+ CK = CK_NoOp;
+ From = ImpCastExprToType(From, DestType, CK, From->getValueKind()).get();
}
return From;
}
@@ -5414,12 +5420,12 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
// condition shall be a contextually converted constant expression of type
// bool.
ImplicitConversionSequence ICS =
- CCE == Sema::CCEK_ConstexprIf
+ CCE == Sema::CCEK_ConstexprIf || CCE == Sema::CCEK_ExplicitBool
? TryContextuallyConvertToBool(S, From)
: TryCopyInitialization(S, From, T,
/*SuppressUserConversions=*/false,
/*InOverloadResolution=*/false,
- /*AllowObjcWritebackConversion=*/false,
+ /*AllowObjCWritebackConversion=*/false,
/*AllowExplicit=*/false);
StandardConversionSequence *SCS = nullptr;
switch (ICS.getKind()) {
@@ -5510,7 +5516,7 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
if (Notes.empty()) {
// It's a constant expression.
- return ConstantExpr::Create(S.Context, Result.get());
+ return ConstantExpr::Create(S.Context, Result.get(), Value);
}
}
@@ -5730,12 +5736,13 @@ collectViableConversionCandidates(Sema &SemaRef, Expr *From, QualType ToType,
if (ConvTemplate)
SemaRef.AddTemplateConversionCandidate(
- ConvTemplate, FoundDecl, ActingContext, From, ToType, CandidateSet,
- /*AllowObjCConversionOnExplicit=*/false);
+ ConvTemplate, FoundDecl, ActingContext, From, ToType, CandidateSet,
+ /*AllowObjCConversionOnExplicit=*/false, /*AllowExplicit*/ true);
else
SemaRef.AddConversionCandidate(Conv, FoundDecl, ActingContext, From,
ToType, CandidateSet,
- /*AllowObjCConversionOnExplicit=*/false);
+ /*AllowObjCConversionOnExplicit=*/false,
+ /*AllowExplicit*/ true);
}
}
@@ -5987,13 +5994,11 @@ static bool IsAcceptableNonMemberOperatorCandidate(ASTContext &Context,
/// \param PartialOverloading true if we are performing "partial" overloading
/// based on an incomplete set of function arguments. This feature is used by
/// code completion.
-void Sema::AddOverloadCandidate(FunctionDecl *Function,
- DeclAccessPair FoundDecl, ArrayRef<Expr *> Args,
- OverloadCandidateSet &CandidateSet,
- bool SuppressUserConversions,
- bool PartialOverloading, bool AllowExplicit,
- ADLCallKind IsADLCandidate,
- ConversionSequenceList EarlyConversions) {
+void Sema::AddOverloadCandidate(
+ FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args,
+ OverloadCandidateSet &CandidateSet, bool SuppressUserConversions,
+ bool PartialOverloading, bool AllowExplicit, bool AllowExplicitConversions,
+ ADLCallKind IsADLCandidate, ConversionSequenceList EarlyConversions) {
const FunctionProtoType *Proto
= dyn_cast<FunctionProtoType>(Function->getType()->getAs<FunctionType>());
assert(Proto && "Functions without a prototype cannot be overloaded");
@@ -6098,6 +6103,15 @@ void Sema::AddOverloadCandidate(FunctionDecl *Function,
return;
}
}
+
+ // Check that the constructor is capable of constructing an object in the
+ // destination address space.
+ if (!Qualifiers::isAddressSpaceSupersetOf(
+ Constructor->getMethodQualifiers().getAddressSpace(),
+ CandidateSet.getDestAS())) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_object_addrspace_mismatch;
+ }
}
unsigned NumParams = Proto->getNumParams();
@@ -6150,13 +6164,11 @@ void Sema::AddOverloadCandidate(FunctionDecl *Function,
// (13.3.3.1) that converts that argument to the corresponding
// parameter of F.
QualType ParamType = Proto->getParamType(ArgIdx);
- Candidate.Conversions[ArgIdx]
- = TryCopyInitialization(*this, Args[ArgIdx], ParamType,
- SuppressUserConversions,
- /*InOverloadResolution=*/true,
- /*AllowObjCWritebackConversion=*/
- getLangOpts().ObjCAutoRefCount,
- AllowExplicit);
+ Candidate.Conversions[ArgIdx] = TryCopyInitialization(
+ *this, Args[ArgIdx], ParamType, SuppressUserConversions,
+ /*InOverloadResolution=*/true,
+ /*AllowObjCWritebackConversion=*/
+ getLangOpts().ObjCAutoRefCount, AllowExplicitConversions);
if (Candidate.Conversions[ArgIdx].isBad()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_conversion;
@@ -6170,6 +6182,15 @@ void Sema::AddOverloadCandidate(FunctionDecl *Function,
}
}
+ if (!AllowExplicit) {
+ ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(Function);
+ if (ES.getKind() != ExplicitSpecKind::ResolvedFalse) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_explicit_resolved;
+ return;
+ }
+ }
+
if (EnableIfAttr *FailedAttr = CheckEnableIf(Function, Args)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_enable_if;
@@ -6366,7 +6387,8 @@ EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
APValue Result;
// FIXME: This doesn't consider value-dependent cases, because doing so is
// very difficult. Ideally, we should handle them more gracefully.
- if (!EIA->getCond()->EvaluateWithSubstitution(
+ if (EIA->getCond()->isValueDependent() ||
+ !EIA->getCond()->EvaluateWithSubstitution(
Result, Context, Function, llvm::makeArrayRef(ConvertedArgs)))
return EIA;
@@ -6759,7 +6781,7 @@ void Sema::AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions,
- bool PartialOverloading, ADLCallKind IsADLCandidate) {
+ bool PartialOverloading, bool AllowExplicit, ADLCallKind IsADLCandidate) {
if (!CandidateSet.isNewCandidate(FunctionTemplate))
return;
@@ -6808,9 +6830,10 @@ void Sema::AddTemplateOverloadCandidate(
// Add the function template specialization produced by template argument
// deduction as a candidate.
assert(Specialization && "Missing function template specialization?");
- AddOverloadCandidate(Specialization, FoundDecl, Args, CandidateSet,
- SuppressUserConversions, PartialOverloading,
- /*AllowExplicit*/ false, IsADLCandidate, Conversions);
+ AddOverloadCandidate(
+ Specialization, FoundDecl, Args, CandidateSet, SuppressUserConversions,
+ PartialOverloading, AllowExplicit,
+ /*AllowExplicitConversions*/ false, IsADLCandidate, Conversions);
}
/// Check that implicit conversion sequences can be formed for each argument
@@ -6915,14 +6938,11 @@ static bool isAllowableExplicitConversion(Sema &S,
/// and ToType is the type that we're eventually trying to convert to
/// (which may or may not be the same type as the type that the
/// conversion function produces).
-void
-Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
- DeclAccessPair FoundDecl,
- CXXRecordDecl *ActingContext,
- Expr *From, QualType ToType,
- OverloadCandidateSet& CandidateSet,
- bool AllowObjCConversionOnExplicit,
- bool AllowResultConversion) {
+void Sema::AddConversionCandidate(
+ CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
+ CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
+ OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
+ bool AllowExplicit, bool AllowResultConversion) {
assert(!Conversion->getDescribedFunctionTemplate() &&
"Conversion function templates use AddTemplateConversionCandidate");
QualType ConvType = Conversion->getConversionType().getNonReferenceType();
@@ -7081,6 +7101,13 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
"Can only end up with a standard conversion sequence or failure");
}
+ if (!AllowExplicit && Conversion->getExplicitSpecifier().getKind() !=
+ ExplicitSpecKind::ResolvedFalse) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_explicit_resolved;
+ return;
+ }
+
if (EnableIfAttr *FailedAttr = CheckEnableIf(Conversion, None)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_enable_if;
@@ -7100,14 +7127,11 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
/// to deduce the template arguments of the conversion function
/// template from the type that we are converting to (C++
/// [temp.deduct.conv]).
-void
-Sema::AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
- DeclAccessPair FoundDecl,
- CXXRecordDecl *ActingDC,
- Expr *From, QualType ToType,
- OverloadCandidateSet &CandidateSet,
- bool AllowObjCConversionOnExplicit,
- bool AllowResultConversion) {
+void Sema::AddTemplateConversionCandidate(
+ FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
+ CXXRecordDecl *ActingDC, Expr *From, QualType ToType,
+ OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
+ bool AllowExplicit, bool AllowResultConversion) {
assert(isa<CXXConversionDecl>(FunctionTemplate->getTemplatedDecl()) &&
"Only conversion function templates permitted here");
@@ -7137,7 +7161,7 @@ Sema::AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
assert(Specialization && "Missing function template specialization?");
AddConversionCandidate(Specialization, FoundDecl, ActingDC, From, ToType,
CandidateSet, AllowObjCConversionOnExplicit,
- AllowResultConversion);
+ AllowExplicit, AllowResultConversion);
}
/// AddSurrogateCandidate - Adds a "surrogate" candidate function that
@@ -7297,7 +7321,7 @@ void Sema::AddMemberOperatorCandidates(OverloadedOperatorKind Op,
++Oper)
AddMethodCandidate(Oper.getPair(), Args[0]->getType(),
Args[0]->Classify(Context), Args.slice(1),
- CandidateSet, /*SuppressUserConversions=*/false);
+ CandidateSet, /*SuppressUserConversion=*/false);
}
}
@@ -7649,6 +7673,12 @@ BuiltinCandidateTypeSet::AddTypesConvertedFrom(QualType Ty,
}
}
}
+/// Helper function for adjusting address spaces for the pointer or reference
+/// operands of builtin operators depending on the argument.
+static QualType AdjustAddressSpaceForBuiltinOperandType(Sema &S, QualType T,
+ Expr *Arg) {
+ return S.Context.getAddrSpaceQualType(T, Arg->getType().getAddressSpace());
+}
/// Helper function for AddBuiltinOperatorCandidates() that adds
/// the volatile- and non-volatile-qualified assignment operators for the
@@ -7660,15 +7690,17 @@ static void AddBuiltinAssignmentOperatorCandidates(Sema &S,
QualType ParamTypes[2];
// T& operator=(T&, T)
- ParamTypes[0] = S.Context.getLValueReferenceType(T);
+ ParamTypes[0] = S.Context.getLValueReferenceType(
+ AdjustAddressSpaceForBuiltinOperandType(S, T, Args[0]));
ParamTypes[1] = T;
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/true);
if (!S.Context.getCanonicalType(T).isVolatileQualified()) {
// volatile T& operator=(volatile T&, T)
- ParamTypes[0]
- = S.Context.getLValueReferenceType(S.Context.getVolatileType(T));
+ ParamTypes[0] = S.Context.getLValueReferenceType(
+ AdjustAddressSpaceForBuiltinOperandType(S, S.Context.getVolatileType(T),
+ Args[0]));
ParamTypes[1] = T;
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/true);
@@ -7947,7 +7979,7 @@ public:
continue;
if (const FunctionProtoType *Proto =PointeeTy->getAs<FunctionProtoType>())
- if (Proto->getTypeQuals() || Proto->getRefQualifier())
+ if (Proto->getMethodQuals() || Proto->getRefQualifier())
continue;
S.AddBuiltinCandidate(&ParamTy, Args, CandidateSet);
@@ -8390,7 +8422,7 @@ public:
isEqualOp ? *Ptr : S.Context.getPointerDiffType(),
};
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
- /*IsAssigmentOperator=*/ isEqualOp);
+ /*IsAssignmentOperator=*/ isEqualOp);
bool NeedVolatile = !(*Ptr).isVolatileQualified() &&
VisibleTypeConversionsQuals.hasVolatile();
@@ -8399,7 +8431,7 @@ public:
ParamTypes[0] =
S.Context.getLValueReferenceType(S.Context.getVolatileType(*Ptr));
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
- /*IsAssigmentOperator=*/isEqualOp);
+ /*IsAssignmentOperator=*/isEqualOp);
}
if (!(*Ptr).isRestrictQualified() &&
@@ -8408,7 +8440,7 @@ public:
ParamTypes[0]
= S.Context.getLValueReferenceType(S.Context.getRestrictType(*Ptr));
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
- /*IsAssigmentOperator=*/isEqualOp);
+ /*IsAssignmentOperator=*/isEqualOp);
if (NeedVolatile) {
// volatile restrict version
@@ -8418,7 +8450,7 @@ public:
(Qualifiers::Volatile |
Qualifiers::Restrict)));
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
- /*IsAssigmentOperator=*/isEqualOp);
+ /*IsAssignmentOperator=*/isEqualOp);
}
}
}
@@ -8439,7 +8471,7 @@ public:
// non-volatile version
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
- /*IsAssigmentOperator=*/true);
+ /*IsAssignmentOperator=*/true);
bool NeedVolatile = !(*Ptr).isVolatileQualified() &&
VisibleTypeConversionsQuals.hasVolatile();
@@ -8448,7 +8480,7 @@ public:
ParamTypes[0] =
S.Context.getLValueReferenceType(S.Context.getVolatileType(*Ptr));
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
- /*IsAssigmentOperator=*/true);
+ /*IsAssignmentOperator=*/true);
}
if (!(*Ptr).isRestrictQualified() &&
@@ -8457,7 +8489,7 @@ public:
ParamTypes[0]
= S.Context.getLValueReferenceType(S.Context.getRestrictType(*Ptr));
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
- /*IsAssigmentOperator=*/true);
+ /*IsAssignmentOperator=*/true);
if (NeedVolatile) {
// volatile restrict version
@@ -8467,7 +8499,7 @@ public:
(Qualifiers::Volatile |
Qualifiers::Restrict)));
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
- /*IsAssigmentOperator=*/true);
+ /*IsAssignmentOperator=*/true);
}
}
}
@@ -8495,20 +8527,19 @@ public:
Right < LastPromotedArithmeticType; ++Right) {
QualType ParamTypes[2];
ParamTypes[1] = ArithmeticTypes[Right];
-
+ auto LeftBaseTy = AdjustAddressSpaceForBuiltinOperandType(
+ S, ArithmeticTypes[Left], Args[0]);
// Add this built-in operator as a candidate (VQ is empty).
- ParamTypes[0] =
- S.Context.getLValueReferenceType(ArithmeticTypes[Left]);
+ ParamTypes[0] = S.Context.getLValueReferenceType(LeftBaseTy);
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
- /*IsAssigmentOperator=*/isEqualOp);
+ /*IsAssignmentOperator=*/isEqualOp);
// Add this built-in operator as a candidate (VQ is 'volatile').
if (VisibleTypeConversionsQuals.hasVolatile()) {
- ParamTypes[0] =
- S.Context.getVolatileType(ArithmeticTypes[Left]);
+ ParamTypes[0] = S.Context.getVolatileType(LeftBaseTy);
ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
- /*IsAssigmentOperator=*/isEqualOp);
+ /*IsAssignmentOperator=*/isEqualOp);
}
}
}
@@ -8527,14 +8558,14 @@ public:
// Add this built-in operator as a candidate (VQ is empty).
ParamTypes[0] = S.Context.getLValueReferenceType(*Vec1);
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
- /*IsAssigmentOperator=*/isEqualOp);
+ /*IsAssignmentOperator=*/isEqualOp);
// Add this built-in operator as a candidate (VQ is 'volatile').
if (VisibleTypeConversionsQuals.hasVolatile()) {
ParamTypes[0] = S.Context.getVolatileType(*Vec1);
ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
- /*IsAssigmentOperator=*/isEqualOp);
+ /*IsAssignmentOperator=*/isEqualOp);
}
}
}
@@ -8561,14 +8592,14 @@ public:
Right < LastPromotedIntegralType; ++Right) {
QualType ParamTypes[2];
ParamTypes[1] = ArithmeticTypes[Right];
-
+ auto LeftBaseTy = AdjustAddressSpaceForBuiltinOperandType(
+ S, ArithmeticTypes[Left], Args[0]);
// Add this built-in operator as a candidate (VQ is empty).
- ParamTypes[0] =
- S.Context.getLValueReferenceType(ArithmeticTypes[Left]);
+ ParamTypes[0] = S.Context.getLValueReferenceType(LeftBaseTy);
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
if (VisibleTypeConversionsQuals.hasVolatile()) {
// Add this built-in operator as a candidate (VQ is 'volatile').
- ParamTypes[0] = ArithmeticTypes[Left];
+ ParamTypes[0] = LeftBaseTy;
ParamTypes[0] = S.Context.getVolatileType(ParamTypes[0]);
ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
@@ -8983,13 +9014,16 @@ Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
continue;
AddOverloadCandidate(FD, FoundDecl, Args, CandidateSet,
- /*SupressUserConversions=*/false, PartialOverloading,
- /*AllowExplicit=*/false, ADLCallKind::UsesADL);
+ /*SuppressUserConversions=*/false, PartialOverloading,
+ /*AllowExplicit*/ true,
+ /*AllowExplicitConversions*/ false,
+ ADLCallKind::UsesADL);
} else {
- AddTemplateOverloadCandidate(cast<FunctionTemplateDecl>(*I), FoundDecl,
- ExplicitTemplateArgs, Args, CandidateSet,
- /*SupressUserConversions=*/false,
- PartialOverloading, ADLCallKind::UsesADL);
+ AddTemplateOverloadCandidate(
+ cast<FunctionTemplateDecl>(*I), FoundDecl, ExplicitTemplateArgs, Args,
+ CandidateSet,
+ /*SuppressUserConversions=*/false, PartialOverloading,
+ /*AllowExplicit*/true, ADLCallKind::UsesADL);
}
}
}
@@ -9436,9 +9470,7 @@ OverloadCandidateSet::BestViableFunction(Sema &S, SourceLocation Loc,
}
// Best is the best viable function.
- if (Best->Function &&
- (Best->Function->isDeleted() ||
- S.isFunctionConsideredUnavailable(Best->Function)))
+ if (Best->Function && Best->Function->isDeleted())
return OR_Deleted;
if (!EquivalentCands.empty())
@@ -9542,7 +9574,8 @@ static bool isFunctionAlwaysEnabled(const ASTContext &Ctx,
const FunctionDecl *FD) {
for (auto *EnableIf : FD->specific_attrs<EnableIfAttr>()) {
bool AlwaysTrue;
- if (!EnableIf->getCond()->EvaluateAsBooleanCondition(AlwaysTrue, Ctx))
+ if (EnableIf->getCond()->isValueDependent() ||
+ !EnableIf->getCond()->EvaluateAsBooleanCondition(AlwaysTrue, Ctx))
return false;
if (!AlwaysTrue)
return false;
@@ -10322,6 +10355,33 @@ static void DiagnoseFailedEnableIfAttr(Sema &S, OverloadCandidate *Cand) {
<< Attr->getCond()->getSourceRange() << Attr->getMessage();
}
+static void DiagnoseFailedExplicitSpec(Sema &S, OverloadCandidate *Cand) {
+ ExplicitSpecifier ES;
+ const char *DeclName;
+ switch (Cand->Function->getDeclKind()) {
+ case Decl::Kind::CXXConstructor:
+ ES = cast<CXXConstructorDecl>(Cand->Function)->getExplicitSpecifier();
+ DeclName = "constructor";
+ break;
+ case Decl::Kind::CXXConversion:
+ ES = cast<CXXConversionDecl>(Cand->Function)->getExplicitSpecifier();
+ DeclName = "conversion operator";
+ break;
+ case Decl::Kind::CXXDeductionGuide:
+ ES = cast<CXXDeductionGuideDecl>(Cand->Function)->getExplicitSpecifier();
+    DeclName = "deduction guide";
+ break;
+ default:
+ llvm_unreachable("invalid Decl");
+ }
+ assert(ES.getExpr() && "null expression should be handled before");
+ S.Diag(Cand->Function->getLocation(),
+ diag::note_ovl_candidate_explicit_forbidden)
+ << DeclName;
+ S.Diag(ES.getExpr()->getBeginLoc(),
+ diag::note_explicit_bool_resolved_to_true);
+}
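A hypothetical C++2a snippet (not part of this patch; Wrapper is an assumption) that reaches DiagnoseFailedExplicitSpec: when an explicit(bool) specifier resolves to true, the constructor is dropped for copy-initialization, and the notes emitted above point at the candidate and at the explicit-specifier expression.

  #include <type_traits>

  template <typename T>
  struct Wrapper {
    explicit(!std::is_convertible_v<T, int>) Wrapper(T);
  };

  // Wrapper<void *> w = nullptr;  // explicit-specifier evaluates to true for T = void *,
  //                               // so this constructor is not a viable candidate here.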
+
static void DiagnoseOpenCLExtensionDisabled(Sema &S, OverloadCandidate *Cand) {
FunctionDecl *Callee = Cand->Function;
@@ -10343,14 +10403,17 @@ static void DiagnoseOpenCLExtensionDisabled(Sema &S, OverloadCandidate *Cand) {
/// It would be great to be able to express per-candidate problems
/// more richly for those diagnostic clients that cared, but we'd
/// still have to be just as careful with the default diagnostics.
+/// \param CtorDestAS Addr space of object being constructed (for ctor
+/// candidates only).
static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
unsigned NumArgs,
- bool TakingCandidateAddress) {
+ bool TakingCandidateAddress,
+ LangAS CtorDestAS = LangAS::Default) {
FunctionDecl *Fn = Cand->Function;
// Note deleted candidates, but only if they're viable.
if (Cand->Viable) {
- if (Fn->isDeleted() || S.isFunctionConsideredUnavailable(Fn)) {
+ if (Fn->isDeleted()) {
std::string FnDesc;
std::pair<OverloadCandidateKind, OverloadCandidateSelect> FnKindPair =
ClassifyOverloadCandidate(S, Cand->FoundDecl, Fn, FnDesc);
@@ -10383,6 +10446,16 @@ static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
return;
}
+ case ovl_fail_object_addrspace_mismatch: {
+ Qualifiers QualsForPrinting;
+ QualsForPrinting.setAddressSpace(CtorDestAS);
+ S.Diag(Fn->getLocation(),
+ diag::note_ovl_candidate_illegal_constructor_adrspace_mismatch)
+ << QualsForPrinting;
+ MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
+ return;
+ }
+
case ovl_fail_trivial_conversion:
case ovl_fail_bad_final_conversion:
case ovl_fail_final_conversion_not_exact:
@@ -10406,6 +10479,9 @@ static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
case ovl_fail_enable_if:
return DiagnoseFailedEnableIfAttr(S, Cand);
+ case ovl_fail_explicit_resolved:
+ return DiagnoseFailedExplicitSpec(S, Cand);
+
case ovl_fail_ext_disabled:
return DiagnoseOpenCLExtensionDisabled(S, Cand);
@@ -10747,11 +10823,9 @@ static void CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
}
}
-/// When overload resolution fails, prints diagnostic messages containing the
-/// candidates in the candidate set.
-void OverloadCandidateSet::NoteCandidates(
+SmallVector<OverloadCandidate *, 32> OverloadCandidateSet::CompleteCandidates(
Sema &S, OverloadCandidateDisplayKind OCD, ArrayRef<Expr *> Args,
- StringRef Opc, SourceLocation OpLoc,
+ SourceLocation OpLoc,
llvm::function_ref<bool(OverloadCandidate &)> Filter) {
// Sort the candidates by viability and position. Sorting directly would
// be prohibitive, so we make a set of pointers and sort those.
@@ -10771,15 +10845,35 @@ void OverloadCandidateSet::NoteCandidates(
}
}
- std::stable_sort(Cands.begin(), Cands.end(),
- CompareOverloadCandidatesForDisplay(S, OpLoc, Args.size(), Kind));
+ llvm::stable_sort(
+ Cands, CompareOverloadCandidatesForDisplay(S, OpLoc, Args.size(), Kind));
+ return Cands;
+}
+
+/// When overload resolution fails, prints diagnostic messages containing the
+/// candidates in the candidate set.
+void OverloadCandidateSet::NoteCandidates(PartialDiagnosticAt PD,
+ Sema &S, OverloadCandidateDisplayKind OCD, ArrayRef<Expr *> Args,
+ StringRef Opc, SourceLocation OpLoc,
+ llvm::function_ref<bool(OverloadCandidate &)> Filter) {
+
+ auto Cands = CompleteCandidates(S, OCD, Args, OpLoc, Filter);
+
+ S.Diag(PD.first, PD.second);
+
+ NoteCandidates(S, Args, Cands, Opc, OpLoc);
+}
+
+void OverloadCandidateSet::NoteCandidates(Sema &S, ArrayRef<Expr *> Args,
+ ArrayRef<OverloadCandidate *> Cands,
+ StringRef Opc, SourceLocation OpLoc) {
bool ReportedAmbiguousConversions = false;
- SmallVectorImpl<OverloadCandidate*>::iterator I, E;
const OverloadsShown ShowOverloads = S.Diags.getShowOverloads();
unsigned CandsShown = 0;
- for (I = Cands.begin(), E = Cands.end(); I != E; ++I) {
+ auto I = Cands.begin(), E = Cands.end();
+ for (; I != E; ++I) {
OverloadCandidate *Cand = *I;
// Set an arbitrary limit on the number of candidate functions we'll spam
@@ -10792,7 +10886,7 @@ void OverloadCandidateSet::NoteCandidates(
if (Cand->Function)
NoteFunctionCandidate(S, Cand, Args.size(),
- /*TakingCandidateAddress=*/false);
+ /*TakingCandidateAddress=*/false, DestAS);
else if (Cand->IsSurrogate)
NoteSurrogateCandidate(S, Cand);
else {
@@ -11671,7 +11765,7 @@ static void AddOverloadedCallCandidate(Sema &S,
return;
S.AddOverloadCandidate(Func, FoundDecl, Args, CandidateSet,
- /*SuppressUsedConversions=*/false,
+ /*SuppressUserConversions=*/false,
PartialOverloading);
return;
}
@@ -11680,7 +11774,7 @@ static void AddOverloadedCallCandidate(Sema &S,
= dyn_cast<FunctionTemplateDecl>(Callee)) {
S.AddTemplateOverloadCandidate(FuncTemplate, FoundDecl,
ExplicitTemplateArgs, Args, CandidateSet,
- /*SuppressUsedConversions=*/false,
+ /*SuppressUserConversions=*/false,
PartialOverloading);
return;
}
@@ -11895,15 +11989,6 @@ public:
}
-static std::unique_ptr<CorrectionCandidateCallback>
-MakeValidator(Sema &SemaRef, MemberExpr *ME, size_t NumArgs,
- bool HasTemplateArgs, bool AllowTypoCorrection) {
- if (!AllowTypoCorrection)
- return llvm::make_unique<NoTypoCorrectionCCC>();
- return llvm::make_unique<FunctionCallFilterCCC>(SemaRef, NumArgs,
- HasTemplateArgs, ME);
-}
-
/// Attempts to recover from a call where no functions were found.
///
/// Returns true if new candidates were found.
@@ -11938,16 +12023,22 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
LookupResult R(SemaRef, ULE->getName(), ULE->getNameLoc(),
Sema::LookupOrdinaryName);
bool DoDiagnoseEmptyLookup = EmptyLookup;
- if (!DiagnoseTwoPhaseLookup(SemaRef, Fn->getExprLoc(), SS, R,
- OverloadCandidateSet::CSK_Normal,
- ExplicitTemplateArgs, Args,
- &DoDiagnoseEmptyLookup) &&
- (!DoDiagnoseEmptyLookup || SemaRef.DiagnoseEmptyLookup(
- S, SS, R,
- MakeValidator(SemaRef, dyn_cast<MemberExpr>(Fn), Args.size(),
- ExplicitTemplateArgs != nullptr, AllowTypoCorrection),
- ExplicitTemplateArgs, Args)))
- return ExprError();
+ if (!DiagnoseTwoPhaseLookup(
+ SemaRef, Fn->getExprLoc(), SS, R, OverloadCandidateSet::CSK_Normal,
+ ExplicitTemplateArgs, Args, &DoDiagnoseEmptyLookup)) {
+ NoTypoCorrectionCCC NoTypoValidator{};
+ FunctionCallFilterCCC FunctionCallValidator(SemaRef, Args.size(),
+ ExplicitTemplateArgs != nullptr,
+ dyn_cast<MemberExpr>(Fn));
+ CorrectionCandidateCallback &Validator =
+ AllowTypoCorrection
+ ? static_cast<CorrectionCandidateCallback &>(FunctionCallValidator)
+ : static_cast<CorrectionCandidateCallback &>(NoTypoValidator);
+ if (!DoDiagnoseEmptyLookup ||
+ SemaRef.DiagnoseEmptyLookup(S, SS, R, Validator, ExplicitTemplateArgs,
+ Args))
+ return ExprError();
+ }
assert(!R.empty() && "lookup results empty despite recovery");
@@ -11975,7 +12066,7 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
// This shouldn't cause an infinite loop because we're giving it
// an expression with viable lookup results, which should never
// end up here.
- return SemaRef.ActOnCallExpr(/*Scope*/ nullptr, NewFn.get(), LParenLoc,
+ return SemaRef.BuildCallExpr(/*Scope*/ nullptr, NewFn.get(), LParenLoc,
MultiExprArg(Args.data(), Args.size()),
RParenLoc);
}
@@ -11997,7 +12088,8 @@ bool Sema::buildOverloadedCallSet(Scope *S, Expr *Fn,
// We don't perform ADL for implicit declarations of builtins.
// Verify that this was correctly set up.
FunctionDecl *F;
- if (ULE->decls_begin() + 1 == ULE->decls_end() &&
+ if (ULE->decls_begin() != ULE->decls_end() &&
+ ULE->decls_begin() + 1 == ULE->decls_end() &&
(F = dyn_cast<FunctionDecl>(*ULE->decls_begin())) &&
F->getBuiltinID() && F->isImplicit())
llvm_unreachable("performing ADL for builtin");
@@ -12101,24 +12193,29 @@ static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
}
}
- SemaRef.Diag(Fn->getBeginLoc(), diag::err_ovl_no_viable_function_in_call)
- << ULE->getName() << Fn->getSourceRange();
- CandidateSet->NoteCandidates(SemaRef, OCD_AllCandidates, Args);
+ CandidateSet->NoteCandidates(
+ PartialDiagnosticAt(
+ Fn->getBeginLoc(),
+ SemaRef.PDiag(diag::err_ovl_no_viable_function_in_call)
+ << ULE->getName() << Fn->getSourceRange()),
+ SemaRef, OCD_AllCandidates, Args);
break;
}
case OR_Ambiguous:
- SemaRef.Diag(Fn->getBeginLoc(), diag::err_ovl_ambiguous_call)
- << ULE->getName() << Fn->getSourceRange();
- CandidateSet->NoteCandidates(SemaRef, OCD_ViableCandidates, Args);
+ CandidateSet->NoteCandidates(
+ PartialDiagnosticAt(Fn->getBeginLoc(),
+ SemaRef.PDiag(diag::err_ovl_ambiguous_call)
+ << ULE->getName() << Fn->getSourceRange()),
+ SemaRef, OCD_ViableCandidates, Args);
break;
case OR_Deleted: {
- SemaRef.Diag(Fn->getBeginLoc(), diag::err_ovl_deleted_call)
- << (*Best)->Function->isDeleted() << ULE->getName()
- << SemaRef.getDeletedOrUnavailableSuffix((*Best)->Function)
- << Fn->getSourceRange();
- CandidateSet->NoteCandidates(SemaRef, OCD_AllCandidates, Args);
+ CandidateSet->NoteCandidates(
+ PartialDiagnosticAt(Fn->getBeginLoc(),
+ SemaRef.PDiag(diag::err_ovl_deleted_call)
+ << ULE->getName() << Fn->getSourceRange()),
+ SemaRef, OCD_AllCandidates, Args);
// We emitted an error for the unavailable/deleted function call but keep
// the call in the AST.
@@ -12176,10 +12273,9 @@ ExprResult Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn,
OverloadingResult OverloadResult =
CandidateSet.BestViableFunction(*this, Fn->getBeginLoc(), Best);
- return FinishOverloadedCallExpr(*this, S, Fn, ULE, LParenLoc, Args,
- RParenLoc, ExecConfig, &CandidateSet,
- &Best, OverloadResult,
- AllowTypoCorrection);
+ return FinishOverloadedCallExpr(*this, S, Fn, ULE, LParenLoc, Args, RParenLoc,
+ ExecConfig, &CandidateSet, &Best,
+ OverloadResult, AllowTypoCorrection);
}
static bool IsOverloaded(const UnresolvedSetImpl &Functions) {
@@ -12352,22 +12448,22 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
break;
case OR_Ambiguous:
- Diag(OpLoc, diag::err_ovl_ambiguous_oper_unary)
- << UnaryOperator::getOpcodeStr(Opc)
- << Input->getType()
- << Input->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, ArgsArray,
- UnaryOperator::getOpcodeStr(Opc), OpLoc);
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(OpLoc,
+ PDiag(diag::err_ovl_ambiguous_oper_unary)
+ << UnaryOperator::getOpcodeStr(Opc)
+ << Input->getType() << Input->getSourceRange()),
+ *this, OCD_ViableCandidates, ArgsArray,
+ UnaryOperator::getOpcodeStr(Opc), OpLoc);
return ExprError();
case OR_Deleted:
- Diag(OpLoc, diag::err_ovl_deleted_oper)
- << Best->Function->isDeleted()
- << UnaryOperator::getOpcodeStr(Opc)
- << getDeletedOrUnavailableSuffix(Best->Function)
- << Input->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, ArgsArray,
- UnaryOperator::getOpcodeStr(Opc), OpLoc);
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(OpLoc, PDiag(diag::err_ovl_deleted_oper)
+ << UnaryOperator::getOpcodeStr(Opc)
+ << Input->getSourceRange()),
+ *this, OCD_AllCandidates, ArgsArray, UnaryOperator::getOpcodeStr(Opc),
+ OpLoc);
return ExprError();
}
@@ -12601,6 +12697,9 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// operator do not fall through to handling in built-in, but report that
// no overloaded assignment operator found
ExprResult Result = ExprError();
+ StringRef OpcStr = BinaryOperator::getOpcodeStr(Opc);
+ auto Cands = CandidateSet.CompleteCandidates(*this, OCD_AllCandidates,
+ Args, OpLoc);
if (Args[0]->getType()->isRecordType() &&
Opc >= BO_Assign && Opc <= BO_OrAssign) {
Diag(OpLoc, diag::err_ovl_no_viable_oper)
@@ -12625,19 +12724,20 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
}
assert(Result.isInvalid() &&
"C++ binary operator overloading is missing candidates!");
- if (Result.isInvalid())
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args,
- BinaryOperator::getOpcodeStr(Opc), OpLoc);
+ CandidateSet.NoteCandidates(*this, Args, Cands, OpcStr, OpLoc);
return Result;
}
case OR_Ambiguous:
- Diag(OpLoc, diag::err_ovl_ambiguous_oper_binary)
- << BinaryOperator::getOpcodeStr(Opc)
- << Args[0]->getType() << Args[1]->getType()
- << Args[0]->getSourceRange() << Args[1]->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args,
- BinaryOperator::getOpcodeStr(Opc), OpLoc);
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(OpLoc, PDiag(diag::err_ovl_ambiguous_oper_binary)
+ << BinaryOperator::getOpcodeStr(Opc)
+ << Args[0]->getType()
+ << Args[1]->getType()
+ << Args[0]->getSourceRange()
+ << Args[1]->getSourceRange()),
+ *this, OCD_ViableCandidates, Args, BinaryOperator::getOpcodeStr(Opc),
+ OpLoc);
return ExprError();
case OR_Deleted:
@@ -12651,15 +12751,14 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// explain why it's deleted.
NoteDeletedFunction(Method);
return ExprError();
- } else {
- Diag(OpLoc, diag::err_ovl_deleted_oper)
- << Best->Function->isDeleted()
- << BinaryOperator::getOpcodeStr(Opc)
- << getDeletedOrUnavailableSuffix(Best->Function)
- << Args[0]->getSourceRange() << Args[1]->getSourceRange();
}
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args,
- BinaryOperator::getOpcodeStr(Opc), OpLoc);
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(OpLoc, PDiag(diag::err_ovl_deleted_oper)
+ << BinaryOperator::getOpcodeStr(Opc)
+ << Args[0]->getSourceRange()
+ << Args[1]->getSourceRange()),
+ *this, OCD_AllCandidates, Args, BinaryOperator::getOpcodeStr(Opc),
+ OpLoc);
return ExprError();
}
@@ -12801,35 +12900,34 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
}
case OR_No_Viable_Function: {
- if (CandidateSet.empty())
- Diag(LLoc, diag::err_ovl_no_oper)
- << Args[0]->getType() << /*subscript*/ 0
- << Args[0]->getSourceRange() << Args[1]->getSourceRange();
- else
- Diag(LLoc, diag::err_ovl_no_viable_subscript)
- << Args[0]->getType()
- << Args[0]->getSourceRange() << Args[1]->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args,
- "[]", LLoc);
+ PartialDiagnostic PD = CandidateSet.empty()
+ ? (PDiag(diag::err_ovl_no_oper)
+ << Args[0]->getType() << /*subscript*/ 0
+ << Args[0]->getSourceRange() << Args[1]->getSourceRange())
+ : (PDiag(diag::err_ovl_no_viable_subscript)
+ << Args[0]->getType() << Args[0]->getSourceRange()
+ << Args[1]->getSourceRange());
+ CandidateSet.NoteCandidates(PartialDiagnosticAt(LLoc, PD), *this,
+ OCD_AllCandidates, Args, "[]", LLoc);
return ExprError();
}
case OR_Ambiguous:
- Diag(LLoc, diag::err_ovl_ambiguous_oper_binary)
- << "[]"
- << Args[0]->getType() << Args[1]->getType()
- << Args[0]->getSourceRange() << Args[1]->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args,
- "[]", LLoc);
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(LLoc, PDiag(diag::err_ovl_ambiguous_oper_binary)
+ << "[]" << Args[0]->getType()
+ << Args[1]->getType()
+ << Args[0]->getSourceRange()
+ << Args[1]->getSourceRange()),
+ *this, OCD_ViableCandidates, Args, "[]", LLoc);
return ExprError();
case OR_Deleted:
- Diag(LLoc, diag::err_ovl_deleted_oper)
- << Best->Function->isDeleted() << "[]"
- << getDeletedOrUnavailableSuffix(Best->Function)
- << Args[0]->getSourceRange() << Args[1]->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args,
- "[]", LLoc);
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(LLoc, PDiag(diag::err_ovl_deleted_oper)
+ << "[]" << Args[0]->getSourceRange()
+ << Args[1]->getSourceRange()),
+ *this, OCD_AllCandidates, Args, "[]", LLoc);
return ExprError();
}
@@ -12870,7 +12968,7 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
// Check that the object type isn't more qualified than the
// member function we're calling.
- Qualifiers funcQuals = proto->getTypeQuals();
+ Qualifiers funcQuals = proto->getMethodQuals();
QualType objectType = op->getLHS()->getType();
if (op->getOpcode() == BO_PtrMemI)
@@ -12954,8 +13052,9 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
// Microsoft supports direct constructor calls.
if (getLangOpts().MicrosoftExt && isa<CXXConstructorDecl>(Func)) {
- AddOverloadCandidate(cast<CXXConstructorDecl>(Func), I.getPair(),
- Args, CandidateSet);
+ AddOverloadCandidate(cast<CXXConstructorDecl>(Func), I.getPair(), Args,
+ CandidateSet,
+ /*SuppressUserConversions*/ false);
} else if ((Method = dyn_cast<CXXMethodDecl>(Func))) {
// If explicit template arguments were provided, we can't call a
// non-template member function.
@@ -12969,7 +13068,7 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
AddMethodTemplateCandidate(
cast<FunctionTemplateDecl>(Func), I.getPair(), ActingDC,
TemplateArgs, ObjectType, ObjectClassification, Args, CandidateSet,
- /*SuppressUsedConversions=*/false);
+ /*SuppressUserConversions=*/false);
}
}
@@ -12998,27 +13097,30 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
break;
case OR_No_Viable_Function:
- Diag(UnresExpr->getMemberLoc(),
- diag::err_ovl_no_viable_member_function_in_call)
- << DeclName << MemExprE->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(
+ UnresExpr->getMemberLoc(),
+ PDiag(diag::err_ovl_no_viable_member_function_in_call)
+ << DeclName << MemExprE->getSourceRange()),
+ *this, OCD_AllCandidates, Args);
// FIXME: Leaking incoming expressions!
return ExprError();
case OR_Ambiguous:
- Diag(UnresExpr->getMemberLoc(), diag::err_ovl_ambiguous_member_call)
- << DeclName << MemExprE->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(UnresExpr->getMemberLoc(),
+ PDiag(diag::err_ovl_ambiguous_member_call)
+ << DeclName << MemExprE->getSourceRange()),
+ *this, OCD_AllCandidates, Args);
// FIXME: Leaking incoming expressions!
return ExprError();
case OR_Deleted:
- Diag(UnresExpr->getMemberLoc(), diag::err_ovl_deleted_member_call)
- << Best->Function->isDeleted()
- << DeclName
- << getDeletedOrUnavailableSuffix(Best->Function)
- << MemExprE->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(UnresExpr->getMemberLoc(),
+ PDiag(diag::err_ovl_deleted_member_call)
+ << DeclName << MemExprE->getSourceRange()),
+ *this, OCD_AllCandidates, Args);
// FIXME: Leaking incoming expressions!
return ExprError();
}
@@ -13162,7 +13264,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
Oper != OperEnd; ++Oper) {
AddMethodCandidate(Oper.getPair(), Object.get()->getType(),
Object.get()->Classify(Context), Args, CandidateSet,
- /*SuppressUserConversions=*/false);
+ /*SuppressUserConversion=*/false);
}
// C++ [over.call.object]p2:
@@ -13222,29 +13324,35 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
// below.
break;
- case OR_No_Viable_Function:
- if (CandidateSet.empty())
- Diag(Object.get()->getBeginLoc(), diag::err_ovl_no_oper)
- << Object.get()->getType() << /*call*/ 1
- << Object.get()->getSourceRange();
- else
- Diag(Object.get()->getBeginLoc(), diag::err_ovl_no_viable_object_call)
- << Object.get()->getType() << Object.get()->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
+ case OR_No_Viable_Function: {
+ PartialDiagnostic PD =
+ CandidateSet.empty()
+ ? (PDiag(diag::err_ovl_no_oper)
+ << Object.get()->getType() << /*call*/ 1
+ << Object.get()->getSourceRange())
+ : (PDiag(diag::err_ovl_no_viable_object_call)
+ << Object.get()->getType() << Object.get()->getSourceRange());
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(Object.get()->getBeginLoc(), PD), *this,
+ OCD_AllCandidates, Args);
break;
-
+ }
case OR_Ambiguous:
- Diag(Object.get()->getBeginLoc(), diag::err_ovl_ambiguous_object_call)
- << Object.get()->getType() << Object.get()->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args);
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(Object.get()->getBeginLoc(),
+ PDiag(diag::err_ovl_ambiguous_object_call)
+ << Object.get()->getType()
+ << Object.get()->getSourceRange()),
+ *this, OCD_ViableCandidates, Args);
break;
case OR_Deleted:
- Diag(Object.get()->getBeginLoc(), diag::err_ovl_deleted_object_call)
- << Best->Function->isDeleted() << Object.get()->getType()
- << getDeletedOrUnavailableSuffix(Best->Function)
- << Object.get()->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(Object.get()->getBeginLoc(),
+ PDiag(diag::err_ovl_deleted_object_call)
+ << Object.get()->getType()
+ << Object.get()->getSourceRange()),
+ *this, OCD_AllCandidates, Args);
break;
}
@@ -13268,7 +13376,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
"Found Decl & conversion-to-functionptr should be same, right?!");
// We selected one of the surrogate functions that converts the
// object parameter to a function pointer. Perform the conversion
- // on the object argument, then let ActOnCallExpr finish the job.
+ // on the object argument, then let BuildCallExpr finish the job.
// Create an implicit member expr to refer to the conversion operator.
// and then call it.
@@ -13281,7 +13389,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
CK_UserDefinedConversion, Call.get(),
nullptr, VK_RValue);
- return ActOnCallExpr(S, Call.get(), LParenLoc, Args, RParenLoc);
+ return BuildCallExpr(S, Call.get(), LParenLoc, Args, RParenLoc);
}
CheckMemberOperatorAccess(LParenLoc, Object.get(), nullptr, Best->FoundDecl);
@@ -13431,7 +13539,7 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
for (LookupResult::iterator Oper = R.begin(), OperEnd = R.end();
Oper != OperEnd; ++Oper) {
AddMethodCandidate(Oper.getPair(), Base->getType(), Base->Classify(Context),
- None, CandidateSet, /*SuppressUserConversions=*/false);
+ None, CandidateSet, /*SuppressUserConversion=*/false);
}
bool HadMultipleCandidates = (CandidateSet.size() > 1);
@@ -13443,7 +13551,8 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
// Overload resolution succeeded; we'll build the call below.
break;
- case OR_No_Viable_Function:
+ case OR_No_Viable_Function: {
+ auto Cands = CandidateSet.CompleteCandidates(*this, OCD_AllCandidates, Base);
if (CandidateSet.empty()) {
QualType BaseType = Base->getType();
if (NoArrowOperatorFound) {
@@ -13461,22 +13570,22 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
} else
Diag(OpLoc, diag::err_ovl_no_viable_oper)
<< "operator->" << Base->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Base);
+ CandidateSet.NoteCandidates(*this, Base, Cands);
return ExprError();
-
+ }
case OR_Ambiguous:
- Diag(OpLoc, diag::err_ovl_ambiguous_oper_unary)
- << "->" << Base->getType() << Base->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Base);
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(OpLoc, PDiag(diag::err_ovl_ambiguous_oper_unary)
+ << "->" << Base->getType()
+ << Base->getSourceRange()),
+ *this, OCD_ViableCandidates, Base);
return ExprError();
case OR_Deleted:
- Diag(OpLoc, diag::err_ovl_deleted_oper)
- << Best->Function->isDeleted()
- << "->"
- << getDeletedOrUnavailableSuffix(Best->Function)
- << Base->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Base);
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(OpLoc, PDiag(diag::err_ovl_deleted_oper)
+ << "->" << Base->getSourceRange()),
+ *this, OCD_AllCandidates, Base);
return ExprError();
}
@@ -13538,14 +13647,18 @@ ExprResult Sema::BuildLiteralOperatorCall(LookupResult &R,
break;
case OR_No_Viable_Function:
- Diag(UDSuffixLoc, diag::err_ovl_no_viable_function_in_call)
- << R.getLookupName();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(UDSuffixLoc,
+ PDiag(diag::err_ovl_no_viable_function_in_call)
+ << R.getLookupName()),
+ *this, OCD_AllCandidates, Args);
return ExprError();
case OR_Ambiguous:
- Diag(R.getNameLoc(), diag::err_ovl_ambiguous_call) << R.getLookupName();
- CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args);
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(R.getNameLoc(), PDiag(diag::err_ovl_ambiguous_call)
+ << R.getLookupName()),
+ *this, OCD_ViableCandidates, Args);
return ExprError();
}
@@ -13615,7 +13728,7 @@ Sema::BuildForRangeBeginEndCall(SourceLocation Loc,
*CallExpr = ExprError();
return FRS_DiagnosticIssued;
}
- *CallExpr = ActOnCallExpr(S, MemberRef.get(), Loc, None, Loc, nullptr);
+ *CallExpr = BuildCallExpr(S, MemberRef.get(), Loc, None, Loc, nullptr);
if (CallExpr->isInvalid()) {
*CallExpr = ExprError();
return FRS_DiagnosticIssued;
@@ -13701,7 +13814,7 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
unsigned ResultIdx = GSE->getResultIndex();
AssocExprs[ResultIdx] = SubExpr;
- return new (Context) GenericSelectionExpr(
+ return GenericSelectionExpr::Create(
Context, GSE->getGenericLoc(), GSE->getControllingExpr(),
GSE->getAssocTypeSourceInfos(), AssocExprs, GSE->getDefaultLoc(),
GSE->getRParenLoc(), GSE->containsUnexpandedParameterPack(),
@@ -13775,17 +13888,10 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
TemplateArgs = &TemplateArgsBuffer;
}
- DeclRefExpr *DRE = DeclRefExpr::Create(Context,
- ULE->getQualifierLoc(),
- ULE->getTemplateKeywordLoc(),
- Fn,
- /*enclosing*/ false, // FIXME?
- ULE->getNameLoc(),
- Fn->getType(),
- VK_LValue,
- Found.getDecl(),
- TemplateArgs);
- MarkDeclRefReferenced(DRE);
+ DeclRefExpr *DRE =
+ BuildDeclRefExpr(Fn, Fn->getType(), VK_LValue, ULE->getNameInfo(),
+ ULE->getQualifierLoc(), Found.getDecl(),
+ ULE->getTemplateKeywordLoc(), TemplateArgs);
DRE->setHadMultipleCandidates(ULE->getNumDecls() > 1);
return DRE;
}
@@ -13804,27 +13910,18 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
// implicit member access, rewrite to a simple decl ref.
if (MemExpr->isImplicitAccess()) {
if (cast<CXXMethodDecl>(Fn)->isStatic()) {
- DeclRefExpr *DRE = DeclRefExpr::Create(Context,
- MemExpr->getQualifierLoc(),
- MemExpr->getTemplateKeywordLoc(),
- Fn,
- /*enclosing*/ false,
- MemExpr->getMemberLoc(),
- Fn->getType(),
- VK_LValue,
- Found.getDecl(),
- TemplateArgs);
- MarkDeclRefReferenced(DRE);
+ DeclRefExpr *DRE = BuildDeclRefExpr(
+ Fn, Fn->getType(), VK_LValue, MemExpr->getNameInfo(),
+ MemExpr->getQualifierLoc(), Found.getDecl(),
+ MemExpr->getTemplateKeywordLoc(), TemplateArgs);
DRE->setHadMultipleCandidates(MemExpr->getNumDecls() > 1);
return DRE;
} else {
SourceLocation Loc = MemExpr->getMemberLoc();
if (MemExpr->getQualifier())
Loc = MemExpr->getQualifierLoc().getBeginLoc();
- CheckCXXThisCapture(Loc);
- Base = new (Context) CXXThisExpr(Loc,
- MemExpr->getBaseType(),
- /*isImplicit=*/true);
+ Base =
+ BuildCXXThisExpr(Loc, MemExpr->getBaseType(), /*IsImplicit=*/true);
}
} else
Base = MemExpr->getBase();
@@ -13839,14 +13936,11 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
type = Context.BoundMemberTy;
}
- MemberExpr *ME = MemberExpr::Create(
- Context, Base, MemExpr->isArrow(), MemExpr->getOperatorLoc(),
+ return BuildMemberExpr(
+ Base, MemExpr->isArrow(), MemExpr->getOperatorLoc(),
MemExpr->getQualifierLoc(), MemExpr->getTemplateKeywordLoc(), Fn, Found,
- MemExpr->getMemberNameInfo(), TemplateArgs, type, valueKind,
- OK_Ordinary);
- ME->setHadMultipleCandidates(true);
- MarkMemberReferenced(ME);
- return ME;
+ /*HadMultipleCandidates=*/true, MemExpr->getMemberNameInfo(),
+ type, valueKind, OK_Ordinary, TemplateArgs);
}
llvm_unreachable("Invalid reference to overloaded function");
diff --git a/lib/Sema/SemaPseudoObject.cpp b/lib/Sema/SemaPseudoObject.cpp
index ebf1d10aa16a..06bcd8d00ded 100644
--- a/lib/Sema/SemaPseudoObject.cpp
+++ b/lib/Sema/SemaPseudoObject.cpp
@@ -1,9 +1,8 @@
//===--- SemaPseudoObject.cpp - Semantic Analysis for Pseudo-Objects ------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -141,25 +140,24 @@ namespace {
unsigned resultIndex = gse->getResultIndex();
unsigned numAssocs = gse->getNumAssocs();
- SmallVector<Expr*, 8> assocs(numAssocs);
- SmallVector<TypeSourceInfo*, 8> assocTypes(numAssocs);
-
- for (unsigned i = 0; i != numAssocs; ++i) {
- Expr *assoc = gse->getAssocExpr(i);
- if (i == resultIndex) assoc = rebuild(assoc);
- assocs[i] = assoc;
- assocTypes[i] = gse->getAssocTypeSourceInfo(i);
+ SmallVector<Expr *, 8> assocExprs;
+ SmallVector<TypeSourceInfo *, 8> assocTypes;
+ assocExprs.reserve(numAssocs);
+ assocTypes.reserve(numAssocs);
+
+ for (const GenericSelectionExpr::Association &assoc :
+ gse->associations()) {
+ Expr *assocExpr = assoc.getAssociationExpr();
+ if (assoc.isSelected())
+ assocExpr = rebuild(assocExpr);
+ assocExprs.push_back(assocExpr);
+ assocTypes.push_back(assoc.getTypeSourceInfo());
}
- return new (S.Context) GenericSelectionExpr(S.Context,
- gse->getGenericLoc(),
- gse->getControllingExpr(),
- assocTypes,
- assocs,
- gse->getDefaultLoc(),
- gse->getRParenLoc(),
- gse->containsUnexpandedParameterPack(),
- resultIndex);
+ return GenericSelectionExpr::Create(
+ S.Context, gse->getGenericLoc(), gse->getControllingExpr(),
+ assocTypes, assocExprs, gse->getDefaultLoc(), gse->getRParenLoc(),
+ gse->containsUnexpandedParameterPack(), resultIndex);
}
if (ChooseExpr *ce = dyn_cast<ChooseExpr>(e)) {
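
Note: the rebuild above replaces parallel index loops over getAssocExpr(i) / getAssocTypeSourceInfo(i) with the association view on GenericSelectionExpr. A minimal sketch of that iteration style, assuming clang's AST headers at this revision (the helper name is illustrative):

    #include "clang/AST/Expr.h"
    #include "llvm/ADT/SmallVector.h"
    using namespace clang;

    // Collect association expressions and types in source order, mirroring the
    // loop in the rebuilder above; isSelected() marks the chosen arm.
    static void collectAssociations(GenericSelectionExpr *GSE,
                                    llvm::SmallVectorImpl<Expr *> &Exprs,
                                    llvm::SmallVectorImpl<TypeSourceInfo *> &Types) {
      for (const GenericSelectionExpr::Association &A : GSE->associations()) {
        Exprs.push_back(A.getAssociationExpr());
        Types.push_back(A.getTypeSourceInfo()); // null for the default association
      }
    }
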
@@ -1493,7 +1491,7 @@ ExprResult MSPropertyOpBuilder::buildGet() {
return ExprError();
}
- return S.ActOnCallExpr(S.getCurScope(), GetterExpr.get(),
+ return S.BuildCallExpr(S.getCurScope(), GetterExpr.get(),
RefExpr->getSourceRange().getBegin(), CallArgs,
RefExpr->getSourceRange().getEnd());
}
@@ -1525,7 +1523,7 @@ ExprResult MSPropertyOpBuilder::buildSet(Expr *op, SourceLocation sl,
SmallVector<Expr*, 4> ArgExprs;
ArgExprs.append(CallArgs.begin(), CallArgs.end());
ArgExprs.push_back(op);
- return S.ActOnCallExpr(S.getCurScope(), SetterExpr.get(),
+ return S.BuildCallExpr(S.getCurScope(), SetterExpr.get(),
RefExpr->getSourceRange().getBegin(), ArgExprs,
op->getSourceRange().getEnd());
}
diff --git a/lib/Sema/SemaStmt.cpp b/lib/Sema/SemaStmt.cpp
index 9e30c9a396c0..480155df8990 100644
--- a/lib/Sema/SemaStmt.cpp
+++ b/lib/Sema/SemaStmt.cpp
@@ -1,9 +1,8 @@
//===--- SemaStmt.cpp - Semantic Analysis for Statements ------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -11,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Sema/Ownership.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
@@ -347,10 +347,6 @@ sema::CompoundScopeInfo &Sema::getCurCompoundScope() const {
return getCurFunction()->CompoundScopes.back();
}
-bool Sema::isCurCompoundStmtAStmtExpr() const {
- return getCurCompoundScope().IsStmtExpr;
-}
-
StmtResult Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr) {
const unsigned NumElts = Elts.size();
@@ -943,7 +939,7 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
bool ShouldCheckConstantCond = HasConstantCond;
// Sort all the scalar case values so we can easily detect duplicates.
- std::stable_sort(CaseVals.begin(), CaseVals.end(), CmpCaseVals);
+ llvm::stable_sort(CaseVals, CmpCaseVals);
if (!CaseVals.empty()) {
for (unsigned i = 0, e = CaseVals.size(); i != e; ++i) {
@@ -991,7 +987,7 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
if (!CaseRanges.empty()) {
// Sort all the case ranges by their low value so we can easily detect
// overlaps between ranges.
- std::stable_sort(CaseRanges.begin(), CaseRanges.end());
+ llvm::stable_sort(CaseRanges);
// Scan the ranges, computing the high values and removing empty ranges.
std::vector<llvm::APSInt> HiVals;
@@ -1045,9 +1041,8 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
// Find the smallest value >= the lower bound. If I is in the
// case range, then we have overlap.
- CaseValsTy::iterator I = std::lower_bound(CaseVals.begin(),
- CaseVals.end(), CRLo,
- CaseCompareFunctor());
+ CaseValsTy::iterator I =
+ llvm::lower_bound(CaseVals, CRLo, CaseCompareFunctor());
if (I != CaseVals.end() && I->first < CRHi) {
OverlapVal = I->first; // Found overlap with scalar.
OverlapStmt = I->second;
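
Note: the sorting and searching changes in this file swap the iterator-pair std:: algorithms for the range helpers in llvm/ADT/STLExtras.h, which take the container directly. A tiny standalone sketch of the two helpers used above:

    #include "llvm/ADT/STLExtras.h"
    #include "llvm/ADT/SmallVector.h"

    void demo() {
      llvm::SmallVector<int, 8> Vals = {3, 1, 2};
      llvm::stable_sort(Vals);              // std::stable_sort over the whole range
      auto It = llvm::lower_bound(Vals, 2); // std::lower_bound over the whole range
      (void)It;
    }
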
@@ -1110,7 +1105,7 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
AdjustAPSInt(Val, CondWidth, CondIsSigned);
EnumVals.push_back(std::make_pair(Val, EDI));
}
- std::stable_sort(EnumVals.begin(), EnumVals.end(), CmpEnumVals);
+ llvm::stable_sort(EnumVals, CmpEnumVals);
auto EI = EnumVals.begin(), EIEnd =
std::unique(EnumVals.begin(), EnumVals.end(), EqEnumVals);
@@ -1167,6 +1162,9 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
break;
}
+ if (EI->second->hasAttr<UnusedAttr>())
+ continue;
+
// Drop unneeded case values
while (CI != CaseVals.end() && CI->first < EI->first)
CI++;
@@ -1261,7 +1259,7 @@ Sema::DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
}
if (EnumVals.empty())
return;
- std::stable_sort(EnumVals.begin(), EnumVals.end(), CmpEnumVals);
+ llvm::stable_sort(EnumVals, CmpEnumVals);
EnumValsTy::iterator EIend =
std::unique(EnumVals.begin(), EnumVals.end(), EqEnumVals);
@@ -2229,9 +2227,11 @@ BuildNonArrayForRange(Sema &SemaRef, Expr *BeginRange, Expr *EndRange,
return Sema::FRS_Success;
case Sema::FRS_NoViableFunction:
- SemaRef.Diag(BeginRange->getBeginLoc(), diag::err_for_range_invalid)
- << BeginRange->getType() << BEFFound;
- CandidateSet->NoteCandidates(SemaRef, OCD_AllCandidates, BeginRange);
+ CandidateSet->NoteCandidates(
+ PartialDiagnosticAt(BeginRange->getBeginLoc(),
+ SemaRef.PDiag(diag::err_for_range_invalid)
+ << BeginRange->getType() << BEFFound),
+ SemaRef, OCD_AllCandidates, BeginRange);
LLVM_FALLTHROUGH;
case Sema::FRS_DiagnosticIssued:
@@ -2447,7 +2447,7 @@ StmtResult Sema::BuildCXXForRangeStmt(SourceLocation ForLoc,
ExprResult SizeOfVLAExprR = ActOnUnaryExprOrTypeTraitExpr(
EndVar->getLocation(), UETT_SizeOf,
- /*isType=*/true,
+ /*IsType=*/true,
CreateParsedType(VAT->desugar(), Context.getTrivialTypeSourceInfo(
VAT->desugar(), RangeLoc))
.getAsOpaquePtr(),
@@ -2457,7 +2457,7 @@ StmtResult Sema::BuildCXXForRangeStmt(SourceLocation ForLoc,
ExprResult SizeOfEachElementExprR = ActOnUnaryExprOrTypeTraitExpr(
EndVar->getLocation(), UETT_SizeOf,
- /*isType=*/true,
+ /*IsType=*/true,
CreateParsedType(VAT->desugar(),
Context.getTrivialTypeSourceInfo(
VAT->getElementType(), RangeLoc))
@@ -2528,9 +2528,12 @@ StmtResult Sema::BuildCXXForRangeStmt(SourceLocation ForLoc,
// Otherwise, emit diagnostics if we haven't already.
if (RangeStatus == FRS_NoViableFunction) {
Expr *Range = BEFFailure ? EndRangeRef.get() : BeginRangeRef.get();
- Diag(Range->getBeginLoc(), diag::err_for_range_invalid)
- << RangeLoc << Range->getType() << BEFFailure;
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Range);
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(Range->getBeginLoc(),
+ PDiag(diag::err_for_range_invalid)
+ << RangeLoc << Range->getType()
+ << BEFFailure),
+ *this, OCD_AllCandidates, Range);
}
// Return an error if no fix was discovered.
if (RangeStatus != FRS_Success)
@@ -3387,10 +3390,10 @@ bool LocalTypedefNameReferencer::VisitRecordType(const RecordType *RT) {
}
TypeLoc Sema::getReturnTypeLoc(FunctionDecl *FD) const {
- TypeLoc TL = FD->getTypeSourceInfo()->getTypeLoc().IgnoreParens();
- while (auto ATL = TL.getAs<AttributedTypeLoc>())
- TL = ATL.getModifiedLoc().IgnoreParens();
- return TL.castAs<FunctionProtoTypeLoc>().getReturnLoc();
+ return FD->getTypeSourceInfo()
+ ->getTypeLoc()
+ .getAsAdjusted<FunctionProtoTypeLoc>()
+ .getReturnLoc();
}
/// Deduce the return type for a function from a returned expression, per
@@ -3500,7 +3503,12 @@ bool Sema::DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
StmtResult
Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope) {
- StmtResult R = BuildReturnStmt(ReturnLoc, RetValExp);
+ // Correct typos, in case the containing function returns 'auto' and
+ // RetValExp should determine the deduced type.
+ ExprResult RetVal = CorrectDelayedTyposInExpr(RetValExp);
+ if (RetVal.isInvalid())
+ return StmtError();
+ StmtResult R = BuildReturnStmt(ReturnLoc, RetVal.get());
if (R.isInvalid() || ExprEvalContexts.back().Context ==
ExpressionEvaluationContext::DiscardedStatement)
return R;
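
Note: the new CorrectDelayedTyposInExpr call matters when the enclosing function has a deduced return type — the corrected expression, not the placeholder TypoExpr, must drive deduction. An illustrative, intentionally ill-formed snippet; the function and variable names are made up:

    auto frobble(int x) {
      int value = x + 1;
      return valeu; // typo: after correction to 'value', deduction can still pick 'int'
    }
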
@@ -3684,7 +3692,8 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
}
if (FD)
- Diag(ReturnLoc, DiagID) << FD->getIdentifier() << 0/*fn*/;
+ Diag(ReturnLoc, DiagID)
+ << FD->getIdentifier() << 0 /*fn*/ << FD->isConsteval();
else
Diag(ReturnLoc, DiagID) << getCurMethodDecl()->getDeclName() << 1/*meth*/;
@@ -3998,12 +4007,10 @@ StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers) {
// Don't report an error if 'try' is used in system headers.
if (!getLangOpts().CXXExceptions &&
- !getSourceManager().isInSystemHeader(TryLoc) &&
- (!getLangOpts().OpenMPIsDevice ||
- !getLangOpts().OpenMPHostCXXExceptions ||
- isInOpenMPTargetExecutionDirective() ||
- isInOpenMPDeclareTargetContext()))
- Diag(TryLoc, diag::err_exceptions_disabled) << "try";
+ !getSourceManager().isInSystemHeader(TryLoc) && !getLangOpts().CUDA) {
+ // Delay error emission for the OpenMP device code.
+ targetDiag(TryLoc, diag::err_exceptions_disabled) << "try";
+ }
// Exceptions aren't allowed in CUDA device code.
if (getLangOpts().CUDA)
@@ -4216,30 +4223,46 @@ Sema::CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc,
return RD;
}
-static void
-buildCapturedStmtCaptureList(SmallVectorImpl<CapturedStmt::Capture> &Captures,
- SmallVectorImpl<Expr *> &CaptureInits,
- ArrayRef<sema::Capture> Candidates) {
- for (const sema::Capture &Cap : Candidates) {
+static bool
+buildCapturedStmtCaptureList(Sema &S, CapturedRegionScopeInfo *RSI,
+ SmallVectorImpl<CapturedStmt::Capture> &Captures,
+ SmallVectorImpl<Expr *> &CaptureInits) {
+ for (const sema::Capture &Cap : RSI->Captures) {
+ if (Cap.isInvalid())
+ continue;
+
+ // Form the initializer for the capture.
+ ExprResult Init = S.BuildCaptureInit(Cap, Cap.getLocation(),
+ RSI->CapRegionKind == CR_OpenMP);
+
+ // FIXME: Bail out now if the capture is not used and the initializer has
+ // no side-effects.
+
+ // Create a field for this capture.
+ FieldDecl *Field = S.BuildCaptureField(RSI->TheRecordDecl, Cap);
+
+ // Add the capture to our list of captures.
if (Cap.isThisCapture()) {
Captures.push_back(CapturedStmt::Capture(Cap.getLocation(),
CapturedStmt::VCK_This));
- CaptureInits.push_back(Cap.getInitExpr());
- continue;
} else if (Cap.isVLATypeCapture()) {
Captures.push_back(
CapturedStmt::Capture(Cap.getLocation(), CapturedStmt::VCK_VLAType));
- CaptureInits.push_back(nullptr);
- continue;
- }
+ } else {
+ assert(Cap.isVariableCapture() && "unknown kind of capture");
- Captures.push_back(CapturedStmt::Capture(Cap.getLocation(),
- Cap.isReferenceCapture()
- ? CapturedStmt::VCK_ByRef
- : CapturedStmt::VCK_ByCopy,
- Cap.getVariable()));
- CaptureInits.push_back(Cap.getInitExpr());
+ if (S.getLangOpts().OpenMP && RSI->CapRegionKind == CR_OpenMP)
+ S.setOpenMPCaptureKind(Field, Cap.getVariable(), RSI->OpenMPLevel);
+
+ Captures.push_back(CapturedStmt::Capture(Cap.getLocation(),
+ Cap.isReferenceCapture()
+ ? CapturedStmt::VCK_ByRef
+ : CapturedStmt::VCK_ByCopy,
+ Cap.getVariable()));
+ }
+ CaptureInits.push_back(Init.get());
}
+ return false;
}
void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
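
Note: illustrative source (not from the patch) that goes through the reworked capture builder — an OpenMP parallel region is lowered via a CapturedStmt, and each capture now gets its record field from BuildCaptureField and its initializer from BuildCaptureInit in the loop above.

    void bump(int n) {
    #pragma omp parallel firstprivate(n) // 'n' is captured by copy into the record
      { ++n; }
    }
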
@@ -4332,25 +4355,31 @@ void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
void Sema::ActOnCapturedRegionError() {
DiscardCleanupsInEvaluationContext();
PopExpressionEvaluationContext();
+ PopDeclContext();
+ PoppedFunctionScopePtr ScopeRAII = PopFunctionScopeInfo();
+ CapturedRegionScopeInfo *RSI = cast<CapturedRegionScopeInfo>(ScopeRAII.get());
- CapturedRegionScopeInfo *RSI = getCurCapturedRegion();
RecordDecl *Record = RSI->TheRecordDecl;
Record->setInvalidDecl();
SmallVector<Decl*, 4> Fields(Record->fields());
ActOnFields(/*Scope=*/nullptr, Record->getLocation(), Record, Fields,
SourceLocation(), SourceLocation(), ParsedAttributesView());
-
- PopDeclContext();
- PopFunctionScopeInfo();
}
StmtResult Sema::ActOnCapturedRegionEnd(Stmt *S) {
- CapturedRegionScopeInfo *RSI = getCurCapturedRegion();
+ // Leave the captured scope before we start creating captures in the
+ // enclosing scope.
+ DiscardCleanupsInEvaluationContext();
+ PopExpressionEvaluationContext();
+ PopDeclContext();
+ PoppedFunctionScopePtr ScopeRAII = PopFunctionScopeInfo();
+ CapturedRegionScopeInfo *RSI = cast<CapturedRegionScopeInfo>(ScopeRAII.get());
SmallVector<CapturedStmt::Capture, 4> Captures;
SmallVector<Expr *, 4> CaptureInits;
- buildCapturedStmtCaptureList(Captures, CaptureInits, RSI->Captures);
+ if (buildCapturedStmtCaptureList(*this, RSI, Captures, CaptureInits))
+ return StmtError();
CapturedDecl *CD = RSI->TheCapturedDecl;
RecordDecl *RD = RSI->TheRecordDecl;
@@ -4362,11 +4391,5 @@ StmtResult Sema::ActOnCapturedRegionEnd(Stmt *S) {
CD->setBody(Res->getCapturedStmt());
RD->completeDefinition();
- DiscardCleanupsInEvaluationContext();
- PopExpressionEvaluationContext();
-
- PopDeclContext();
- PopFunctionScopeInfo();
-
return Res;
}
diff --git a/lib/Sema/SemaStmtAsm.cpp b/lib/Sema/SemaStmtAsm.cpp
index 9e084c99d0dd..b123a739a7ab 100644
--- a/lib/Sema/SemaStmtAsm.cpp
+++ b/lib/Sema/SemaStmtAsm.cpp
@@ -1,9 +1,8 @@
//===--- SemaStmtAsm.cpp - Semantic Analysis for Asm Statements -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -210,11 +209,12 @@ static StringRef extractRegisterName(const Expr *Expression,
static SourceLocation
getClobberConflictLocation(MultiExprArg Exprs, StringLiteral **Constraints,
StringLiteral **Clobbers, int NumClobbers,
+ unsigned NumLabels,
const TargetInfo &Target, ASTContext &Cont) {
llvm::StringSet<> InOutVars;
// Collect all the input and output registers from the extended asm
// statement in order to check for conflicts with the clobber list
- for (unsigned int i = 0; i < Exprs.size(); ++i) {
+ for (unsigned int i = 0; i < Exprs.size() - NumLabels; ++i) {
StringRef Constraint = Constraints[i]->getString();
StringRef InOutReg = Target.getConstraintRegister(
Constraint, extractRegisterName(Exprs[i], Target));
@@ -242,6 +242,7 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg constraints, MultiExprArg Exprs,
Expr *asmString, MultiExprArg clobbers,
+ unsigned NumLabels,
SourceLocation RParenLoc) {
unsigned NumClobbers = clobbers.size();
StringLiteral **Constraints =
@@ -254,15 +255,6 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
// The parser verifies that there is a string literal here.
assert(AsmString->isAscii());
- // If we're compiling CUDA file and function attributes indicate that it's not
- // for this compilation side, skip all the checks.
- if (!DeclAttrsMatchCUDAMode(getLangOpts(), getCurFunctionDecl())) {
- GCCAsmStmt *NS = new (Context) GCCAsmStmt(
- Context, AsmLoc, IsSimple, IsVolatile, NumOutputs, NumInputs, Names,
- Constraints, Exprs.data(), AsmString, NumClobbers, Clobbers, RParenLoc);
- return NS;
- }
-
for (unsigned i = 0; i != NumOutputs; i++) {
StringLiteral *Literal = Constraints[i];
assert(Literal->isAscii());
@@ -272,10 +264,15 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
OutputName = Names[i]->getName();
TargetInfo::ConstraintInfo Info(Literal->getString(), OutputName);
- if (!Context.getTargetInfo().validateOutputConstraint(Info))
- return StmtError(
- Diag(Literal->getBeginLoc(), diag::err_asm_invalid_output_constraint)
- << Info.getConstraintStr());
+ if (!Context.getTargetInfo().validateOutputConstraint(Info)) {
+ targetDiag(Literal->getBeginLoc(),
+ diag::err_asm_invalid_output_constraint)
+ << Info.getConstraintStr();
+ return new (Context)
+ GCCAsmStmt(Context, AsmLoc, IsSimple, IsVolatile, NumOutputs,
+ NumInputs, Names, Constraints, Exprs.data(), AsmString,
+ NumClobbers, Clobbers, NumLabels, RParenLoc);
+ }
ExprResult ER = CheckPlaceholderExpr(Exprs[i]);
if (ER.isInvalid())
@@ -329,10 +326,14 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
unsigned Size = Context.getTypeSize(OutputExpr->getType());
if (!Context.getTargetInfo().validateOutputSize(Literal->getString(),
- Size))
- return StmtError(
- Diag(OutputExpr->getBeginLoc(), diag::err_asm_invalid_output_size)
- << Info.getConstraintStr());
+ Size)) {
+ targetDiag(OutputExpr->getBeginLoc(), diag::err_asm_invalid_output_size)
+ << Info.getConstraintStr();
+ return new (Context)
+ GCCAsmStmt(Context, AsmLoc, IsSimple, IsVolatile, NumOutputs,
+ NumInputs, Names, Constraints, Exprs.data(), AsmString,
+ NumClobbers, Clobbers, NumLabels, RParenLoc);
+ }
}
SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
@@ -348,9 +349,12 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
TargetInfo::ConstraintInfo Info(Literal->getString(), InputName);
if (!Context.getTargetInfo().validateInputConstraint(OutputConstraintInfos,
Info)) {
- return StmtError(
- Diag(Literal->getBeginLoc(), diag::err_asm_invalid_input_constraint)
- << Info.getConstraintStr());
+ targetDiag(Literal->getBeginLoc(), diag::err_asm_invalid_input_constraint)
+ << Info.getConstraintStr();
+ return new (Context)
+ GCCAsmStmt(Context, AsmLoc, IsSimple, IsVolatile, NumOutputs,
+ NumInputs, Names, Constraints, Exprs.data(), AsmString,
+ NumClobbers, Clobbers, NumLabels, RParenLoc);
}
ExprResult ER = CheckPlaceholderExpr(Exprs[i]);
@@ -383,11 +387,20 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
return StmtError(
Diag(InputExpr->getBeginLoc(), diag::err_asm_immediate_expected)
<< Info.getConstraintStr() << InputExpr->getSourceRange());
- llvm::APSInt Result = EVResult.Val.getInt();
- if (!Info.isValidAsmImmediate(Result))
+
+ // For compatibility with GCC, we also allow pointers that would be
+ // integral constant expressions if they were cast to int.
+ llvm::APSInt IntResult;
+ if (!EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
+ Context))
+ return StmtError(
+ Diag(InputExpr->getBeginLoc(), diag::err_asm_immediate_expected)
+ << Info.getConstraintStr() << InputExpr->getSourceRange());
+
+ if (!Info.isValidAsmImmediate(IntResult))
return StmtError(Diag(InputExpr->getBeginLoc(),
diag::err_invalid_asm_value_for_constraint)
- << Result.toString(10) << Info.getConstraintStr()
+ << IntResult.toString(10) << Info.getConstraintStr()
<< InputExpr->getSourceRange());
}
@@ -422,8 +435,8 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
unsigned Size = Context.getTypeSize(Ty);
if (!Context.getTargetInfo().validateInputSize(Literal->getString(),
Size))
- return StmtError(
- Diag(InputExpr->getBeginLoc(), diag::err_asm_invalid_input_size)
+ return StmtResult(
+ targetDiag(InputExpr->getBeginLoc(), diag::err_asm_invalid_input_size)
<< Info.getConstraintStr());
}
@@ -434,24 +447,29 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
StringRef Clobber = Literal->getString();
- if (!Context.getTargetInfo().isValidClobber(Clobber))
- return StmtError(
- Diag(Literal->getBeginLoc(), diag::err_asm_unknown_register_name)
- << Clobber);
+ if (!Context.getTargetInfo().isValidClobber(Clobber)) {
+ targetDiag(Literal->getBeginLoc(), diag::err_asm_unknown_register_name)
+ << Clobber;
+ return new (Context)
+ GCCAsmStmt(Context, AsmLoc, IsSimple, IsVolatile, NumOutputs,
+ NumInputs, Names, Constraints, Exprs.data(), AsmString,
+ NumClobbers, Clobbers, NumLabels, RParenLoc);
+ }
}
GCCAsmStmt *NS =
new (Context) GCCAsmStmt(Context, AsmLoc, IsSimple, IsVolatile, NumOutputs,
NumInputs, Names, Constraints, Exprs.data(),
- AsmString, NumClobbers, Clobbers, RParenLoc);
+ AsmString, NumClobbers, Clobbers, NumLabels,
+ RParenLoc);
// Validate the asm string, ensuring it makes sense given the operands we
// have.
SmallVector<GCCAsmStmt::AsmStringPiece, 8> Pieces;
unsigned DiagOffs;
if (unsigned DiagID = NS->AnalyzeAsmString(Pieces, Context, DiagOffs)) {
- Diag(getLocationOfStringLiteralByte(AsmString, DiagOffs), DiagID)
- << AsmString->getSourceRange();
- return StmtError();
+ targetDiag(getLocationOfStringLiteralByte(AsmString, DiagOffs), DiagID)
+ << AsmString->getSourceRange();
+ return NS;
}
// Validate constraints and modifiers.
@@ -461,8 +479,10 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
// Look for the correct constraint index.
unsigned ConstraintIdx = Piece.getOperandNo();
+ // Labels are the last in the Exprs list.
+ if (NS->isAsmGoto() && ConstraintIdx >= NS->getNumInputs())
+ continue;
unsigned NumOperands = NS->getNumOutputs() + NS->getNumInputs();
-
// Look for the (ConstraintIdx - NumOperands + 1)th constraint with
// modifier '+'.
if (ConstraintIdx >= NumOperands) {
@@ -489,16 +509,15 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
if (!Context.getTargetInfo().validateConstraintModifier(
Literal->getString(), Piece.getModifier(), Size,
SuggestedModifier)) {
- Diag(Exprs[ConstraintIdx]->getBeginLoc(),
- diag::warn_asm_mismatched_size_modifier);
+ targetDiag(Exprs[ConstraintIdx]->getBeginLoc(),
+ diag::warn_asm_mismatched_size_modifier);
if (!SuggestedModifier.empty()) {
- auto B = Diag(Piece.getRange().getBegin(),
- diag::note_asm_missing_constraint_modifier)
+ auto B = targetDiag(Piece.getRange().getBegin(),
+ diag::note_asm_missing_constraint_modifier)
<< SuggestedModifier;
SuggestedModifier = "%" + SuggestedModifier + Piece.getString();
- B.AddFixItHint(FixItHint::CreateReplacement(Piece.getRange(),
- SuggestedModifier));
+ B << FixItHint::CreateReplacement(Piece.getRange(), SuggestedModifier);
}
}
}
@@ -509,12 +528,14 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
StringRef ConstraintStr = Info.getConstraintStr();
unsigned AltCount = ConstraintStr.count(',') + 1;
- if (NumAlternatives == ~0U)
+ if (NumAlternatives == ~0U) {
NumAlternatives = AltCount;
- else if (NumAlternatives != AltCount)
- return StmtError(Diag(NS->getOutputExpr(i)->getBeginLoc(),
- diag::err_asm_unexpected_constraint_alternatives)
- << NumAlternatives << AltCount);
+ } else if (NumAlternatives != AltCount) {
+ targetDiag(NS->getOutputExpr(i)->getBeginLoc(),
+ diag::err_asm_unexpected_constraint_alternatives)
+ << NumAlternatives << AltCount;
+ return NS;
+ }
}
SmallVector<size_t, 4> InputMatchedToOutput(OutputConstraintInfos.size(),
~0U);
@@ -522,12 +543,14 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
StringRef ConstraintStr = Info.getConstraintStr();
unsigned AltCount = ConstraintStr.count(',') + 1;
- if (NumAlternatives == ~0U)
+ if (NumAlternatives == ~0U) {
NumAlternatives = AltCount;
- else if (NumAlternatives != AltCount)
- return StmtError(Diag(NS->getInputExpr(i)->getBeginLoc(),
- diag::err_asm_unexpected_constraint_alternatives)
- << NumAlternatives << AltCount);
+ } else if (NumAlternatives != AltCount) {
+ targetDiag(NS->getInputExpr(i)->getBeginLoc(),
+ diag::err_asm_unexpected_constraint_alternatives)
+ << NumAlternatives << AltCount;
+ return NS;
+ }
// If this is a tied constraint, verify that the output and input have
// either exactly the same type, or that they are int/ptr operands with the
@@ -542,13 +565,13 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
// Make sure no more than one input constraint matches each output.
assert(TiedTo < InputMatchedToOutput.size() && "TiedTo value out of range");
if (InputMatchedToOutput[TiedTo] != ~0U) {
- Diag(NS->getInputExpr(i)->getBeginLoc(),
- diag::err_asm_input_duplicate_match)
+ targetDiag(NS->getInputExpr(i)->getBeginLoc(),
+ diag::err_asm_input_duplicate_match)
<< TiedTo;
- Diag(NS->getInputExpr(InputMatchedToOutput[TiedTo])->getBeginLoc(),
- diag::note_asm_input_duplicate_first)
+ targetDiag(NS->getInputExpr(InputMatchedToOutput[TiedTo])->getBeginLoc(),
+ diag::note_asm_input_duplicate_first)
<< TiedTo;
- return StmtError();
+ return NS;
}
InputMatchedToOutput[TiedTo] = i;
@@ -633,19 +656,48 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
continue;
}
- Diag(InputExpr->getBeginLoc(), diag::err_asm_tying_incompatible_types)
+ targetDiag(InputExpr->getBeginLoc(), diag::err_asm_tying_incompatible_types)
<< InTy << OutTy << OutputExpr->getSourceRange()
<< InputExpr->getSourceRange();
- return StmtError();
+ return NS;
}
// Check for conflicts between clobber list and input or output lists
SourceLocation ConstraintLoc =
getClobberConflictLocation(Exprs, Constraints, Clobbers, NumClobbers,
+ NumLabels,
Context.getTargetInfo(), Context);
if (ConstraintLoc.isValid())
- return Diag(ConstraintLoc, diag::error_inoutput_conflict_with_clobber);
+ targetDiag(ConstraintLoc, diag::error_inoutput_conflict_with_clobber);
+ // Check for duplicate asm operand name between input, output and label lists.
+ typedef std::pair<StringRef, Expr *> NamedOperand;
+ SmallVector<NamedOperand, 4> NamedOperandList;
+ for (unsigned i = 0, e = NumOutputs + NumInputs + NumLabels; i != e; ++i)
+ if (Names[i])
+ NamedOperandList.emplace_back(
+ std::make_pair(Names[i]->getName(), Exprs[i]));
+ // Sort NamedOperandList.
+ std::stable_sort(NamedOperandList.begin(), NamedOperandList.end(),
+ [](const NamedOperand &LHS, const NamedOperand &RHS) {
+ return LHS.first < RHS.first;
+ });
+ // Find adjacent duplicate operand.
+ SmallVector<NamedOperand, 4>::iterator Found =
+ std::adjacent_find(begin(NamedOperandList), end(NamedOperandList),
+ [](const NamedOperand &LHS, const NamedOperand &RHS) {
+ return LHS.first == RHS.first;
+ });
+ if (Found != NamedOperandList.end()) {
+ Diag((Found + 1)->second->getBeginLoc(),
+ diag::error_duplicate_asm_operand_name)
+ << (Found + 1)->first;
+ Diag(Found->second->getBeginLoc(), diag::note_duplicate_asm_operand_name)
+ << Found->first;
+ return StmtError();
+ }
+ if (NS->isAsmGoto())
+ setFunctionHasBranchIntoScope();
return NS;
}
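
Note: a small example (not from the patch) that the new duplicate-operand-name check rejects — the symbolic name [val] is used for both an output and an input, so error_duplicate_asm_operand_name fires on the second use and note_duplicate_asm_operand_name points at the first.

    int g(int In) {
      int Out;
      asm("" : [val] "=r"(Out) : [val] "r"(In)); // rejected: duplicate operand name
      return Out;
    }
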
@@ -797,7 +849,7 @@ Sema::LookupInlineAsmVarDeclField(Expr *E, StringRef Member,
return CXXDependentScopeMemberExpr::Create(
Context, E, T, /*IsArrow=*/false, AsmLoc, NestedNameSpecifierLoc(),
SourceLocation(),
- /*FirstQualifierInScope=*/nullptr, NameInfo, /*TemplateArgs=*/nullptr);
+ /*FirstQualifierFoundInScope=*/nullptr, NameInfo, /*TemplateArgs=*/nullptr);
}
const RecordType *RT = T->getAs<RecordType>();
diff --git a/lib/Sema/SemaStmtAttr.cpp b/lib/Sema/SemaStmtAttr.cpp
index a8e54b36b29b..791c52c2d913 100644
--- a/lib/Sema/SemaStmtAttr.cpp
+++ b/lib/Sema/SemaStmtAttr.cpp
@@ -1,9 +1,8 @@
//===--- SemaStmtAttr.cpp - Statement Attribute Handling ------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -83,22 +82,17 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const ParsedAttr &A,
IdentifierLoc *StateLoc = A.getArgAsIdent(2);
Expr *ValueExpr = A.getArgAsExpr(3);
- bool PragmaUnroll = PragmaNameLoc->Ident->getName() == "unroll";
- bool PragmaNoUnroll = PragmaNameLoc->Ident->getName() == "nounroll";
- bool PragmaUnrollAndJam = PragmaNameLoc->Ident->getName() == "unroll_and_jam";
- bool PragmaNoUnrollAndJam =
- PragmaNameLoc->Ident->getName() == "nounroll_and_jam";
+ StringRef PragmaName =
+ llvm::StringSwitch<StringRef>(PragmaNameLoc->Ident->getName())
+ .Cases("unroll", "nounroll", "unroll_and_jam", "nounroll_and_jam",
+ PragmaNameLoc->Ident->getName())
+ .Default("clang loop");
+
if (St->getStmtClass() != Stmt::DoStmtClass &&
St->getStmtClass() != Stmt::ForStmtClass &&
St->getStmtClass() != Stmt::CXXForRangeStmtClass &&
St->getStmtClass() != Stmt::WhileStmtClass) {
- const char *Pragma =
- llvm::StringSwitch<const char *>(PragmaNameLoc->Ident->getName())
- .Case("unroll", "#pragma unroll")
- .Case("nounroll", "#pragma nounroll")
- .Case("unroll_and_jam", "#pragma unroll_and_jam")
- .Case("nounroll_and_jam", "#pragma nounroll_and_jam")
- .Default("#pragma clang loop");
+ std::string Pragma = "#pragma " + std::string(PragmaName);
S.Diag(St->getBeginLoc(), diag::err_pragma_loop_precedes_nonloop) << Pragma;
return nullptr;
}
@@ -107,34 +101,29 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const ParsedAttr &A,
LoopHintAttr::Spelling(A.getAttributeSpellingListIndex());
LoopHintAttr::OptionType Option;
LoopHintAttr::LoopHintState State;
- if (PragmaNoUnroll) {
- // #pragma nounroll
- Option = LoopHintAttr::Unroll;
- State = LoopHintAttr::Disable;
- } else if (PragmaUnroll) {
- if (ValueExpr) {
- // #pragma unroll N
- Option = LoopHintAttr::UnrollCount;
- State = LoopHintAttr::Numeric;
- } else {
- // #pragma unroll
- Option = LoopHintAttr::Unroll;
- State = LoopHintAttr::Enable;
- }
- } else if (PragmaNoUnrollAndJam) {
- // #pragma nounroll_and_jam
- Option = LoopHintAttr::UnrollAndJam;
- State = LoopHintAttr::Disable;
- } else if (PragmaUnrollAndJam) {
- if (ValueExpr) {
- // #pragma unroll_and_jam N
- Option = LoopHintAttr::UnrollAndJamCount;
- State = LoopHintAttr::Numeric;
- } else {
- // #pragma unroll_and_jam
- Option = LoopHintAttr::UnrollAndJam;
- State = LoopHintAttr::Enable;
- }
+
+ auto SetHints = [&Option, &State](LoopHintAttr::OptionType O,
+ LoopHintAttr::LoopHintState S) {
+ Option = O;
+ State = S;
+ };
+
+ if (PragmaName == "nounroll") {
+ SetHints(LoopHintAttr::Unroll, LoopHintAttr::Disable);
+ } else if (PragmaName == "unroll") {
+ // #pragma unroll N
+ if (ValueExpr)
+ SetHints(LoopHintAttr::UnrollCount, LoopHintAttr::Numeric);
+ else
+ SetHints(LoopHintAttr::Unroll, LoopHintAttr::Enable);
+ } else if (PragmaName == "nounroll_and_jam") {
+ SetHints(LoopHintAttr::UnrollAndJam, LoopHintAttr::Disable);
+ } else if (PragmaName == "unroll_and_jam") {
+ // #pragma unroll_and_jam N
+ if (ValueExpr)
+ SetHints(LoopHintAttr::UnrollAndJamCount, LoopHintAttr::Numeric);
+ else
+ SetHints(LoopHintAttr::UnrollAndJam, LoopHintAttr::Enable);
} else {
// #pragma clang loop ...
assert(OptionLoc && OptionLoc->Ident &&
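
Note: the rewrite above folds four boolean flags into one llvm::StringSwitch lookup — the unroll-family pragmas map to their own spelling and everything else falls back to "clang loop". A standalone sketch of that pattern (the function name is illustrative):

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSwitch.h"

    llvm::StringRef classifyPragma(llvm::StringRef Name) {
      return llvm::StringSwitch<llvm::StringRef>(Name)
          .Cases("unroll", "nounroll", "unroll_and_jam", "nounroll_and_jam", Name)
          .Default("clang loop");
    }
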
diff --git a/lib/Sema/SemaTemplate.cpp b/lib/Sema/SemaTemplate.cpp
index 3f9dc989103f..3212281cc34d 100644
--- a/lib/Sema/SemaTemplate.cpp
+++ b/lib/Sema/SemaTemplate.cpp
@@ -1,9 +1,8 @@
//===------- SemaTemplate.cpp - Semantic Analysis for C++ Templates -------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//===----------------------------------------------------------------------===//
//
// This file implements semantic analysis for C++ templates.
@@ -67,17 +66,20 @@ static Expr *clang::formAssociatedConstraints(TemplateParameterList *Params,
/// Determine whether the declaration found is acceptable as the name
/// of a template and, if so, return that template declaration. Otherwise,
-/// returns NULL.
-static NamedDecl *isAcceptableTemplateName(ASTContext &Context,
- NamedDecl *Orig,
- bool AllowFunctionTemplates) {
- NamedDecl *D = Orig->getUnderlyingDecl();
+/// returns null.
+///
+/// Note that this may return an UnresolvedUsingValueDecl if AllowDependent
+/// is true. In all other cases it will return a TemplateDecl (or null).
+NamedDecl *Sema::getAsTemplateNameDecl(NamedDecl *D,
+ bool AllowFunctionTemplates,
+ bool AllowDependent) {
+ D = D->getUnderlyingDecl();
if (isa<TemplateDecl>(D)) {
if (!AllowFunctionTemplates && isa<FunctionTemplateDecl>(D))
return nullptr;
- return Orig;
+ return D;
}
if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) {
@@ -108,55 +110,35 @@ static NamedDecl *isAcceptableTemplateName(ASTContext &Context,
// 'using Dependent::foo;' can resolve to a template name.
// 'using typename Dependent::foo;' cannot (not even if 'foo' is an
// injected-class-name).
- if (isa<UnresolvedUsingValueDecl>(D))
+ if (AllowDependent && isa<UnresolvedUsingValueDecl>(D))
return D;
return nullptr;
}
void Sema::FilterAcceptableTemplateNames(LookupResult &R,
- bool AllowFunctionTemplates) {
- // The set of class templates we've already seen.
- llvm::SmallPtrSet<ClassTemplateDecl *, 8> ClassTemplates;
+ bool AllowFunctionTemplates,
+ bool AllowDependent) {
LookupResult::Filter filter = R.makeFilter();
while (filter.hasNext()) {
NamedDecl *Orig = filter.next();
- NamedDecl *Repl = isAcceptableTemplateName(Context, Orig,
- AllowFunctionTemplates);
- if (!Repl)
+ if (!getAsTemplateNameDecl(Orig, AllowFunctionTemplates, AllowDependent))
filter.erase();
- else if (Repl != Orig) {
-
- // C++ [temp.local]p3:
- // A lookup that finds an injected-class-name (10.2) can result in an
- // ambiguity in certain cases (for example, if it is found in more than
- // one base class). If all of the injected-class-names that are found
- // refer to specializations of the same class template, and if the name
- // is used as a template-name, the reference refers to the class
- // template itself and not a specialization thereof, and is not
- // ambiguous.
- if (ClassTemplateDecl *ClassTmpl = dyn_cast<ClassTemplateDecl>(Repl))
- if (!ClassTemplates.insert(ClassTmpl).second) {
- filter.erase();
- continue;
- }
-
- // FIXME: we promote access to public here as a workaround to
- // the fact that LookupResult doesn't let us remember that we
- // found this template through a particular injected class name,
- // which means we end up doing nasty things to the invariants.
- // Pretending that access is public is *much* safer.
- filter.replace(Repl, AS_public);
- }
}
filter.done();
}
bool Sema::hasAnyAcceptableTemplateNames(LookupResult &R,
- bool AllowFunctionTemplates) {
- for (LookupResult::iterator I = R.begin(), IEnd = R.end(); I != IEnd; ++I)
- if (isAcceptableTemplateName(Context, *I, AllowFunctionTemplates))
+ bool AllowFunctionTemplates,
+ bool AllowDependent,
+ bool AllowNonTemplateFunctions) {
+ for (LookupResult::iterator I = R.begin(), IEnd = R.end(); I != IEnd; ++I) {
+ if (getAsTemplateNameDecl(*I, AllowFunctionTemplates, AllowDependent))
+ return true;
+ if (AllowNonTemplateFunctions &&
+ isa<FunctionDecl>((*I)->getUnderlyingDecl()))
return true;
+ }
return false;
}
@@ -194,25 +176,64 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
QualType ObjectType = ObjectTypePtr.get();
+ AssumedTemplateKind AssumedTemplate;
LookupResult R(*this, TName, Name.getBeginLoc(), LookupOrdinaryName);
if (LookupTemplateName(R, S, SS, ObjectType, EnteringContext,
- MemberOfUnknownSpecialization))
+ MemberOfUnknownSpecialization, SourceLocation(),
+ &AssumedTemplate))
return TNK_Non_template;
- if (R.empty()) return TNK_Non_template;
- if (R.isAmbiguous()) {
- // Suppress diagnostics; we'll redo this lookup later.
- R.suppressDiagnostics();
- // FIXME: we might have ambiguous templates, in which case we
- // should at least parse them properly!
+ if (AssumedTemplate != AssumedTemplateKind::None) {
+ TemplateResult = TemplateTy::make(Context.getAssumedTemplateName(TName));
+ // Let the parser know whether we found nothing or found functions; if we
+ // found nothing, we want to more carefully check whether this is actually
+ // a function template name versus some other kind of undeclared identifier.
+ return AssumedTemplate == AssumedTemplateKind::FoundNothing
+ ? TNK_Undeclared_template
+ : TNK_Function_template;
+ }
+
+ if (R.empty())
return TNK_Non_template;
+
+ NamedDecl *D = nullptr;
+ if (R.isAmbiguous()) {
+ // If we got an ambiguity involving a non-function template, treat this
+ // as a template name, and pick an arbitrary template for error recovery.
+ bool AnyFunctionTemplates = false;
+ for (NamedDecl *FoundD : R) {
+ if (NamedDecl *FoundTemplate = getAsTemplateNameDecl(FoundD)) {
+ if (isa<FunctionTemplateDecl>(FoundTemplate))
+ AnyFunctionTemplates = true;
+ else {
+ D = FoundTemplate;
+ break;
+ }
+ }
+ }
+
+ // If we didn't find any templates at all, this isn't a template name.
+ // Leave the ambiguity for a later lookup to diagnose.
+ if (!D && !AnyFunctionTemplates) {
+ R.suppressDiagnostics();
+ return TNK_Non_template;
+ }
+
+ // If the only templates were function templates, filter out the rest.
+ // We'll diagnose the ambiguity later.
+ if (!D)
+ FilterAcceptableTemplateNames(R);
}
+ // At this point, we have either picked a single template name declaration D
+ // or we have a non-empty set of results R containing either one template name
+ // declaration or a set of function templates.
+
TemplateName Template;
TemplateNameKind TemplateKind;
unsigned ResultCount = R.end() - R.begin();
- if (ResultCount > 1) {
+ if (!D && ResultCount > 1) {
// We assume that we'll preserve the qualifier from a function
// template name in other ways.
Template = Context.getOverloadedTemplateName(R.begin(), R.end());
@@ -220,12 +241,19 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
// We'll do this lookup again later.
R.suppressDiagnostics();
- } else if (isa<UnresolvedUsingValueDecl>((*R.begin())->getUnderlyingDecl())) {
- // We don't yet know whether this is a template-name or not.
- MemberOfUnknownSpecialization = true;
- return TNK_Non_template;
} else {
- TemplateDecl *TD = cast<TemplateDecl>((*R.begin())->getUnderlyingDecl());
+ if (!D) {
+ D = getAsTemplateNameDecl(*R.begin());
+ assert(D && "unambiguous result is not a template name");
+ }
+
+ if (isa<UnresolvedUsingValueDecl>(D)) {
+ // We don't yet know whether this is a template-name or not.
+ MemberOfUnknownSpecialization = true;
+ return TNK_Non_template;
+ }
+
+ TemplateDecl *TD = cast<TemplateDecl>(D);
if (SS.isSet() && !SS.isInvalid()) {
NestedNameSpecifier *Qualifier = SS.getScopeRep();
@@ -243,9 +271,11 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
} else {
assert(isa<ClassTemplateDecl>(TD) || isa<TemplateTemplateParmDecl>(TD) ||
isa<TypeAliasTemplateDecl>(TD) || isa<VarTemplateDecl>(TD) ||
- isa<BuiltinTemplateDecl>(TD));
+ isa<BuiltinTemplateDecl>(TD) || isa<ConceptDecl>(TD));
TemplateKind =
- isa<VarTemplateDecl>(TD) ? TNK_Var_template : TNK_Type_template;
+ isa<VarTemplateDecl>(TD) ? TNK_Var_template :
+ isa<ConceptDecl>(TD) ? TNK_Concept_template :
+ TNK_Type_template;
}
}
@@ -316,7 +346,13 @@ bool Sema::LookupTemplateName(LookupResult &Found,
QualType ObjectType,
bool EnteringContext,
bool &MemberOfUnknownSpecialization,
- SourceLocation TemplateKWLoc) {
+ SourceLocation TemplateKWLoc,
+ AssumedTemplateKind *ATK) {
+ if (ATK)
+ *ATK = AssumedTemplateKind::None;
+
+ Found.setTemplateNameLookup(true);
+
// Determine where to perform name lookup
MemberOfUnknownSpecialization = false;
DeclContext *LookupCtx = nullptr;
@@ -391,24 +427,55 @@ bool Sema::LookupTemplateName(LookupResult &Found,
IsDependent |= Found.wasNotFoundInCurrentInstantiation();
}
+ if (Found.isAmbiguous())
+ return false;
+
+ if (ATK && !SS.isSet() && ObjectType.isNull() && TemplateKWLoc.isInvalid()) {
+ // C++2a [temp.names]p2:
+ // A name is also considered to refer to a template if it is an
+ // unqualified-id followed by a < and name lookup finds either one or more
+ // functions or finds nothing.
+ //
+ // To keep our behavior consistent, we apply the "finds nothing" part in
+ // all language modes, and diagnose the empty lookup in ActOnCallExpr if we
+ // successfully form a call to an undeclared template-id.
+ bool AllFunctions =
+ getLangOpts().CPlusPlus2a &&
+ std::all_of(Found.begin(), Found.end(), [](NamedDecl *ND) {
+ return isa<FunctionDecl>(ND->getUnderlyingDecl());
+ });
+ if (AllFunctions || (Found.empty() && !IsDependent)) {
+ // If lookup found any functions, or if this is a name that can only be
+ // used for a function, then strongly assume this is a function
+ // template-id.
+ *ATK = (Found.empty() && Found.getLookupName().isIdentifier())
+ ? AssumedTemplateKind::FoundNothing
+ : AssumedTemplateKind::FoundFunctions;
+ Found.clear();
+ return false;
+ }
+ }
+
if (Found.empty() && !IsDependent) {
// If we did not find any names, attempt to correct any typos.
DeclarationName Name = Found.getLookupName();
Found.clear();
// Simple filter callback that, for keywords, only accepts the C++ *_cast
- auto FilterCCC = llvm::make_unique<CorrectionCandidateCallback>();
- FilterCCC->WantTypeSpecifiers = false;
- FilterCCC->WantExpressionKeywords = false;
- FilterCCC->WantRemainingKeywords = false;
- FilterCCC->WantCXXNamedCasts = true;
- if (TypoCorrection Corrected = CorrectTypo(
- Found.getLookupNameInfo(), Found.getLookupKind(), S, &SS,
- std::move(FilterCCC), CTK_ErrorRecovery, LookupCtx)) {
- Found.setLookupName(Corrected.getCorrection());
+ DefaultFilterCCC FilterCCC{};
+ FilterCCC.WantTypeSpecifiers = false;
+ FilterCCC.WantExpressionKeywords = false;
+ FilterCCC.WantRemainingKeywords = false;
+ FilterCCC.WantCXXNamedCasts = true;
+ if (TypoCorrection Corrected =
+ CorrectTypo(Found.getLookupNameInfo(), Found.getLookupKind(), S,
+ &SS, FilterCCC, CTK_ErrorRecovery, LookupCtx)) {
if (auto *ND = Corrected.getFoundDecl())
Found.addDecl(ND);
FilterAcceptableTemplateNames(Found);
- if (!Found.empty()) {
+ if (Found.isAmbiguous()) {
+ Found.clear();
+ } else if (!Found.empty()) {
+ Found.setLookupName(Corrected.getCorrection());
if (LookupCtx) {
std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
bool DroppedSpecifier = Corrected.WillReplaceSpecifier() &&
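
Note: source-level effect of the AssumedTemplateKind handling above (C++2a [temp.names]p2) — an unqualified name followed by '<' is treated as a template name when lookup finds only functions or finds nothing, so argument-dependent lookup can still find the template at the call. Illustrative example (the names are made up), valid under -std=c++2a:

    namespace N {
      struct X {};
      template <typename T> void frobnicate(T) {}
    }
    void call(N::X x) {
      frobnicate<N::X>(x); // nothing named 'frobnicate' is visible here; it is
                           // assumed to be a template-id and ADL finds N::frobnicate
    }
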
@@ -420,8 +487,6 @@ bool Sema::LookupTemplateName(LookupResult &Found,
diagnoseTypo(Corrected, PDiag(diag::err_no_template_suggest) << Name);
}
}
- } else {
- Found.setLookupName(Name);
}
}
@@ -458,14 +523,19 @@ bool Sema::LookupTemplateName(LookupResult &Found,
// Note: C++11 does not perform this second lookup.
LookupResult FoundOuter(*this, Found.getLookupName(), Found.getNameLoc(),
LookupOrdinaryName);
+ FoundOuter.setTemplateNameLookup(true);
LookupName(FoundOuter, S);
+ // FIXME: We silently accept an ambiguous lookup here, in violation of
+ // [basic.lookup]/1.
FilterAcceptableTemplateNames(FoundOuter, /*AllowFunctionTemplates=*/false);
+ NamedDecl *OuterTemplate;
if (FoundOuter.empty()) {
// - if the name is not found, the name found in the class of the
// object expression is used, otherwise
- } else if (!FoundOuter.getAsSingle<ClassTemplateDecl>() ||
- FoundOuter.isAmbiguous()) {
+ } else if (FoundOuter.isAmbiguous() || !FoundOuter.isSingleResult() ||
+ !(OuterTemplate =
+ getAsTemplateNameDecl(FoundOuter.getFoundDecl()))) {
// - if the name is found in the context of the entire
// postfix-expression and does not name a class template, the name
// found in the class of the object expression is used, otherwise
@@ -475,8 +545,8 @@ bool Sema::LookupTemplateName(LookupResult &Found,
// entity as the one found in the class of the object expression,
// otherwise the program is ill-formed.
if (!Found.isSingleResult() ||
- Found.getFoundDecl()->getCanonicalDecl()
- != FoundOuter.getFoundDecl()->getCanonicalDecl()) {
+ getAsTemplateNameDecl(Found.getFoundDecl())->getCanonicalDecl() !=
+ OuterTemplate->getCanonicalDecl()) {
Diag(Found.getNameLoc(),
diag::ext_nested_name_member_ref_lookup_ambiguous)
<< Found.getLookupName()
@@ -546,7 +616,8 @@ void Sema::diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
// Try to correct the name by looking for templates and C++ named casts.
struct TemplateCandidateFilter : CorrectionCandidateCallback {
- TemplateCandidateFilter() {
+ Sema &S;
+ TemplateCandidateFilter(Sema &S) : S(S) {
WantTypeSpecifiers = false;
WantExpressionKeywords = false;
WantRemainingKeywords = false;
@@ -554,20 +625,22 @@ void Sema::diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
};
bool ValidateCandidate(const TypoCorrection &Candidate) override {
if (auto *ND = Candidate.getCorrectionDecl())
- return isAcceptableTemplateName(ND->getASTContext(), ND, true);
+ return S.getAsTemplateNameDecl(ND);
return Candidate.isKeyword();
}
+
+ std::unique_ptr<CorrectionCandidateCallback> clone() override {
+ return llvm::make_unique<TemplateCandidateFilter>(*this);
+ }
};
DeclarationName Name = NameInfo.getName();
- if (TypoCorrection Corrected =
- CorrectTypo(NameInfo, LookupKind, S, &SS,
- llvm::make_unique<TemplateCandidateFilter>(),
- CTK_ErrorRecovery, LookupCtx)) {
+ TemplateCandidateFilter CCC(*this);
+ if (TypoCorrection Corrected = CorrectTypo(NameInfo, LookupKind, S, &SS, CCC,
+ CTK_ErrorRecovery, LookupCtx)) {
auto *ND = Corrected.getFoundDecl();
if (ND)
- ND = isAcceptableTemplateName(Context, ND,
- /*AllowFunctionTemplates*/ true);
+ ND = getAsTemplateNameDecl(ND);
if (ND || Corrected.isKeyword()) {
if (LookupCtx) {
std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
@@ -1076,7 +1149,7 @@ NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
// variable or variable template or the declaration of a function or
// function template.
- if (DS.isConstexprSpecified())
+ if (DS.hasConstexprSpecifier())
EmitDiag(DS.getConstexprSpecLoc());
// [dcl.fct.spec]p1:
@@ -1085,7 +1158,7 @@ NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
if (DS.isVirtualSpecified())
EmitDiag(DS.getVirtualSpecLoc());
- if (DS.isExplicitSpecified())
+ if (DS.hasExplicitSpecifier())
EmitDiag(DS.getExplicitSpecLoc());
if (DS.isNoreturnSpecified())
@@ -1110,6 +1183,8 @@ NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
Invalid = true;
}
+ CheckFunctionOrTemplateParamDeclarator(S, D);
+
IdentifierInfo *ParamName = D.getIdentifier();
bool IsParameterPack = D.hasEllipsis();
NonTypeTemplateParmDecl *Param = NonTypeTemplateParmDecl::Create(
@@ -1765,8 +1840,8 @@ struct ConvertConstructorToDeductionGuideTransform {
return nullptr;
TypeSourceInfo *NewTInfo = TLB.getTypeSourceInfo(SemaRef.Context, NewType);
- return buildDeductionGuide(TemplateParams, CD->isExplicit(), NewTInfo,
- CD->getBeginLoc(), CD->getLocation(),
+ return buildDeductionGuide(TemplateParams, CD->getExplicitSpecifier(),
+ NewTInfo, CD->getBeginLoc(), CD->getLocation(),
CD->getEndLoc());
}
@@ -1795,8 +1870,8 @@ struct ConvertConstructorToDeductionGuideTransform {
Params.push_back(NewParam);
}
- return buildDeductionGuide(Template->getTemplateParameters(), false, TSI,
- Loc, Loc, Loc);
+ return buildDeductionGuide(Template->getTemplateParameters(),
+ ExplicitSpecifier(), TSI, Loc, Loc, Loc);
}
private:
@@ -1946,7 +2021,7 @@ private:
}
NamedDecl *buildDeductionGuide(TemplateParameterList *TemplateParams,
- bool Explicit, TypeSourceInfo *TInfo,
+ ExplicitSpecifier ES, TypeSourceInfo *TInfo,
SourceLocation LocStart, SourceLocation Loc,
SourceLocation LocEnd) {
DeclarationNameInfo Name(DeductionGuideName, Loc);
@@ -1955,8 +2030,8 @@ private:
// Build the implicit deduction guide template.
auto *Guide =
- CXXDeductionGuideDecl::Create(SemaRef.Context, DC, LocStart, Explicit,
- Name, TInfo->getType(), TInfo, LocEnd);
+ CXXDeductionGuideDecl::Create(SemaRef.Context, DC, LocStart, ES, Name,
+ TInfo->getType(), TInfo, LocEnd);
Guide->setImplicit();
Guide->setParams(Params);
@@ -1981,6 +2056,12 @@ private:
void Sema::DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc) {
+ if (CXXRecordDecl *DefRecord =
+ cast<CXXRecordDecl>(Template->getTemplatedDecl())->getDefinition()) {
+ TemplateDecl *DescribedTemplate = DefRecord->getDescribedClassTemplate();
+ Template = DescribedTemplate ? DescribedTemplate : Template;
+ }
+
DeclContext *DC = Template->getDeclContext();
if (DC->isDependentContext())
return;
@@ -3148,7 +3229,8 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
TemplateDecl *Template = Name.getAsTemplateDecl();
if (!Template || isa<FunctionTemplateDecl>(Template) ||
- isa<VarTemplateDecl>(Template)) {
+ isa<VarTemplateDecl>(Template) ||
+ isa<ConceptDecl>(Template)) {
// We might have a substituted template template parameter pack. If so,
// build a template specialization type for it.
if (Name.getAsSubstTemplateTemplateParmPack())
@@ -3324,14 +3406,65 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
return Context.getTemplateSpecializationType(Name, TemplateArgs, CanonType);
}
-TypeResult
-Sema::ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
- TemplateTy TemplateD, IdentifierInfo *TemplateII,
- SourceLocation TemplateIILoc,
- SourceLocation LAngleLoc,
- ASTTemplateArgsPtr TemplateArgsIn,
- SourceLocation RAngleLoc,
- bool IsCtorOrDtorName, bool IsClassName) {
+void Sema::ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &ParsedName,
+ TemplateNameKind &TNK,
+ SourceLocation NameLoc,
+ IdentifierInfo *&II) {
+ assert(TNK == TNK_Undeclared_template && "not an undeclared template name");
+
+ TemplateName Name = ParsedName.get();
+ auto *ATN = Name.getAsAssumedTemplateName();
+ assert(ATN && "not an assumed template name");
+ II = ATN->getDeclName().getAsIdentifierInfo();
+
+ if (!resolveAssumedTemplateNameAsType(S, Name, NameLoc, /*Diagnose*/false)) {
+ // Resolved to a type template name.
+ ParsedName = TemplateTy::make(Name);
+ TNK = TNK_Type_template;
+ }
+}
+
+bool Sema::resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
+ SourceLocation NameLoc,
+ bool Diagnose) {
+ // We assumed this undeclared identifier to be an (ADL-only) function
+ // template name, but it was used in a context where a type was required.
+ // Try to typo-correct it now.
+ AssumedTemplateStorage *ATN = Name.getAsAssumedTemplateName();
+ assert(ATN && "not an assumed template name");
+
+ LookupResult R(*this, ATN->getDeclName(), NameLoc, LookupOrdinaryName);
+ struct CandidateCallback : CorrectionCandidateCallback {
+ bool ValidateCandidate(const TypoCorrection &TC) override {
+ return TC.getCorrectionDecl() &&
+ getAsTypeTemplateDecl(TC.getCorrectionDecl());
+ }
+ std::unique_ptr<CorrectionCandidateCallback> clone() override {
+ return llvm::make_unique<CandidateCallback>(*this);
+ }
+ } FilterCCC;
+
+ TypoCorrection Corrected =
+ CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), S, nullptr,
+ FilterCCC, CTK_ErrorRecovery);
+ if (Corrected && Corrected.getFoundDecl()) {
+ diagnoseTypo(Corrected, PDiag(diag::err_no_template_suggest)
+ << ATN->getDeclName());
+ Name = TemplateName(Corrected.getCorrectionDeclAs<TemplateDecl>());
+ return false;
+ }
+
+ if (Diagnose)
+ Diag(R.getNameLoc(), diag::err_no_template) << R.getLookupName();
+ return true;
+}
+
+TypeResult Sema::ActOnTemplateIdType(
+ Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
+ TemplateTy TemplateD, IdentifierInfo *TemplateII,
+ SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc,
+ bool IsCtorOrDtorName, bool IsClassName) {
if (SS.isInvalid())
return true;
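The two new functions above implement error recovery for names that the parser assumed to be ADL-only function templates (C++2a, P0846) but that are later used where a type is required. A minimal sketch of the call pattern this machinery supports; the namespace N and the names Widget and make_widget are hypothetical:

namespace N {
  struct Widget {};
  // Visible at the call site below only through argument-dependent lookup.
  template <typename T> int make_widget(Widget) { return sizeof(T); }
}

int test(N::Widget w) {
  // C++2a: 'make_widget' is assumed to name a template even though no
  // declaration is visible here; ADL then finds N::make_widget.
  return make_widget<int>(w);
}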
@@ -3372,6 +3505,9 @@ Sema::ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
}
TemplateName Template = TemplateD.get();
+ if (Template.getAsAssumedTemplateName() &&
+ resolveAssumedTemplateNameAsType(S, Template, TemplateIILoc))
+ return true;
// Translate the parser's template argument list in our AST format.
TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
@@ -3903,13 +4039,6 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
Specialization->setAccess(VarTemplate->getAccess());
}
- // Link instantiations of static data members back to the template from
- // which they were instantiated.
- if (Specialization->isStaticDataMember())
- Specialization->setInstantiationOfStaticDataMember(
- VarTemplate->getTemplatedDecl(),
- Specialization->getSpecializationKind());
-
return Specialization;
}
@@ -4108,6 +4237,18 @@ void Sema::diagnoseMissingTemplateArguments(TemplateName Name,
}
}
+ExprResult
+Sema::CheckConceptTemplateId(const CXXScopeSpec &SS,
+ const DeclarationNameInfo &NameInfo,
+ ConceptDecl *Template,
+ SourceLocation TemplateLoc,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ // TODO: Do concept specialization here.
+ Diag(NameInfo.getBeginLoc(), diag::err_concept_not_implemented) <<
+ "concept specialization";
+ return ExprError();
+}
+
ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
@@ -4124,7 +4265,6 @@ ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
// vs template<class T, class U> void f(U);
// These should be filtered out by our callers.
- assert(!R.empty() && "empty lookup results when building templateid");
assert(!R.isAmbiguous() && "ambiguous lookup when building templateid");
// Non-function templates require a template argument list.
@@ -4149,6 +4289,12 @@ ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
TemplateKWLoc, TemplateArgs);
}
+ if (R.getAsSingle<ConceptDecl>() && !AnyDependentArguments()) {
+ return CheckConceptTemplateId(SS, R.getLookupNameInfo(),
+ R.getAsSingle<ConceptDecl>(),
+ TemplateKWLoc, TemplateArgs);
+ }
+
// We don't want lookup warnings at this point.
R.suppressDiagnostics();
@@ -4263,7 +4409,7 @@ TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
LookupOrdinaryName);
bool MOUS;
if (!LookupTemplateName(R, S, SS, ObjectType.get(), EnteringContext,
- MOUS, TemplateKWLoc))
+ MOUS, TemplateKWLoc) && !R.isAmbiguous())
Diag(Name.getBeginLoc(), diag::err_no_member)
<< DNI.getName() << LookupCtx << SS.getRange();
return TNK_Non_template;
@@ -4763,10 +4909,22 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
Converted);
- NTTPType = SubstType(NTTPType,
- MultiLevelTemplateArgumentList(TemplateArgs),
- NTTP->getLocation(),
- NTTP->getDeclName());
+
+ // If the parameter is a pack expansion, expand this slice of the pack.
+ if (auto *PET = NTTPType->getAs<PackExpansionType>()) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this,
+ ArgumentPackIndex);
+ NTTPType = SubstType(PET->getPattern(),
+ MultiLevelTemplateArgumentList(TemplateArgs),
+ NTTP->getLocation(),
+ NTTP->getDeclName());
+ } else {
+ NTTPType = SubstType(NTTPType,
+ MultiLevelTemplateArgumentList(TemplateArgs),
+ NTTP->getLocation(),
+ NTTP->getDeclName());
+ }
+
// If that worked, check the non-type template parameter type
// for validity.
if (!NTTPType.isNull())
@@ -6185,12 +6343,13 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// When checking a deduced template argument, deduce from its type even if
// the type is dependent, in order to check the types of non-type template
// arguments line up properly in partial ordering.
- Optional<unsigned> Depth;
- if (CTAK != CTAK_Specified)
- Depth = Param->getDepth() + 1;
+ Optional<unsigned> Depth = Param->getDepth() + 1;
+ Expr *DeductionArg = Arg;
+ if (auto *PE = dyn_cast<PackExpansionExpr>(DeductionArg))
+ DeductionArg = PE->getPattern();
if (DeduceAutoType(
Context.getTrivialTypeSourceInfo(ParamType, Param->getLocation()),
- Arg, ParamType, Depth) == DAR_Failed) {
+ DeductionArg, ParamType, Depth) == DAR_Failed) {
Diag(Arg->getExprLoc(),
diag::err_non_type_template_parm_type_deduction_failure)
<< Param->getDeclName() << Param->getType() << Arg->getType()
@@ -6242,9 +6401,24 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// If either the parameter has a dependent type or the argument is
// type-dependent, there's nothing we can check now.
if (ParamType->isDependentType() || Arg->isTypeDependent()) {
- // FIXME: Produce a cloned, canonical expression?
- Converted = TemplateArgument(Arg);
- return Arg;
+ // Force the argument to the type of the parameter to maintain invariants.
+ auto *PE = dyn_cast<PackExpansionExpr>(Arg);
+ if (PE)
+ Arg = PE->getPattern();
+ ExprResult E = ImpCastExprToType(
+ Arg, ParamType.getNonLValueExprType(Context), CK_Dependent,
+ ParamType->isLValueReferenceType() ? VK_LValue :
+ ParamType->isRValueReferenceType() ? VK_XValue : VK_RValue);
+ if (E.isInvalid())
+ return ExprError();
+ if (PE) {
+ // Recreate a pack expansion if we unwrapped one.
+ E = new (Context)
+ PackExpansionExpr(E.get()->getType(), E.get(), PE->getEllipsisLoc(),
+ PE->getNumExpansions());
+ }
+ Converted = TemplateArgument(E.get());
+ return E;
}
// The initialization of the parameter from the argument is
@@ -6273,10 +6447,13 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// Convert the APValue to a TemplateArgument.
switch (Value.getKind()) {
- case APValue::Uninitialized:
+ case APValue::None:
assert(ParamType->isNullPtrType());
Converted = TemplateArgument(CanonParamType, /*isNullPtr*/true);
break;
+ case APValue::Indeterminate:
+ llvm_unreachable("result of constant evaluation should be initialized");
+ break;
case APValue::Int:
assert(ParamType->isIntegralOrEnumerationType());
Converted = TemplateArgument(Context, Value.getInt(), CanonParamType);
@@ -6307,21 +6484,22 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// -- a string literal
// -- the result of a typeid expression, or
// -- a predefined __func__ variable
- if (auto *E = Value.getLValueBase().dyn_cast<const Expr*>()) {
- if (isa<CXXUuidofExpr>(E)) {
- Converted = TemplateArgument(ArgResult.get());
+ APValue::LValueBase Base = Value.getLValueBase();
+ auto *VD = const_cast<ValueDecl *>(Base.dyn_cast<const ValueDecl *>());
+ if (Base && !VD) {
+ auto *E = Base.dyn_cast<const Expr *>();
+ if (E && isa<CXXUuidofExpr>(E)) {
+ Converted = TemplateArgument(ArgResult.get()->IgnoreImpCasts());
break;
}
Diag(Arg->getBeginLoc(), diag::err_template_arg_not_decl_ref)
<< Arg->getSourceRange();
return ExprError();
}
- auto *VD = const_cast<ValueDecl *>(
- Value.getLValueBase().dyn_cast<const ValueDecl *>());
// -- a subobject
if (Value.hasLValuePath() && Value.getLValuePath().size() == 1 &&
VD && VD->getType()->isArrayType() &&
- Value.getLValuePath()[0].ArrayIndex == 0 &&
+ Value.getLValuePath()[0].getAsArrayIndex() == 0 &&
!Value.isLValueOnePastTheEnd() && ParamType->isPointerType()) {
// Per defect report (no number yet):
// ... other than a pointer to the first element of a complete array
@@ -6342,6 +6520,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
}
case APValue::AddrLabelDiff:
return Diag(StartLoc, diag::err_non_type_template_arg_addr_label_diff);
+ case APValue::FixedPoint:
case APValue::Float:
case APValue::ComplexInt:
case APValue::ComplexFloat:
@@ -7816,7 +7995,74 @@ Decl *Sema::ActOnTemplateDeclarator(Scope *S,
return NewDecl;
}
-/// Strips various properties off an implicit instantiation
+Decl *Sema::ActOnConceptDefinition(Scope *S,
+ MultiTemplateParamsArg TemplateParameterLists,
+ IdentifierInfo *Name, SourceLocation NameLoc,
+ Expr *ConstraintExpr) {
+ DeclContext *DC = CurContext;
+
+ if (!DC->getRedeclContext()->isFileContext()) {
+ Diag(NameLoc,
+ diag::err_concept_decls_may_only_appear_in_global_namespace_scope);
+ return nullptr;
+ }
+
+ if (TemplateParameterLists.size() > 1) {
+ Diag(NameLoc, diag::err_concept_extra_headers);
+ return nullptr;
+ }
+
+ if (TemplateParameterLists.front()->size() == 0) {
+ Diag(NameLoc, diag::err_concept_no_parameters);
+ return nullptr;
+ }
+
+ ConceptDecl *NewDecl = ConceptDecl::Create(Context, DC, NameLoc, Name,
+ TemplateParameterLists.front(),
+ ConstraintExpr);
+
+ if (!ConstraintExpr->isTypeDependent() &&
+ ConstraintExpr->getType() != Context.BoolTy) {
+ // C++2a [temp.constr.atomic]p3:
+ // E shall be a constant expression of type bool.
+ // TODO: Do this check for individual atomic constraints
+ // and not the constraint expression. Probably should do it in
+ // ParseConstraintExpression.
+ Diag(ConstraintExpr->getSourceRange().getBegin(),
+ diag::err_concept_initialized_with_non_bool_type)
+ << ConstraintExpr->getType();
+ NewDecl->setInvalidDecl();
+ }
+
+ if (NewDecl->getAssociatedConstraints()) {
+ // C++2a [temp.concept]p4:
+ // A concept shall not have associated constraints.
+ // TODO: Make a test once we have actual associated constraints.
+ Diag(NameLoc, diag::err_concept_no_associated_constraints);
+ NewDecl->setInvalidDecl();
+ }
+
+ // Check for conflicting previous declaration.
+ DeclarationNameInfo NameInfo(NewDecl->getDeclName(), NameLoc);
+ LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
+ ForVisibleRedeclaration);
+ LookupName(Previous, S);
+
+ FilterLookupForScope(Previous, DC, S, /*ConsiderLinkage=*/false,
+ /*AllowInlineNamespace*/false);
+ if (!Previous.empty()) {
+ auto *Old = Previous.getRepresentativeDecl();
+ Diag(NameLoc, isa<ConceptDecl>(Old) ? diag::err_redefinition :
+ diag::err_redefinition_different_kind) << NewDecl->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ }
+
+ ActOnDocumentableDecl(NewDecl);
+ PushOnScopeChains(NewDecl, S);
+ return NewDecl;
+}
+
+/// \brief Strips various properties off an implicit instantiation
/// that has just been explicitly specialized.
static void StripImplicitInstantiation(NamedDecl *D) {
D->dropAttr<DLLImportAttr>();
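ActOnConceptDefinition above enforces the basic rules for a C++2a concept-definition: namespace scope, a single template parameter list with at least one parameter, a constraint-expression of type bool, and no conflicting prior declaration. A small sketch of what those checks accept and reject; the concept names are hypothetical:

// Accepted: namespace-scope concept whose constraint-expression has type bool.
template <typename T>
concept Small = sizeof(T) <= sizeof(int);

// Rejected by the non-bool check above, since sizeof(T) has type std::size_t:
// template <typename T>
// concept Bad = sizeof(T);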
@@ -8182,8 +8428,8 @@ bool Sema::CheckFunctionTemplateSpecialization(
// here that have a different target.
if (LangOpts.CUDA &&
IdentifyCUDATarget(Specialization,
- /* IgnoreImplicitHDAttributes = */ true) !=
- IdentifyCUDATarget(FD, /* IgnoreImplicitHDAttributes = */ true)) {
+ /* IgnoreImplicitHDAttr = */ true) !=
+ IdentifyCUDATarget(FD, /* IgnoreImplicitHDAttr = */ true)) {
FailedCandidates.addCandidate().set(
I.getPair(), FunTmpl->getTemplatedDecl(),
MakeDeductionFailureInfo(Context, TDK_CUDATargetMismatch, Info));
@@ -8243,7 +8489,7 @@ bool Sema::CheckFunctionTemplateSpecialization(
// FIXME: We need an update record for this AST mutation.
// FIXME: What if there are multiple such prior declarations (for instance,
// from different modules)?
- Specialization->setConstexpr(FD->isConstexpr());
+ Specialization->setConstexprKind(FD->getConstexprKind());
}
// FIXME: Check if the prior specialization has a point of instantiation.
@@ -8594,6 +8840,29 @@ static bool CheckExplicitInstantiationScope(Sema &S, NamedDecl *D,
return false;
}
+/// Common checks for whether an explicit instantiation of \p D is valid.
+static bool CheckExplicitInstantiation(Sema &S, NamedDecl *D,
+ SourceLocation InstLoc,
+ bool WasQualifiedName,
+ TemplateSpecializationKind TSK) {
+ // C++ [temp.explicit]p13:
+ // An explicit instantiation declaration shall not name a specialization of
+ // a template with internal linkage.
+ if (TSK == TSK_ExplicitInstantiationDeclaration &&
+ D->getFormalLinkage() == InternalLinkage) {
+ S.Diag(InstLoc, diag::err_explicit_instantiation_internal_linkage) << D;
+ return true;
+ }
+
+ // C++11 [temp.explicit]p3: [DR 275]
+ // An explicit instantiation shall appear in an enclosing namespace of its
+ // template.
+ if (CheckExplicitInstantiationScope(S, D, InstLoc, WasQualifiedName))
+ return true;
+
+ return false;
+}
+
/// Determine whether the given scope specifier has a template-id in it.
static bool ScopeSpecifierHasTemplateId(const CXXScopeSpec &SS) {
if (!SS.isSet())
@@ -8684,8 +8953,10 @@ DeclResult Sema::ActOnExplicitInstantiation(
? TSK_ExplicitInstantiationDefinition
: TSK_ExplicitInstantiationDeclaration;
- if (TSK == TSK_ExplicitInstantiationDeclaration) {
- // Check for dllexport class template instantiation declarations.
+ if (TSK == TSK_ExplicitInstantiationDeclaration &&
+ !Context.getTargetInfo().getTriple().isWindowsGNUEnvironment()) {
+ // Check for dllexport class template instantiation declarations,
+ // except in MinGW mode.
for (const ParsedAttr &AL : Attr) {
if (AL.getKind() == ParsedAttr::AT_DLLExport) {
Diag(ExternLoc,
@@ -8745,13 +9016,21 @@ DeclResult Sema::ActOnExplicitInstantiation(
TemplateSpecializationKind PrevDecl_TSK
= PrevDecl ? PrevDecl->getTemplateSpecializationKind() : TSK_Undeclared;
- // C++0x [temp.explicit]p2:
- // [...] An explicit instantiation shall appear in an enclosing
- // namespace of its template. [...]
- //
- // This is C++ DR 275.
- if (CheckExplicitInstantiationScope(*this, ClassTemplate, TemplateNameLoc,
- SS.isSet()))
+ if (TSK == TSK_ExplicitInstantiationDefinition && PrevDecl != nullptr &&
+ Context.getTargetInfo().getTriple().isWindowsGNUEnvironment()) {
+ // Check for dllexport class template instantiation definitions in MinGW
+ // mode, if a previous declaration of the instantiation was seen.
+ for (const ParsedAttr &AL : Attr) {
+ if (AL.getKind() == ParsedAttr::AT_DLLExport) {
+ Diag(AL.getLoc(),
+ diag::warn_attribute_dllexport_explicit_instantiation_def);
+ break;
+ }
+ }
+ }
+
+ if (CheckExplicitInstantiation(*this, ClassTemplate, TemplateNameLoc,
+ SS.isSet(), TSK))
return true;
ClassTemplateSpecializationDecl *Specialization = nullptr;
@@ -8906,6 +9185,14 @@ DeclResult Sema::ActOnExplicitInstantiation(
dllExportImportClassTemplateSpecialization(*this, Def);
}
+ // In MinGW mode, export the template instantiation if the declaration
+ // was marked dllexport.
+ if (PrevDecl_TSK == TSK_ExplicitInstantiationDeclaration &&
+ Context.getTargetInfo().getTriple().isWindowsGNUEnvironment() &&
+ PrevDecl->hasAttr<DLLExportAttr>()) {
+ dllExportImportClassTemplateSpecialization(*this, Def);
+ }
+
// Set the template specialization kind. Make sure it is set before
// instantiating the members which will trigger ASTConsumer callbacks.
Specialization->setTemplateSpecializationKind(TSK);
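The MinGW-specific handling above accepts a dllexport attribute on an explicit instantiation declaration and applies it once the matching explicit instantiation definition is seen, instead of rejecting the attribute outright. A sketch of the intended pattern, assuming a *-w64-mingw32 target; Buffer is a hypothetical class template:

template <class T> struct Buffer { T storage[16]; };

// Instantiation declaration carrying the export attribute (no longer diagnosed
// as an error for MinGW targets).
extern template struct __declspec(dllexport) Buffer<int>;

// The later definition now dllexports the members of Buffer<int>.
template struct Buffer<int>;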
@@ -8974,12 +9261,7 @@ Sema::ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
= ExternLoc.isInvalid()? TSK_ExplicitInstantiationDefinition
: TSK_ExplicitInstantiationDeclaration;
- // C++0x [temp.explicit]p2:
- // [...] An explicit instantiation shall appear in an enclosing
- // namespace of its template. [...]
- //
- // This is C++ DR 275.
- CheckExplicitInstantiationScope(*this, Record, NameLoc, true);
+ CheckExplicitInstantiation(*this, Record, NameLoc, true, TSK);
// Verify that it is okay to explicitly instantiate here.
CXXRecordDecl *PrevDecl
@@ -9096,7 +9378,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
diag::err_explicit_instantiation_inline :
diag::warn_explicit_instantiation_inline_0x)
<< FixItHint::CreateRemoval(D.getDeclSpec().getInlineSpecLoc());
- if (D.getDeclSpec().isConstexprSpecified() && R->isFunctionType())
+ if (D.getDeclSpec().hasConstexprSpecifier() && R->isFunctionType())
// FIXME: Add a fix-it to remove the 'constexpr' and add a 'const' if one is
// not already specified.
Diag(D.getDeclSpec().getConstexprSpecLoc(),
@@ -9137,7 +9419,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
if (!PrevTemplate) {
if (!Prev || !Prev->isStaticDataMember()) {
- // We expect to see a data data member here.
+ // We expect to see a static data member here.
Diag(D.getIdentifierLoc(), diag::err_explicit_instantiation_not_known)
<< Name;
for (LookupResult::iterator P = Previous.begin(), PEnd = Previous.end();
@@ -9210,8 +9492,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
diag::ext_explicit_instantiation_without_qualified_id)
<< Prev << D.getCXXScopeSpec().getRange();
- // Check the scope of this explicit instantiation.
- CheckExplicitInstantiationScope(*this, Prev, D.getIdentifierLoc(), true);
+ CheckExplicitInstantiation(*this, Prev, D.getIdentifierLoc(), true, TSK);
// Verify that it is okay to explicitly instantiate here.
TemplateSpecializationKind PrevTSK = Prev->getTemplateSpecializationKind();
@@ -9306,7 +9587,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
// have a different target.
if (LangOpts.CUDA &&
IdentifyCUDATarget(Specialization,
- /* IgnoreImplicitHDAttributes = */ true) !=
+ /* IgnoreImplicitHDAttr = */ true) !=
IdentifyCUDATarget(D.getDeclSpec().getAttributes())) {
FailedCandidates.addCandidate().set(
P.getPair(), FunTmpl->getTemplatedDecl(),
@@ -9386,6 +9667,20 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
return (Decl*) nullptr;
}
+ // HACK: libc++ has a bug where it attempts to explicitly instantiate the
+ // functions
+ // valarray<size_t>::valarray(size_t) and
+ // valarray<size_t>::~valarray()
+ // that it declared to have internal linkage with the internal_linkage
+ // attribute. Ignore the explicit instantiation declaration in this case.
+ if (Specialization->hasAttr<InternalLinkageAttr>() &&
+ TSK == TSK_ExplicitInstantiationDeclaration) {
+ if (auto *RD = dyn_cast<CXXRecordDecl>(Specialization->getDeclContext()))
+ if (RD->getIdentifier() && RD->getIdentifier()->isStr("valarray") &&
+ RD->isInStdNamespace())
+ return (Decl*) nullptr;
+ }
+
ProcessDeclAttributeList(S, Specialization, D.getDeclSpec().getAttributes());
// In MSVC mode, dllimported explicit instantiation definitions are treated as
@@ -9419,11 +9714,11 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
diag::ext_explicit_instantiation_without_qualified_id)
<< Specialization << D.getCXXScopeSpec().getRange();
- CheckExplicitInstantiationScope(*this,
- FunTmpl? (NamedDecl *)FunTmpl
- : Specialization->getInstantiatedFromMemberFunction(),
- D.getIdentifierLoc(),
- D.getCXXScopeSpec().isSet());
+ CheckExplicitInstantiation(
+ *this,
+ FunTmpl ? (NamedDecl *)FunTmpl
+ : Specialization->getInstantiatedFromMemberFunction(),
+ D.getIdentifierLoc(), D.getCXXScopeSpec().isSet(), TSK);
// FIXME: Create some kind of ExplicitInstantiationDecl here.
return (Decl*) nullptr;
diff --git a/lib/Sema/SemaTemplateDeduction.cpp b/lib/Sema/SemaTemplateDeduction.cpp
index f2f989ce1241..b55a232d26c2 100644
--- a/lib/Sema/SemaTemplateDeduction.cpp
+++ b/lib/Sema/SemaTemplateDeduction.cpp
@@ -1,9 +1,8 @@
//===- SemaTemplateDeduction.cpp - Template Argument Deduction ------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -1671,8 +1670,8 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
const FunctionProtoType *FunctionProtoParam =
cast<FunctionProtoType>(Param);
- if (FunctionProtoParam->getTypeQuals()
- != FunctionProtoArg->getTypeQuals() ||
+ if (FunctionProtoParam->getMethodQuals()
+ != FunctionProtoArg->getMethodQuals() ||
FunctionProtoParam->getRefQualifier()
!= FunctionProtoArg->getRefQualifier() ||
FunctionProtoParam->isVariadic() != FunctionProtoArg->isVariadic())
@@ -2873,7 +2872,7 @@ Sema::DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
return Sema::TDK_SubstitutionFailure;
return ::FinishTemplateArgumentDeduction(
- *this, Partial, /*PartialOrdering=*/false, TemplateArgs, Deduced, Info);
+ *this, Partial, /*IsPartialOrdering=*/false, TemplateArgs, Deduced, Info);
}
/// Perform template argument deduction to determine whether
@@ -2914,7 +2913,7 @@ Sema::DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
return Sema::TDK_SubstitutionFailure;
return ::FinishTemplateArgumentDeduction(
- *this, Partial, /*PartialOrdering=*/false, TemplateArgs, Deduced, Info);
+ *this, Partial, /*IsPartialOrdering=*/false, TemplateArgs, Deduced, Info);
}
/// Determine whether the given type T is a simple-template-id type.
@@ -3082,7 +3081,7 @@ Sema::SubstituteExplicitTemplateArguments(
CXXRecordDecl *ThisContext = nullptr;
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Function)) {
ThisContext = Method->getParent();
- ThisTypeQuals = Method->getTypeQualifiers();
+ ThisTypeQuals = Method->getMethodQualifiers();
}
CXXThisScopeRAII ThisScope(*this, ThisContext, ThisTypeQuals,
@@ -4281,19 +4280,26 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
}
namespace {
+ struct DependentAuto { bool IsPack; };
/// Substitute the 'auto' specifier or deduced template specialization type
/// specifier within a type for a given replacement type.
class SubstituteDeducedTypeTransform :
public TreeTransform<SubstituteDeducedTypeTransform> {
QualType Replacement;
+ bool ReplacementIsPack;
bool UseTypeSugar;
public:
+ SubstituteDeducedTypeTransform(Sema &SemaRef, DependentAuto DA)
+ : TreeTransform<SubstituteDeducedTypeTransform>(SemaRef), Replacement(),
+ ReplacementIsPack(DA.IsPack), UseTypeSugar(true) {}
+
SubstituteDeducedTypeTransform(Sema &SemaRef, QualType Replacement,
- bool UseTypeSugar = true)
+ bool UseTypeSugar = true)
: TreeTransform<SubstituteDeducedTypeTransform>(SemaRef),
- Replacement(Replacement), UseTypeSugar(UseTypeSugar) {}
+ Replacement(Replacement), ReplacementIsPack(false),
+ UseTypeSugar(UseTypeSugar) {}
QualType TransformDesugared(TypeLocBuilder &TLB, DeducedTypeLoc TL) {
assert(isa<TemplateTypeParmType>(Replacement) &&
@@ -4318,7 +4324,8 @@ namespace {
return TransformDesugared(TLB, TL);
QualType Result = SemaRef.Context.getAutoType(
- Replacement, TL.getTypePtr()->getKeyword(), Replacement.isNull());
+ Replacement, TL.getTypePtr()->getKeyword(), Replacement.isNull(),
+ ReplacementIsPack);
auto NewTL = TLB.push<AutoTypeLoc>(Result);
NewTL.setNameLoc(TL.getNameLoc());
return Result;
@@ -4409,9 +4416,12 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result,
Init = NonPlaceholder.get();
}
+ DependentAuto DependentResult = {
+ /*.IsPack = */ (bool)Type.getAs<PackExpansionTypeLoc>()};
+
if (!DependentDeductionDepth &&
(Type.getType()->isDependentType() || Init->isTypeDependent())) {
- Result = SubstituteDeducedTypeTransform(*this, QualType()).Apply(Type);
+ Result = SubstituteDeducedTypeTransform(*this, DependentResult).Apply(Type);
assert(!Result.isNull() && "substituting DependentTy can't fail");
return DAR_Succeeded;
}
@@ -4479,7 +4489,8 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result,
auto DeductionFailed = [&](TemplateDeductionResult TDK,
ArrayRef<SourceRange> Ranges) -> DeduceAutoResult {
if (Init->isTypeDependent()) {
- Result = SubstituteDeducedTypeTransform(*this, QualType()).Apply(Type);
+ Result =
+ SubstituteDeducedTypeTransform(*this, DependentResult).Apply(Type);
assert(!Result.isNull() && "substituting DependentTy can't fail");
return DAR_Succeeded;
}
@@ -4560,7 +4571,10 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result,
QualType Sema::SubstAutoType(QualType TypeWithAuto,
QualType TypeToReplaceAuto) {
if (TypeToReplaceAuto->isDependentType())
- TypeToReplaceAuto = QualType();
+ return SubstituteDeducedTypeTransform(
+ *this, DependentAuto{
+ TypeToReplaceAuto->containsUnexpandedParameterPack()})
+ .TransformType(TypeWithAuto);
return SubstituteDeducedTypeTransform(*this, TypeToReplaceAuto)
.TransformType(TypeWithAuto);
}
@@ -4568,7 +4582,11 @@ QualType Sema::SubstAutoType(QualType TypeWithAuto,
TypeSourceInfo *Sema::SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType TypeToReplaceAuto) {
if (TypeToReplaceAuto->isDependentType())
- TypeToReplaceAuto = QualType();
+ return SubstituteDeducedTypeTransform(
+ *this,
+ DependentAuto{
+ TypeToReplaceAuto->containsUnexpandedParameterPack()})
+ .TransformType(TypeWithAuto);
return SubstituteDeducedTypeTransform(*this, TypeToReplaceAuto)
.TransformType(TypeWithAuto);
}
@@ -4661,7 +4679,7 @@ AddImplicitObjectParameterType(ASTContext &Context,
// The standard doesn't say explicitly, but we pick the appropriate kind of
// reference type based on [over.match.funcs]p4.
QualType ArgTy = Context.getTypeDeclType(Method->getParent());
- ArgTy = Context.getQualifiedType(ArgTy, Method->getTypeQualifiers());
+ ArgTy = Context.getQualifiedType(ArgTy, Method->getMethodQualifiers());
if (Method->getRefQualifier() == RQ_RValue)
ArgTy = Context.getRValueReferenceType(ArgTy);
else
@@ -5049,7 +5067,7 @@ static bool isAtLeastAsSpecializedAs(Sema &S, QualType T1, QualType T2,
Info);
auto *TST1 = T1->castAs<TemplateSpecializationType>();
if (FinishTemplateArgumentDeduction(
- S, P2, /*PartialOrdering=*/true,
+ S, P2, /*IsPartialOrdering=*/true,
TemplateArgumentList(TemplateArgumentList::OnStack,
TST1->template_arguments()),
Deduced, Info))
diff --git a/lib/Sema/SemaTemplateInstantiate.cpp b/lib/Sema/SemaTemplateInstantiate.cpp
index 96abeed82493..973f564d3058 100644
--- a/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/lib/Sema/SemaTemplateInstantiate.cpp
@@ -1,9 +1,8 @@
//===------- SemaTemplateInstantiate.cpp - C++ Template Instantiation ------===/
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//===----------------------------------------------------------------------===/
//
// This file implements C++ template instantiation.
@@ -26,6 +25,7 @@
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TemplateInstCallback.h"
+#include "llvm/Support/TimeProfiler.h"
using namespace clang;
using namespace sema;
@@ -66,9 +66,12 @@ Sema::getTemplateInstantiationArgs(NamedDecl *D,
if (!Ctx) {
Ctx = D->getDeclContext();
- // Add template arguments from a variable template instantiation.
- if (VarTemplateSpecializationDecl *Spec =
- dyn_cast<VarTemplateSpecializationDecl>(D)) {
+ // Add template arguments from a variable template instantiation. For a
+ // class-scope explicit specialization, there are no template arguments
+ // at this level, but there may be enclosing template arguments.
+ VarTemplateSpecializationDecl *Spec =
+ dyn_cast<VarTemplateSpecializationDecl>(D);
+ if (Spec && !Spec->isClassScopeExplicitSpecialization()) {
// We're done when we hit an explicit specialization.
if (Spec->getSpecializationKind() == TSK_ExplicitSpecialization &&
!isa<VarTemplatePartialSpecializationDecl>(Spec))
@@ -111,8 +114,9 @@ Sema::getTemplateInstantiationArgs(NamedDecl *D,
while (!Ctx->isFileContext()) {
// Add template arguments from a class template instantiation.
- if (ClassTemplateSpecializationDecl *Spec
- = dyn_cast<ClassTemplateSpecializationDecl>(Ctx)) {
+ ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Ctx);
+ if (Spec && !Spec->isClassScopeExplicitSpecialization()) {
// We're done when we hit an explicit specialization.
if (Spec->getSpecializationKind() == TSK_ExplicitSpecialization &&
!isa<ClassTemplatePartialSpecializationDecl>(Spec))
@@ -129,9 +133,8 @@ Sema::getTemplateInstantiationArgs(NamedDecl *D,
// Add template arguments from a function template specialization.
else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Ctx)) {
if (!RelativeToPrimary &&
- (Function->getTemplateSpecializationKind() ==
- TSK_ExplicitSpecialization &&
- !Function->getClassScopeSpecializationPattern()))
+ Function->getTemplateSpecializationKindForInstantiation() ==
+ TSK_ExplicitSpecialization)
break;
if (const TemplateArgumentList *TemplateArgs
@@ -816,7 +819,19 @@ namespace {
SemaRef.InstantiateAttrs(TemplateArgs, Old, New);
}
- void transformedLocalDecl(Decl *Old, Decl *New) {
+ void transformedLocalDecl(Decl *Old, ArrayRef<Decl *> NewDecls) {
+ if (Old->isParameterPack()) {
+ SemaRef.CurrentInstantiationScope->MakeInstantiatedLocalArgPack(Old);
+ for (auto *New : NewDecls)
+ SemaRef.CurrentInstantiationScope->InstantiatedLocalPackArg(
+ Old, cast<VarDecl>(New));
+ return;
+ }
+
+ assert(NewDecls.size() == 1 &&
+ "should only have multiple expansions for a pack");
+ Decl *New = NewDecls.front();
+
// If we've instantiated the call operator of a lambda or the call
// operator template of a generic lambda, update the "instantiation of"
// information.
@@ -885,12 +900,11 @@ namespace {
ExprResult TransformSubstNonTypeTemplateParmPackExpr(
SubstNonTypeTemplateParmPackExpr *E);
- /// Rebuild a DeclRefExpr for a ParmVarDecl reference.
- ExprResult RebuildParmVarDeclRefExpr(ParmVarDecl *PD, SourceLocation Loc);
+ /// Rebuild a DeclRefExpr for a VarDecl reference.
+ ExprResult RebuildVarDeclRefExpr(VarDecl *PD, SourceLocation Loc);
- /// Transform a reference to a function parameter pack.
- ExprResult TransformFunctionParmPackRefExpr(DeclRefExpr *E,
- ParmVarDecl *PD);
+ /// Transform a reference to a function or init-capture parameter pack.
+ ExprResult TransformFunctionParmPackRefExpr(DeclRefExpr *E, VarDecl *PD);
/// Transform a FunctionParmPackExpr which was built when we couldn't
/// expand a function parameter pack reference which refers to an expanded
@@ -1321,9 +1335,8 @@ TemplateInstantiator::TransformSubstNonTypeTemplateParmPackExpr(
Arg);
}
-ExprResult
-TemplateInstantiator::RebuildParmVarDeclRefExpr(ParmVarDecl *PD,
- SourceLocation Loc) {
+ExprResult TemplateInstantiator::RebuildVarDeclRefExpr(VarDecl *PD,
+ SourceLocation Loc) {
DeclarationNameInfo NameInfo(PD->getDeclName(), Loc);
return getSema().BuildDeclarationNameExpr(CXXScopeSpec(), NameInfo, PD);
}
@@ -1332,11 +1345,11 @@ ExprResult
TemplateInstantiator::TransformFunctionParmPackExpr(FunctionParmPackExpr *E) {
if (getSema().ArgumentPackSubstitutionIndex != -1) {
// We can expand this parameter pack now.
- ParmVarDecl *D = E->getExpansion(getSema().ArgumentPackSubstitutionIndex);
- ValueDecl *VD = cast_or_null<ValueDecl>(TransformDecl(E->getExprLoc(), D));
+ VarDecl *D = E->getExpansion(getSema().ArgumentPackSubstitutionIndex);
+ VarDecl *VD = cast_or_null<VarDecl>(TransformDecl(E->getExprLoc(), D));
if (!VD)
return ExprError();
- return RebuildParmVarDeclRefExpr(cast<ParmVarDecl>(VD), E->getExprLoc());
+ return RebuildVarDeclRefExpr(VD, E->getExprLoc());
}
QualType T = TransformType(E->getType());
@@ -1345,25 +1358,26 @@ TemplateInstantiator::TransformFunctionParmPackExpr(FunctionParmPackExpr *E) {
// Transform each of the parameter expansions into the corresponding
// parameters in the instantiation of the function decl.
- SmallVector<ParmVarDecl *, 8> Parms;
- Parms.reserve(E->getNumExpansions());
+ SmallVector<VarDecl *, 8> Vars;
+ Vars.reserve(E->getNumExpansions());
for (FunctionParmPackExpr::iterator I = E->begin(), End = E->end();
I != End; ++I) {
- ParmVarDecl *D =
- cast_or_null<ParmVarDecl>(TransformDecl(E->getExprLoc(), *I));
+ VarDecl *D = cast_or_null<VarDecl>(TransformDecl(E->getExprLoc(), *I));
if (!D)
return ExprError();
- Parms.push_back(D);
+ Vars.push_back(D);
}
- return FunctionParmPackExpr::Create(getSema().Context, T,
- E->getParameterPack(),
- E->getParameterPackLocation(), Parms);
+ auto *PackExpr =
+ FunctionParmPackExpr::Create(getSema().Context, T, E->getParameterPack(),
+ E->getParameterPackLocation(), Vars);
+ getSema().MarkFunctionParmPackReferenced(PackExpr);
+ return PackExpr;
}
ExprResult
TemplateInstantiator::TransformFunctionParmPackRefExpr(DeclRefExpr *E,
- ParmVarDecl *PD) {
+ VarDecl *PD) {
typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
llvm::PointerUnion<Decl *, DeclArgumentPack *> *Found
= getSema().CurrentInstantiationScope->findInstantiationOf(PD);
@@ -1377,8 +1391,10 @@ TemplateInstantiator::TransformFunctionParmPackRefExpr(DeclRefExpr *E,
QualType T = TransformType(E->getType());
if (T.isNull())
return ExprError();
- return FunctionParmPackExpr::Create(getSema().Context, T, PD,
- E->getExprLoc(), *Pack);
+ auto *PackExpr = FunctionParmPackExpr::Create(getSema().Context, T, PD,
+ E->getExprLoc(), *Pack);
+ getSema().MarkFunctionParmPackReferenced(PackExpr);
+ return PackExpr;
}
TransformedDecl = (*Pack)[getSema().ArgumentPackSubstitutionIndex];
@@ -1387,8 +1403,7 @@ TemplateInstantiator::TransformFunctionParmPackRefExpr(DeclRefExpr *E,
}
// We have either an unexpanded pack or a specific expansion.
- return RebuildParmVarDeclRefExpr(cast<ParmVarDecl>(TransformedDecl),
- E->getExprLoc());
+ return RebuildVarDeclRefExpr(cast<VarDecl>(TransformedDecl), E->getExprLoc());
}
ExprResult
@@ -1406,7 +1421,7 @@ TemplateInstantiator::TransformDeclRefExpr(DeclRefExpr *E) {
}
// Handle references to function parameter packs.
- if (ParmVarDecl *PD = dyn_cast<ParmVarDecl>(D))
+ if (VarDecl *PD = dyn_cast<VarDecl>(D))
if (PD->isParameterPack())
return TransformFunctionParmPackRefExpr(E, PD);
@@ -2009,6 +2024,15 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
Instantiation->getInstantiatedFromMemberClass(),
Pattern, PatternDef, TSK, Complain))
return true;
+
+ llvm::TimeTraceScope TimeScope("InstantiateClass", [&]() {
+ std::string Name;
+ llvm::raw_string_ostream OS(Name);
+ Instantiation->getNameForDiagnostic(OS, getPrintingPolicy(),
+ /*Qualified=*/true);
+ return Name;
+ });
+
Pattern = PatternDef;
// Record the point of instantiation.
@@ -2070,6 +2094,7 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
LateInstantiatedAttrVec LateAttrs;
Instantiator.enableLateAttributeInstantiation(&LateAttrs);
+ bool MightHaveConstexprVirtualFunctions = false;
for (auto *Member : Pattern->decls()) {
// Don't instantiate members not belonging in this semantic context.
// e.g. for:
@@ -2116,6 +2141,10 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
Instantiation->setInvalidDecl();
break;
}
+ } else if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewMember)) {
+ if (MD->isConstexpr() && !MD->getFriendObjectKind() &&
+ (MD->isVirtualAsWritten() || Instantiation->getNumBases()))
+ MightHaveConstexprVirtualFunctions = true;
}
if (NewMember->isInvalidDecl())
@@ -2208,9 +2237,14 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
Consumer.HandleTagDeclDefinition(Instantiation);
// Always emit the vtable for an explicit instantiation definition
- // of a polymorphic class template specialization.
+ // of a polymorphic class template specialization. Otherwise, eagerly
+ // instantiate only constexpr virtual functions in preparation for their use
+ // in constant evaluation.
if (TSK == TSK_ExplicitInstantiationDefinition)
MarkVTableUsed(PointOfInstantiation, Instantiation, true);
+ else if (MightHaveConstexprVirtualFunctions)
+ MarkVirtualMembersReferenced(PointOfInstantiation, Instantiation,
+ /*ConstexprOnly*/ true);
}
return Instantiation->isInvalidDecl();
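The MightHaveConstexprVirtualFunctions path added above eagerly instantiates constexpr virtual members of an instantiated class template so that constant evaluation can perform virtual dispatch on them. A sketch of the C++2a usage this enables; Base and Derived are hypothetical:

template <typename T>
struct Base {
  constexpr virtual T value() const { return T(1); }
};

struct Derived : Base<int> {
  constexpr int value() const override { return 2; }
};

// Virtual dispatch during constant evaluation needs Base<int>::value to have
// been instantiated even though it is never called at run time.
constexpr int get(const Base<int> &b) { return b.value(); }
static_assert(get(Derived()) == 2, "");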
@@ -2675,11 +2709,14 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
== TSK_ExplicitSpecialization)
continue;
- if ((Context.getTargetInfo().getCXXABI().isMicrosoft() ||
- Context.getTargetInfo().getTriple().isWindowsItaniumEnvironment()) &&
+ if (Context.getTargetInfo().getTriple().isOSWindows() &&
TSK == TSK_ExplicitInstantiationDeclaration) {
- // In MSVC and Windows Itanium mode, explicit instantiation decl of the
- // outer class doesn't affect the inner class.
+ // On Windows, explicit instantiation decl of the outer class doesn't
+ // affect the inner class. Typically extern template declarations are
+ // used in combination with dll import/export annotations, but those
+ // are not propagated from the outer class templates to inner classes.
+ // Therefore, do not instantiate inner classes on this platform, so
+ // that users don't end up with undefined symbols during linking.
continue;
}
@@ -2887,7 +2924,7 @@ static const Decl *getCanonicalParmVarDecl(const Decl *D) {
unsigned i = PV->getFunctionScopeIndex();
// This parameter might be from a freestanding function type within the
// function and isn't necessarily referring to one of FD's parameters.
- if (FD->getParamDecl(i) == PV)
+ if (i < FD->getNumParams() && FD->getParamDecl(i) == PV)
return FD->getCanonicalDecl()->getParamDecl(i);
}
}
@@ -2959,14 +2996,14 @@ void LocalInstantiationScope::InstantiatedLocal(const Decl *D, Decl *Inst) {
#endif
Stored = Inst;
} else if (DeclArgumentPack *Pack = Stored.dyn_cast<DeclArgumentPack *>()) {
- Pack->push_back(cast<ParmVarDecl>(Inst));
+ Pack->push_back(cast<VarDecl>(Inst));
} else {
assert(Stored.get<Decl *>() == Inst && "Already instantiated this local");
}
}
void LocalInstantiationScope::InstantiatedLocalPackArg(const Decl *D,
- ParmVarDecl *Inst) {
+ VarDecl *Inst) {
D = getCanonicalParmVarDecl(D);
DeclArgumentPack *Pack = LocalDecls[D].get<DeclArgumentPack *>();
Pack->push_back(Inst);
diff --git a/lib/Sema/SemaTemplateInstantiateDecl.cpp b/lib/Sema/SemaTemplateInstantiateDecl.cpp
index fad3c065e896..67343d11d333 100644
--- a/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -1,9 +1,8 @@
//===--- SemaTemplateInstantiateDecl.cpp - C++ Template Decl Instantiation ===/
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//===----------------------------------------------------------------------===/
//
// This file implements C++ template instantiation for declarations.
@@ -24,6 +23,7 @@
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateInstCallback.h"
+#include "llvm/Support/TimeProfiler.h"
using namespace clang;
@@ -285,7 +285,7 @@ static void instantiateOMPDeclareSimdDeclAttr(
SmallVector<Expr *, 4> Uniforms, Aligneds, Alignments, Linears, Steps;
SmallVector<unsigned, 4> LinModifiers;
- auto &&Subst = [&](Expr *E) -> ExprResult {
+ auto SubstExpr = [&](Expr *E) -> ExprResult {
if (auto *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
if (auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
Sema::ContextRAII SavedContext(S, FD);
@@ -300,6 +300,17 @@ static void instantiateOMPDeclareSimdDeclAttr(
return S.SubstExpr(E, TemplateArgs);
};
+ // Substitute a single OpenMP clause, which is a potentially-evaluated
+ // full-expression.
+ auto Subst = [&](Expr *E) -> ExprResult {
+ EnterExpressionEvaluationContext Evaluated(
+ S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
+ ExprResult Res = SubstExpr(E);
+ if (Res.isInvalid())
+ return Res;
+ return S.ActOnFinishFullExpr(Res.get(), false);
+ };
+
ExprResult Simdlen;
if (auto *E = Attr.getSimdlen())
Simdlen = Subst(E);
@@ -345,6 +356,74 @@ static void instantiateOMPDeclareSimdDeclAttr(
Attr.getRange());
}
+static void instantiateDependentAMDGPUFlatWorkGroupSizeAttr(
+ Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs,
+ const AMDGPUFlatWorkGroupSizeAttr &Attr, Decl *New) {
+ // Both the min and max expressions are constant expressions.
+ EnterExpressionEvaluationContext Unevaluated(
+ S, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+
+ ExprResult Result = S.SubstExpr(Attr.getMin(), TemplateArgs);
+ if (Result.isInvalid())
+ return;
+ Expr *MinExpr = Result.getAs<Expr>();
+
+ Result = S.SubstExpr(Attr.getMax(), TemplateArgs);
+ if (Result.isInvalid())
+ return;
+ Expr *MaxExpr = Result.getAs<Expr>();
+
+ S.addAMDGPUFlatWorkGroupSizeAttr(Attr.getLocation(), New, MinExpr, MaxExpr,
+ Attr.getSpellingListIndex());
+}
+
+static ExplicitSpecifier
+instantiateExplicitSpecifier(Sema &S,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ ExplicitSpecifier ES, FunctionDecl *New) {
+ if (!ES.getExpr())
+ return ES;
+ Expr *OldCond = ES.getExpr();
+ Expr *Cond = nullptr;
+ {
+ EnterExpressionEvaluationContext Unevaluated(
+ S, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+ ExprResult SubstResult = S.SubstExpr(OldCond, TemplateArgs);
+ if (SubstResult.isInvalid()) {
+ return ExplicitSpecifier::Invalid();
+ }
+ Cond = SubstResult.get();
+ }
+ ExplicitSpecifier Result(Cond, ES.getKind());
+ if (!Cond->isTypeDependent())
+ S.tryResolveExplicitSpecifier(Result);
+ return Result;
+}
+
+static void instantiateDependentAMDGPUWavesPerEUAttr(
+ Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs,
+ const AMDGPUWavesPerEUAttr &Attr, Decl *New) {
+ // Both the min and max expressions are constant expressions.
+ EnterExpressionEvaluationContext Unevaluated(
+ S, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+
+ ExprResult Result = S.SubstExpr(Attr.getMin(), TemplateArgs);
+ if (Result.isInvalid())
+ return;
+ Expr *MinExpr = Result.getAs<Expr>();
+
+ Expr *MaxExpr = nullptr;
+ if (auto Max = Attr.getMax()) {
+ Result = S.SubstExpr(Max, TemplateArgs);
+ if (Result.isInvalid())
+ return;
+ MaxExpr = Result.getAs<Expr>();
+ }
+
+ S.addAMDGPUWavesPerEUAttr(Attr.getLocation(), New, MinExpr, MaxExpr,
+ Attr.getSpellingListIndex());
+}
+
void Sema::InstantiateAttrsForDecl(
const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Tmpl,
Decl *New, LateInstantiatedAttrVec *LateAttrs,
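instantiateExplicitSpecifier above substitutes into a dependent explicit-specifier and, once the condition is non-dependent, resolves it immediately; the deduction-guide and constructor instantiation code later in this file uses the result. A sketch of the C++2a explicit(bool) usage this supports; Wrap is a hypothetical class template and <type_traits> is assumed to be available:

#include <type_traits>

template <typename T>
struct Wrap {
  // Conditionally explicit: explicit(false) for arithmetic T.
  explicit(!std::is_arithmetic_v<T>) Wrap(T) {}
};

Wrap<double> a = 1.0;         // OK: resolves to explicit(false) on instantiation
// Wrap<void *> b = nullptr;  // ill-formed: resolves to explicit(true)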
@@ -438,6 +517,18 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
continue;
}
+ if (const AMDGPUFlatWorkGroupSizeAttr *AMDGPUFlatWorkGroupSize =
+ dyn_cast<AMDGPUFlatWorkGroupSizeAttr>(TmplAttr)) {
+ instantiateDependentAMDGPUFlatWorkGroupSizeAttr(
+ *this, TemplateArgs, *AMDGPUFlatWorkGroupSize, New);
+ }
+
+ if (const AMDGPUWavesPerEUAttr *AMDGPUFlatWorkGroupSize =
+ dyn_cast<AMDGPUWavesPerEUAttr>(TmplAttr)) {
+ instantiateDependentAMDGPUWavesPerEUAttr(*this, TemplateArgs,
+ *AMDGPUFlatWorkGroupSize, New);
+ }
+
// Existing DLL attribute on the instantiation takes precedence.
if (TmplAttr->getKind() == attr::DLLExport ||
TmplAttr->getKind() == attr::DLLImport) {
@@ -1633,6 +1724,14 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
cast<Decl>(Owner)->isDefinedOutsideFunctionOrMethod());
LocalInstantiationScope Scope(SemaRef, MergeWithParentScope);
+ ExplicitSpecifier InstantiatedExplicitSpecifier;
+ if (auto *DGuide = dyn_cast<CXXDeductionGuideDecl>(D)) {
+ InstantiatedExplicitSpecifier = instantiateExplicitSpecifier(
+ SemaRef, TemplateArgs, DGuide->getExplicitSpecifier(), DGuide);
+ if (InstantiatedExplicitSpecifier.isInvalid())
+ return nullptr;
+ }
+
SmallVector<ParmVarDecl *, 4> Params;
TypeSourceInfo *TInfo = SubstFunctionType(D, Params);
if (!TInfo)
@@ -1670,8 +1769,9 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
FunctionDecl *Function;
if (auto *DGuide = dyn_cast<CXXDeductionGuideDecl>(D)) {
Function = CXXDeductionGuideDecl::Create(
- SemaRef.Context, DC, D->getInnerLocStart(), DGuide->isExplicit(),
- NameInfo, T, TInfo, D->getSourceRange().getEnd());
+ SemaRef.Context, DC, D->getInnerLocStart(),
+ InstantiatedExplicitSpecifier, NameInfo, T, TInfo,
+ D->getSourceRange().getEnd());
if (DGuide->isCopyDeductionCandidate())
cast<CXXDeductionGuideDecl>(Function)->setIsCopyDeductionCandidate();
Function->setAccess(D->getAccess());
@@ -1679,7 +1779,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
Function = FunctionDecl::Create(
SemaRef.Context, DC, D->getInnerLocStart(), NameInfo, T, TInfo,
D->getCanonicalDecl()->getStorageClass(), D->isInlineSpecified(),
- D->hasWrittenPrototype(), D->isConstexpr());
+ D->hasWrittenPrototype(), D->getConstexprKind());
Function->setRangeEnd(D->getSourceRange().getEnd());
}
@@ -1894,10 +1994,10 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
return Function;
}
-Decl *
-TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
- TemplateParameterList *TemplateParams,
- bool IsClassScopeSpecialization) {
+Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
+ CXXMethodDecl *D, TemplateParameterList *TemplateParams,
+ Optional<const ASTTemplateArgumentListInfo *>
+ ClassScopeSpecializationArgs) {
FunctionTemplateDecl *FunctionTemplate = D->getDescribedFunctionTemplate();
if (FunctionTemplate && !TemplateParams) {
// We are creating a function template specialization from a function
@@ -1939,6 +2039,12 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
}
}
+ ExplicitSpecifier InstantiatedExplicitSpecifier =
+ instantiateExplicitSpecifier(SemaRef, TemplateArgs,
+ ExplicitSpecifier::getFromDecl(D), D);
+ if (InstantiatedExplicitSpecifier.isInvalid())
+ return nullptr;
+
SmallVector<ParmVarDecl *, 4> Params;
TypeSourceInfo *TInfo = SubstFunctionType(D, Params);
if (!TInfo)
@@ -1978,11 +2084,10 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
DeclarationNameInfo NameInfo
= SemaRef.SubstDeclarationNameInfo(D->getNameInfo(), TemplateArgs);
if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
- Method = CXXConstructorDecl::Create(SemaRef.Context, Record,
- StartLoc, NameInfo, T, TInfo,
- Constructor->isExplicit(),
- Constructor->isInlineSpecified(),
- false, Constructor->isConstexpr());
+ Method = CXXConstructorDecl::Create(
+ SemaRef.Context, Record, StartLoc, NameInfo, T, TInfo,
+ InstantiatedExplicitSpecifier, Constructor->isInlineSpecified(), false,
+ Constructor->getConstexprKind());
Method->setRangeEnd(Constructor->getEndLoc());
} else if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(D)) {
Method = CXXDestructorDecl::Create(SemaRef.Context, Record,
@@ -1993,13 +2098,13 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
} else if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(D)) {
Method = CXXConversionDecl::Create(
SemaRef.Context, Record, StartLoc, NameInfo, T, TInfo,
- Conversion->isInlineSpecified(), Conversion->isExplicit(),
- Conversion->isConstexpr(), Conversion->getEndLoc());
+ Conversion->isInlineSpecified(), InstantiatedExplicitSpecifier,
+ Conversion->getConstexprKind(), Conversion->getEndLoc());
} else {
StorageClass SC = D->isStatic() ? SC_Static : SC_None;
Method = CXXMethodDecl::Create(SemaRef.Context, Record, StartLoc, NameInfo,
T, TInfo, SC, D->isInlineSpecified(),
- D->isConstexpr(), D->getEndLoc());
+ D->getConstexprKind(), D->getEndLoc());
}
if (D->isInlined())
@@ -2101,7 +2206,8 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
IsExplicitSpecialization = true;
} else if (const ASTTemplateArgumentListInfo *Info =
- D->getTemplateSpecializationArgsAsWritten()) {
+ ClassScopeSpecializationArgs.getValueOr(
+ D->getTemplateSpecializationArgsAsWritten())) {
SemaRef.LookupQualifiedName(Previous, DC);
TemplateArgumentListInfo ExplicitArgs(Info->getLAngleLoc(),
@@ -2116,6 +2222,14 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
Method->setInvalidDecl();
IsExplicitSpecialization = true;
+ } else if (ClassScopeSpecializationArgs) {
+ // Class-scope explicit specialization written without explicit template
+ // arguments.
+ SemaRef.LookupQualifiedName(Previous, DC);
+ if (SemaRef.CheckFunctionTemplateSpecialization(Method, nullptr, Previous))
+ Method->setInvalidDecl();
+
+ IsExplicitSpecialization = true;
} else if (!FunctionTemplate || TemplateParams || isFriend) {
SemaRef.LookupQualifiedName(Previous, Record);
@@ -2127,9 +2241,8 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
Previous.clear();
}
- if (!IsClassScopeSpecialization)
- SemaRef.CheckFunctionDeclaration(nullptr, Method, Previous,
- IsExplicitSpecialization);
+ SemaRef.CheckFunctionDeclaration(nullptr, Method, Previous,
+ IsExplicitSpecialization);
if (D->isPure())
SemaRef.CheckPureMethod(Method, SourceRange());
@@ -2152,6 +2265,12 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
if (D->isDeletedAsWritten())
SemaRef.SetDeclDeleted(Method, Method->getLocation());
+ // If this is an explicit specialization, mark the implicitly-instantiated
+ // template specialization as being an explicit specialization too.
+ // FIXME: Is this necessary?
+ if (IsExplicitSpecialization && !isFriend)
+ SemaRef.CompleteMemberSpecialization(Method, Previous);
+
// If there's a function template, let our caller handle it.
if (FunctionTemplate) {
// do nothing
@@ -2172,10 +2291,24 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
// Otherwise, add the declaration. We don't need to do this for
// class-scope specializations because we'll have matched them with
// the appropriate template.
- } else if (!IsClassScopeSpecialization) {
+ } else {
Owner->addDecl(Method);
}
+ // PR17480: Honor the used attribute to instantiate member function
+ // definitions.
+ if (Method->hasAttr<UsedAttr>()) {
+ if (const auto *A = dyn_cast<CXXRecordDecl>(Owner)) {
+ SourceLocation Loc;
+ if (const MemberSpecializationInfo *MSInfo =
+ A->getMemberSpecializationInfo())
+ Loc = MSInfo->getPointOfInstantiation();
+ else if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(A))
+ Loc = Spec->getPointOfInstantiation();
+ SemaRef.MarkFunctionReferenced(Loc, Method);
+ }
+ }
+
return Method;
}
@@ -2768,38 +2901,8 @@ Decl *TemplateDeclInstantiator::VisitUsingPackDecl(UsingPackDecl *D) {
Decl *TemplateDeclInstantiator::VisitClassScopeFunctionSpecializationDecl(
ClassScopeFunctionSpecializationDecl *Decl) {
CXXMethodDecl *OldFD = Decl->getSpecialization();
- CXXMethodDecl *NewFD =
- cast_or_null<CXXMethodDecl>(VisitCXXMethodDecl(OldFD, nullptr, true));
- if (!NewFD)
- return nullptr;
-
- TemplateArgumentListInfo ExplicitTemplateArgs;
- TemplateArgumentListInfo *ExplicitTemplateArgsPtr = nullptr;
- if (Decl->hasExplicitTemplateArgs()) {
- if (SemaRef.Subst(Decl->templateArgs().getArgumentArray(),
- Decl->templateArgs().size(), ExplicitTemplateArgs,
- TemplateArgs))
- return nullptr;
- ExplicitTemplateArgsPtr = &ExplicitTemplateArgs;
- }
-
- LookupResult Previous(SemaRef, NewFD->getNameInfo(), Sema::LookupOrdinaryName,
- Sema::ForExternalRedeclaration);
- SemaRef.LookupQualifiedName(Previous, SemaRef.CurContext);
- if (SemaRef.CheckFunctionTemplateSpecialization(
- NewFD, ExplicitTemplateArgsPtr, Previous)) {
- NewFD->setInvalidDecl();
- return NewFD;
- }
-
- // Associate the specialization with the pattern.
- FunctionDecl *Specialization = cast<FunctionDecl>(Previous.getFoundDecl());
- assert(Specialization && "Class scope Specialization is null");
- SemaRef.Context.setClassScopeSpecializationPattern(Specialization, OldFD);
-
- // FIXME: If this is a definition, check for redefinition errors!
-
- return NewFD;
+ return cast_or_null<CXXMethodDecl>(
+ VisitCXXMethodDecl(OldFD, nullptr, Decl->getTemplateArgsAsWritten()));
}
Decl *TemplateDeclInstantiator::VisitOMPThreadPrivateDecl(
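
The rewritten VisitClassScopeFunctionSpecializationDecl above now funnels everything through VisitCXXMethodDecl. The construct it handles is a class-scope explicit specialization of a member function template, which Clang accepts as a Microsoft extension; a small illustrative example (names are made up):

    // Typically accepted under -fms-extensions.
    template <typename T> struct A {
      template <typename U> void f(U);
      template <> void f(int) {}         // specialization written without template arguments
      template <> void f<char>(char) {}  // specialization with explicit template arguments
    };
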
@@ -2820,6 +2923,32 @@ Decl *TemplateDeclInstantiator::VisitOMPThreadPrivateDecl(
return TD;
}
+Decl *TemplateDeclInstantiator::VisitOMPAllocateDecl(OMPAllocateDecl *D) {
+ SmallVector<Expr *, 5> Vars;
+ for (auto *I : D->varlists()) {
+ Expr *Var = SemaRef.SubstExpr(I, TemplateArgs).get();
+ assert(isa<DeclRefExpr>(Var) && "allocate arg is not a DeclRefExpr");
+ Vars.push_back(Var);
+ }
+ SmallVector<OMPClause *, 4> Clauses;
+ // Substitute the allocator clauses from the original directive.
+ for (OMPClause *C : D->clauselists()) {
+ auto *AC = cast<OMPAllocatorClause>(C);
+ ExprResult NewE = SemaRef.SubstExpr(AC->getAllocator(), TemplateArgs);
+ if (!NewE.isUsable())
+ continue;
+ OMPClause *IC = SemaRef.ActOnOpenMPAllocatorClause(
+ NewE.get(), AC->getBeginLoc(), AC->getLParenLoc(), AC->getEndLoc());
+ Clauses.push_back(IC);
+ }
+
+ Sema::DeclGroupPtrTy Res = SemaRef.ActOnOpenMPAllocateDirective(
+ D->getLocation(), Vars, Clauses, Owner);
+ if (Res.get().isNull())
+ return nullptr;
+ return Res.get().getSingleDecl();
+}
+
Decl *TemplateDeclInstantiator::VisitOMPRequiresDecl(OMPRequiresDecl *D) {
llvm_unreachable(
"Requires directive cannot be instantiated within a dependent context");
@@ -2925,6 +3054,95 @@ Decl *TemplateDeclInstantiator::VisitOMPDeclareReductionDecl(
return NewDRD;
}
+Decl *
+TemplateDeclInstantiator::VisitOMPDeclareMapperDecl(OMPDeclareMapperDecl *D) {
+ // Instantiate type and check if it is allowed.
+ const bool RequiresInstantiation =
+ D->getType()->isDependentType() ||
+ D->getType()->isInstantiationDependentType() ||
+ D->getType()->containsUnexpandedParameterPack();
+ QualType SubstMapperTy;
+ DeclarationName VN = D->getVarName();
+ if (RequiresInstantiation) {
+ SubstMapperTy = SemaRef.ActOnOpenMPDeclareMapperType(
+ D->getLocation(),
+ ParsedType::make(SemaRef.SubstType(D->getType(), TemplateArgs,
+ D->getLocation(), VN)));
+ } else {
+ SubstMapperTy = D->getType();
+ }
+ if (SubstMapperTy.isNull())
+ return nullptr;
+ // Create an instantiated copy of mapper.
+ auto *PrevDeclInScope = D->getPrevDeclInScope();
+ if (PrevDeclInScope && !PrevDeclInScope->isInvalidDecl()) {
+ PrevDeclInScope = cast<OMPDeclareMapperDecl>(
+ SemaRef.CurrentInstantiationScope->findInstantiationOf(PrevDeclInScope)
+ ->get<Decl *>());
+ }
+ OMPDeclareMapperDecl *NewDMD = SemaRef.ActOnOpenMPDeclareMapperDirectiveStart(
+ /*S=*/nullptr, Owner, D->getDeclName(), SubstMapperTy, D->getLocation(),
+ VN, D->getAccess(), PrevDeclInScope);
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, NewDMD);
+ SmallVector<OMPClause *, 6> Clauses;
+ bool IsCorrect = true;
+ if (!RequiresInstantiation) {
+ // Copy the mapper variable.
+ NewDMD->setMapperVarRef(D->getMapperVarRef());
+ // Copy map clauses from the original mapper.
+ for (OMPClause *C : D->clauselists())
+ Clauses.push_back(C);
+ } else {
+ // Instantiate the mapper variable.
+ DeclarationNameInfo DirName;
+ SemaRef.StartOpenMPDSABlock(OMPD_declare_mapper, DirName, /*S=*/nullptr,
+ (*D->clauselist_begin())->getBeginLoc());
+ SemaRef.ActOnOpenMPDeclareMapperDirectiveVarDecl(
+ NewDMD, /*S=*/nullptr, SubstMapperTy, D->getLocation(), VN);
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(
+ cast<DeclRefExpr>(D->getMapperVarRef())->getDecl(),
+ cast<DeclRefExpr>(NewDMD->getMapperVarRef())->getDecl());
+ auto *ThisContext = dyn_cast_or_null<CXXRecordDecl>(Owner);
+ Sema::CXXThisScopeRAII ThisScope(SemaRef, ThisContext, Qualifiers(),
+ ThisContext);
+ // Instantiate map clauses.
+ for (OMPClause *C : D->clauselists()) {
+ auto *OldC = cast<OMPMapClause>(C);
+ SmallVector<Expr *, 4> NewVars;
+ for (Expr *OE : OldC->varlists()) {
+ Expr *NE = SemaRef.SubstExpr(OE, TemplateArgs).get();
+ if (!NE) {
+ IsCorrect = false;
+ break;
+ }
+ NewVars.push_back(NE);
+ }
+ if (!IsCorrect)
+ break;
+ NestedNameSpecifierLoc NewQualifierLoc =
+ SemaRef.SubstNestedNameSpecifierLoc(OldC->getMapperQualifierLoc(),
+ TemplateArgs);
+ CXXScopeSpec SS;
+ SS.Adopt(NewQualifierLoc);
+ DeclarationNameInfo NewNameInfo = SemaRef.SubstDeclarationNameInfo(
+ OldC->getMapperIdInfo(), TemplateArgs);
+ OMPVarListLocTy Locs(OldC->getBeginLoc(), OldC->getLParenLoc(),
+ OldC->getEndLoc());
+ OMPClause *NewC = SemaRef.ActOnOpenMPMapClause(
+ OldC->getMapTypeModifiers(), OldC->getMapTypeModifiersLoc(), SS,
+ NewNameInfo, OldC->getMapType(), OldC->isImplicitMapType(),
+ OldC->getMapLoc(), OldC->getColonLoc(), NewVars, Locs);
+ Clauses.push_back(NewC);
+ }
+ SemaRef.EndOpenMPDSABlock(nullptr);
+ }
+ (void)SemaRef.ActOnOpenMPDeclareMapperDirectiveEnd(NewDMD, /*S=*/nullptr,
+ Clauses);
+ if (!IsCorrect)
+ return nullptr;
+ return NewDMD;
+}
+
Decl *TemplateDeclInstantiator::VisitOMPCapturedExprDecl(
OMPCapturedExprDecl * /*D*/) {
llvm_unreachable("Should not be met in templates");
@@ -2962,13 +3180,10 @@ TemplateDeclInstantiator::VisitClassTemplateSpecializationDecl(
"for a member class template");
// Lookup the already-instantiated declaration in the instantiation
- // of the class template. FIXME: Diagnose or assert if this fails?
- DeclContext::lookup_result Found
- = Owner->lookup(ClassTemplate->getDeclName());
- if (Found.empty())
- return nullptr;
- ClassTemplateDecl *InstClassTemplate
- = dyn_cast<ClassTemplateDecl>(Found.front());
+ // of the class template.
+ ClassTemplateDecl *InstClassTemplate =
+ cast_or_null<ClassTemplateDecl>(SemaRef.FindInstantiatedDecl(
+ D->getLocation(), ClassTemplate, TemplateArgs));
if (!InstClassTemplate)
return nullptr;
@@ -3077,6 +3292,7 @@ TemplateDeclInstantiator::VisitClassTemplateSpecializationDecl(
// Instantiate the members of the class-scope explicit specialization eagerly.
// We don't have support for lazy instantiation of an explicit specialization
// yet, and MSVC eagerly instantiates in this case.
+ // FIXME: This is wrong in standard C++.
if (D->isThisDeclarationADefinition() &&
SemaRef.InstantiateClass(D->getLocation(), InstD, D, TemplateArgs,
TSK_ImplicitInstantiation,
@@ -3094,6 +3310,12 @@ Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
assert(VarTemplate &&
"A template specialization without specialized template?");
+ VarTemplateDecl *InstVarTemplate =
+ cast_or_null<VarTemplateDecl>(SemaRef.FindInstantiatedDecl(
+ D->getLocation(), VarTemplate, TemplateArgs));
+ if (!InstVarTemplate)
+ return nullptr;
+
// Substitute the current template arguments.
const TemplateArgumentListInfo &TemplateArgsInfo = D->getTemplateArgsInfo();
VarTemplateArgsInfo.setLAngleLoc(TemplateArgsInfo.getLAngleLoc());
@@ -3105,28 +3327,33 @@ Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
// Check that the template argument list is well-formed for this template.
SmallVector<TemplateArgument, 4> Converted;
- if (SemaRef.CheckTemplateArgumentList(
- VarTemplate, VarTemplate->getBeginLoc(),
- const_cast<TemplateArgumentListInfo &>(VarTemplateArgsInfo), false,
- Converted))
+ if (SemaRef.CheckTemplateArgumentList(InstVarTemplate, D->getLocation(),
+ VarTemplateArgsInfo, false, Converted))
return nullptr;
- // Find the variable template specialization declaration that
- // corresponds to these arguments.
+ // Check whether we've already seen a declaration of this specialization.
void *InsertPos = nullptr;
- if (VarTemplateSpecializationDecl *VarSpec = VarTemplate->findSpecialization(
- Converted, InsertPos))
- // If we already have a variable template specialization, return it.
- return VarSpec;
+ VarTemplateSpecializationDecl *PrevDecl =
+ InstVarTemplate->findSpecialization(Converted, InsertPos);
+
+ // Check whether we've already seen a conflicting instantiation of this
+ // declaration (for instance, if there was a prior implicit instantiation).
+ bool Ignored;
+ if (PrevDecl && SemaRef.CheckSpecializationInstantiationRedecl(
+ D->getLocation(), D->getSpecializationKind(), PrevDecl,
+ PrevDecl->getSpecializationKind(),
+ PrevDecl->getPointOfInstantiation(), Ignored))
+ return nullptr;
- return VisitVarTemplateSpecializationDecl(VarTemplate, D, InsertPos,
- VarTemplateArgsInfo, Converted);
+ return VisitVarTemplateSpecializationDecl(
+ InstVarTemplate, D, InsertPos, VarTemplateArgsInfo, Converted, PrevDecl);
}
Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
VarTemplateDecl *VarTemplate, VarDecl *D, void *InsertPos,
const TemplateArgumentListInfo &TemplateArgsInfo,
- ArrayRef<TemplateArgument> Converted) {
+ ArrayRef<TemplateArgument> Converted,
+ VarTemplateSpecializationDecl *PrevDecl) {
// Do substitution on the type of the declaration
TypeSourceInfo *DI =
@@ -3153,8 +3380,8 @@ Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
if (SubstQualifier(D, Var))
return nullptr;
- SemaRef.BuildVariableInstantiation(Var, D, TemplateArgs, LateAttrs,
- Owner, StartingScope);
+ SemaRef.BuildVariableInstantiation(Var, D, TemplateArgs, LateAttrs, Owner,
+ StartingScope, false, PrevDecl);
return Var;
}
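
The two VisitVarTemplateSpecializationDecl hunks above look up the instantiated variable template, reuse a previously seen specialization as PrevDecl, and reject conflicting redeclarations instead of silently returning the earlier one. The member variable templates involved look roughly like this (illustrative names; C++14):

    template <typename T> struct S {
      template <typename U> static constexpr int v = 0;
      template <typename U> static constexpr int v<U *> = 1;  // member partial specialization
    };
    static_assert(S<long>::v<int *> == 1, "");  // selects the instantiated partial specialization
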
@@ -3174,6 +3401,10 @@ Decl *TemplateDeclInstantiator::VisitFriendTemplateDecl(FriendTemplateDecl *D) {
return nullptr;
}
+Decl *TemplateDeclInstantiator::VisitConceptDecl(ConceptDecl *D) {
+ llvm_unreachable("Concept definitions cannot reside inside a template");
+}
+
Decl *TemplateDeclInstantiator::VisitDecl(Decl *D) {
llvm_unreachable("Unexpected decl");
}
@@ -3506,7 +3737,7 @@ TemplateDeclInstantiator::SubstFunctionType(FunctionDecl *D,
Qualifiers ThisTypeQuals;
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
ThisContext = cast<CXXRecordDecl>(Owner);
- ThisTypeQuals = Method->getTypeQualifiers();
+ ThisTypeQuals = Method->getMethodQualifiers();
}
TypeSourceInfo *NewTInfo
@@ -3633,25 +3864,25 @@ static bool addInstantiatedParametersToScope(Sema &S, FunctionDecl *Function,
Scope.MakeInstantiatedLocalArgPack(PatternParam);
Optional<unsigned> NumArgumentsInExpansion
= S.getNumArgumentsInExpansion(PatternParam->getType(), TemplateArgs);
- assert(NumArgumentsInExpansion &&
- "should only be called when all template arguments are known");
- QualType PatternType =
- PatternParam->getType()->castAs<PackExpansionType>()->getPattern();
- for (unsigned Arg = 0; Arg < *NumArgumentsInExpansion; ++Arg) {
- ParmVarDecl *FunctionParam = Function->getParamDecl(FParamIdx);
- FunctionParam->setDeclName(PatternParam->getDeclName());
- if (!PatternDecl->getType()->isDependentType()) {
- Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, Arg);
- QualType T = S.SubstType(PatternType, TemplateArgs,
- FunctionParam->getLocation(),
- FunctionParam->getDeclName());
- if (T.isNull())
- return true;
- FunctionParam->setType(T);
- }
+ if (NumArgumentsInExpansion) {
+ QualType PatternType =
+ PatternParam->getType()->castAs<PackExpansionType>()->getPattern();
+ for (unsigned Arg = 0; Arg < *NumArgumentsInExpansion; ++Arg) {
+ ParmVarDecl *FunctionParam = Function->getParamDecl(FParamIdx);
+ FunctionParam->setDeclName(PatternParam->getDeclName());
+ if (!PatternDecl->getType()->isDependentType()) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, Arg);
+ QualType T = S.SubstType(PatternType, TemplateArgs,
+ FunctionParam->getLocation(),
+ FunctionParam->getDeclName());
+ if (T.isNull())
+ return true;
+ FunctionParam->setType(T);
+ }
- Scope.InstantiatedLocalPackArg(PatternParam, FunctionParam);
- ++FParamIdx;
+ Scope.InstantiatedLocalPackArg(PatternParam, FunctionParam);
+ ++FParamIdx;
+ }
}
}
@@ -3882,9 +4113,9 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
// Never instantiate an explicit specialization except if it is a class scope
// explicit specialization.
- TemplateSpecializationKind TSK = Function->getTemplateSpecializationKind();
- if (TSK == TSK_ExplicitSpecialization &&
- !Function->getClassScopeSpecializationPattern())
+ TemplateSpecializationKind TSK =
+ Function->getTemplateSpecializationKindForInstantiation();
+ if (TSK == TSK_ExplicitSpecialization)
return;
// Find the function body that we'll be substituting.
@@ -3939,6 +4170,14 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
return;
}
+ llvm::TimeTraceScope TimeScope("InstantiateFunction", [&]() {
+ std::string Name;
+ llvm::raw_string_ostream OS(Name);
+ Function->getNameForDiagnostic(OS, getPrintingPolicy(),
+ /*Qualified=*/true);
+ return Name;
+ });
+
// If we're performing recursive template instantiation, create our own
// queue of pending implicit instantiations that we will instantiate later,
// while we're still within our own instantiation context.
@@ -4165,7 +4404,19 @@ void Sema::BuildVariableInstantiation(
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner,
LocalInstantiationScope *StartingScope,
- bool InstantiatingVarTemplate) {
+ bool InstantiatingVarTemplate,
+ VarTemplateSpecializationDecl *PrevDeclForVarTemplateSpecialization) {
+ // Instantiating a partial specialization to produce a partial
+ // specialization.
+ bool InstantiatingVarTemplatePartialSpec =
+ isa<VarTemplatePartialSpecializationDecl>(OldVar) &&
+ isa<VarTemplatePartialSpecializationDecl>(NewVar);
+ // Instantiating from a variable template (or partial specialization) to
+ // produce a variable template specialization.
+ bool InstantiatingSpecFromTemplate =
+ isa<VarTemplateSpecializationDecl>(NewVar) &&
+ (OldVar->getDescribedVarTemplate() ||
+ isa<VarTemplatePartialSpecializationDecl>(OldVar));
// If we are instantiating a local extern declaration, the
// instantiation belongs lexically to the containing function.
@@ -4211,8 +4462,11 @@ void Sema::BuildVariableInstantiation(
NewVar->getLocation(), OldVar->getPreviousDecl(), TemplateArgs))
Previous.addDecl(NewPrev);
} else if (!isa<VarTemplateSpecializationDecl>(NewVar) &&
- OldVar->hasLinkage())
+ OldVar->hasLinkage()) {
LookupQualifiedName(Previous, NewVar->getDeclContext(), false);
+ } else if (PrevDeclForVarTemplateSpecialization) {
+ Previous.addDecl(PrevDeclForVarTemplateSpecialization);
+ }
CheckVariableDeclaration(NewVar, Previous);
if (!InstantiatingVarTemplate) {
@@ -4228,23 +4482,44 @@ void Sema::BuildVariableInstantiation(
// Link instantiations of static data members back to the template from
// which they were instantiated.
- if (NewVar->isStaticDataMember() && !InstantiatingVarTemplate)
+ //
+ // Don't do this when instantiating a template (we link the template itself
+ // back in that case) nor when instantiating a static data member template
+ // (that's not a member specialization).
+ if (NewVar->isStaticDataMember() && !InstantiatingVarTemplate &&
+ !InstantiatingSpecFromTemplate)
NewVar->setInstantiationOfStaticDataMember(OldVar,
TSK_ImplicitInstantiation);
+ // If the pattern is an (in-class) explicit specialization, then the result
+ // is also an explicit specialization.
+ if (VarTemplateSpecializationDecl *OldVTSD =
+ dyn_cast<VarTemplateSpecializationDecl>(OldVar)) {
+ if (OldVTSD->getSpecializationKind() == TSK_ExplicitSpecialization &&
+ !isa<VarTemplatePartialSpecializationDecl>(OldVTSD))
+ cast<VarTemplateSpecializationDecl>(NewVar)->setSpecializationKind(
+ TSK_ExplicitSpecialization);
+ }
+
// Forward the mangling number from the template to the instantiated decl.
Context.setManglingNumber(NewVar, Context.getManglingNumber(OldVar));
Context.setStaticLocalNumber(NewVar, Context.getStaticLocalNumber(OldVar));
- // Delay instantiation of the initializer for variable templates or inline
- // static data members until a definition of the variable is needed. We need
- // it right away if the type contains 'auto'.
- if ((!isa<VarTemplateSpecializationDecl>(NewVar) &&
- !InstantiatingVarTemplate &&
- !(OldVar->isInline() && OldVar->isThisDeclarationADefinition() &&
- !NewVar->isThisDeclarationADefinition())) ||
- NewVar->getType()->isUndeducedType())
+ // Figure out whether to eagerly instantiate the initializer.
+ if (InstantiatingVarTemplate || InstantiatingVarTemplatePartialSpec) {
+ // We're producing a template. Don't instantiate the initializer yet.
+ } else if (NewVar->getType()->isUndeducedType()) {
+ // We need the type to complete the declaration of the variable.
InstantiateVariableInitializer(NewVar, OldVar, TemplateArgs);
+ } else if (InstantiatingSpecFromTemplate ||
+ (OldVar->isInline() && OldVar->isThisDeclarationADefinition() &&
+ !NewVar->isThisDeclarationADefinition())) {
+ // Delay instantiation of the initializer for variable template
+ // specializations or inline static data members until a definition of the
+ // variable is needed.
+ } else {
+ InstantiateVariableInitializer(NewVar, OldVar, TemplateArgs);
+ }
// Diagnose unused local variables with dependent types, where the diagnostic
// will have been deferred.
@@ -4344,15 +4619,23 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
if (Var->isInvalidDecl())
return;
- VarTemplateSpecializationDecl *VarSpec =
- dyn_cast<VarTemplateSpecializationDecl>(Var);
- VarDecl *PatternDecl = nullptr, *Def = nullptr;
+ // Never instantiate an explicitly-specialized entity.
+ TemplateSpecializationKind TSK =
+ Var->getTemplateSpecializationKindForInstantiation();
+ if (TSK == TSK_ExplicitSpecialization)
+ return;
+
+ // Find the pattern and the arguments to substitute into it.
+ VarDecl *PatternDecl = Var->getTemplateInstantiationPattern();
+ assert(PatternDecl && "no pattern for templated variable");
MultiLevelTemplateArgumentList TemplateArgs =
getTemplateInstantiationArgs(Var);
+ VarTemplateSpecializationDecl *VarSpec =
+ dyn_cast<VarTemplateSpecializationDecl>(Var);
if (VarSpec) {
// If this is a variable template specialization, make sure that it is
- // non-dependent, then find its instantiation pattern.
+ // non-dependent.
bool InstantiationDependent = false;
assert(!TemplateSpecializationType::anyDependentTemplateArguments(
VarSpec->getTemplateArgsInfo(), InstantiationDependent) &&
@@ -4360,37 +4643,6 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
"not type-dependent");
(void)InstantiationDependent;
- // Find the variable initialization that we'll be substituting. If the
- // pattern was instantiated from a member template, look back further to
- // find the real pattern.
- assert(VarSpec->getSpecializedTemplate() &&
- "Specialization without specialized template?");
- llvm::PointerUnion<VarTemplateDecl *,
- VarTemplatePartialSpecializationDecl *> PatternPtr =
- VarSpec->getSpecializedTemplateOrPartial();
- if (PatternPtr.is<VarTemplatePartialSpecializationDecl *>()) {
- VarTemplatePartialSpecializationDecl *Tmpl =
- PatternPtr.get<VarTemplatePartialSpecializationDecl *>();
- while (VarTemplatePartialSpecializationDecl *From =
- Tmpl->getInstantiatedFromMember()) {
- if (Tmpl->isMemberSpecialization())
- break;
-
- Tmpl = From;
- }
- PatternDecl = Tmpl;
- } else {
- VarTemplateDecl *Tmpl = PatternPtr.get<VarTemplateDecl *>();
- while (VarTemplateDecl *From =
- Tmpl->getInstantiatedFromMemberTemplate()) {
- if (Tmpl->isMemberSpecialization())
- break;
-
- Tmpl = From;
- }
- PatternDecl = Tmpl->getTemplatedDecl();
- }
-
// If this is a static data member template, there might be an
// uninstantiated initializer on the declaration. If so, instantiate
// it now.
@@ -4434,20 +4686,12 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
Local.Exit();
GlobalInstantiations.perform();
}
-
- // Find actual definition
- Def = PatternDecl->getDefinition(getASTContext());
} else {
- // If this is a static data member, find its out-of-line definition.
- assert(Var->isStaticDataMember() && "not a static data member?");
- PatternDecl = Var->getInstantiatedFromStaticDataMember();
-
- assert(PatternDecl && "data member was not instantiated from a template?");
- assert(PatternDecl->isStaticDataMember() && "not a static data member?");
- Def = PatternDecl->getDefinition();
+ assert(Var->isStaticDataMember() && PatternDecl->isStaticDataMember() &&
+ "not a static data member?");
}
- TemplateSpecializationKind TSK = Var->getTemplateSpecializationKind();
+ VarDecl *Def = PatternDecl->getDefinition(getASTContext());
// If we don't have a definition of the variable template, we won't perform
// any instantiation. Rather, we rely on the user to instantiate this
@@ -4469,7 +4713,6 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
}
return;
}
-
}
// FIXME: We need to track the instantiation stack in order to know which
@@ -4481,18 +4724,17 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
/*Complain*/DefinitionRequired))
return;
-
- // Never instantiate an explicit specialization.
- if (TSK == TSK_ExplicitSpecialization)
- return;
-
// C++11 [temp.explicit]p10:
// Except for inline functions, const variables of literal types, variables
// of reference types, [...] explicit instantiation declarations
// have the effect of suppressing the implicit instantiation of the entity
// to which they refer.
+ //
+ // FIXME: That's not exactly the same as "might be usable in constant
+ // expressions", which only allows constexpr variables and const integral
+ // types, not arbitrary const literal types.
if (TSK == TSK_ExplicitInstantiationDeclaration &&
- !Var->isUsableInConstantExpressions(getASTContext()))
+ !Var->mightBeUsableInConstantExpressions(getASTContext()))
return;
// Make sure to pass the instantiated variable to the consumer at the end.
@@ -5006,7 +5248,8 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
if (isa<ParmVarDecl>(D) || isa<NonTypeTemplateParmDecl>(D) ||
isa<TemplateTypeParmDecl>(D) || isa<TemplateTemplateParmDecl>(D) ||
((ParentDC->isFunctionOrMethod() ||
- isa<OMPDeclareReductionDecl>(ParentDC)) &&
+ isa<OMPDeclareReductionDecl>(ParentDC) ||
+ isa<OMPDeclareMapperDecl>(ParentDC)) &&
ParentDC->isDependentContext()) ||
(isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda())) {
// D is a local of some kind. Look into the map of local
@@ -5320,7 +5563,8 @@ void Sema::PerformPendingInstantiations(bool LocalOnly) {
// Check if the most recent declaration has changed the specialization kind
// and removed the need for implicit instantiation.
- switch (Var->getMostRecentDecl()->getTemplateSpecializationKind()) {
+ switch (Var->getMostRecentDecl()
+ ->getTemplateSpecializationKindForInstantiation()) {
case TSK_Undeclared:
llvm_unreachable("Cannot instantitiate an undeclared specialization.");
case TSK_ExplicitInstantiationDeclaration:
diff --git a/lib/Sema/SemaTemplateVariadic.cpp b/lib/Sema/SemaTemplateVariadic.cpp
index 0e7fc20d2487..d97626551a41 100644
--- a/lib/Sema/SemaTemplateVariadic.cpp
+++ b/lib/Sema/SemaTemplateVariadic.cpp
@@ -1,9 +1,8 @@
//===------- SemaTemplateVariadic.cpp - C++ Variadic Templates ------------===/
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//===----------------------------------------------------------------------===/
//
// This file implements semantic analysis for C++0x variadic templates.
@@ -40,11 +39,11 @@ namespace {
unsigned DepthLimit = (unsigned)-1;
void addUnexpanded(NamedDecl *ND, SourceLocation Loc = SourceLocation()) {
- if (auto *PVD = dyn_cast<ParmVarDecl>(ND)) {
+ if (auto *VD = dyn_cast<VarDecl>(ND)) {
// For now, the only problematic case is a generic lambda's templated
// call operator, so we don't need to look for all the other ways we
// could have reached a dependent parameter pack.
- auto *FD = dyn_cast<FunctionDecl>(PVD->getDeclContext());
+ auto *FD = dyn_cast<FunctionDecl>(VD->getDeclContext());
auto *FTD = FD ? FD->getDescribedFunctionTemplate() : nullptr;
if (FTD && FTD->getTemplateParameters()->getDepth() >= DepthLimit)
return;
@@ -314,11 +313,11 @@ Sema::DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
if (auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Func)) {
if (N == FunctionScopes.size()) {
- for (auto &Param : Unexpanded) {
- auto *PD = dyn_cast_or_null<ParmVarDecl>(
- Param.first.dyn_cast<NamedDecl *>());
- if (PD && PD->getDeclContext() == LSI->CallOperator)
- LambdaParamPackReferences.push_back(Param);
+ for (auto &Pack : Unexpanded) {
+ auto *VD = dyn_cast_or_null<VarDecl>(
+ Pack.first.dyn_cast<NamedDecl *>());
+ if (VD && VD->getDeclContext() == LSI->CallOperator)
+ LambdaParamPackReferences.push_back(Pack);
}
}
@@ -587,11 +586,15 @@ Sema::CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc,
QualType Sema::CheckPackExpansion(QualType Pattern, SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions) {
- // C++0x [temp.variadic]p5:
+ // C++11 [temp.variadic]p5:
// The pattern of a pack expansion shall name one or more
// parameter packs that are not expanded by a nested pack
// expansion.
- if (!Pattern->containsUnexpandedParameterPack()) {
+ //
+ // A pattern containing a deduced type can't occur "naturally" but arises in
+ // the desugaring of an init-capture pack.
+ if (!Pattern->containsUnexpandedParameterPack() &&
+ !Pattern->getContainedDeducedType()) {
Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
<< PatternRange;
return QualType();
@@ -616,6 +619,7 @@ ExprResult Sema::CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
if (!Pattern->containsUnexpandedParameterPack()) {
Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
<< Pattern->getSourceRange();
+ CorrectDelayedTyposInExpr(Pattern);
return ExprError();
}
@@ -642,7 +646,7 @@ bool Sema::CheckParameterPacksForExpansion(
// Compute the depth and index for this parameter pack.
unsigned Depth = 0, Index = 0;
IdentifierInfo *Name;
- bool IsFunctionParameterPack = false;
+ bool IsVarDeclPack = false;
if (const TemplateTypeParmType *TTP
= i->first.dyn_cast<const TemplateTypeParmType *>()) {
@@ -651,8 +655,8 @@ bool Sema::CheckParameterPacksForExpansion(
Name = TTP->getIdentifier();
} else {
NamedDecl *ND = i->first.get<NamedDecl *>();
- if (isa<ParmVarDecl>(ND))
- IsFunctionParameterPack = true;
+ if (isa<VarDecl>(ND))
+ IsVarDeclPack = true;
else
std::tie(Depth, Index) = getDepthAndIndex(ND);
@@ -661,7 +665,7 @@ bool Sema::CheckParameterPacksForExpansion(
// Determine the size of this argument pack.
unsigned NewPackSize;
- if (IsFunctionParameterPack) {
+ if (IsVarDeclPack) {
// Figure out whether we're instantiating to an argument pack or not.
typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
@@ -695,7 +699,7 @@ bool Sema::CheckParameterPacksForExpansion(
// Template argument deduction can extend the sequence of template
// arguments corresponding to a template parameter pack, even when the
// sequence contains explicitly specified template arguments.
- if (!IsFunctionParameterPack && CurrentInstantiationScope) {
+ if (!IsVarDeclPack && CurrentInstantiationScope) {
if (NamedDecl *PartialPack
= CurrentInstantiationScope->getPartiallySubstitutedPack()){
unsigned PartialDepth, PartialIndex;
@@ -779,8 +783,8 @@ Optional<unsigned> Sema::getNumArgumentsInExpansion(QualType T,
Index = TTP->getIndex();
} else {
NamedDecl *ND = Unexpanded[I].first.get<NamedDecl *>();
- if (isa<ParmVarDecl>(ND)) {
- // Function parameter pack.
+ if (isa<VarDecl>(ND)) {
+ // Function parameter pack or init-capture pack.
typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
llvm::PointerUnion<Decl *, DeclArgumentPack *> *Instantiation
@@ -925,12 +929,16 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
namespace {
// Callback to only accept typo corrections that refer to parameter packs.
-class ParameterPackValidatorCCC : public CorrectionCandidateCallback {
+class ParameterPackValidatorCCC final : public CorrectionCandidateCallback {
public:
bool ValidateCandidate(const TypoCorrection &candidate) override {
NamedDecl *ND = candidate.getCorrectionDecl();
return ND && ND->isParameterPack();
}
+
+ std::unique_ptr<CorrectionCandidateCallback> clone() override {
+ return llvm::make_unique<ParameterPackValidatorCCC>(*this);
+ }
};
}
@@ -966,18 +974,18 @@ ExprResult Sema::ActOnSizeofParameterPackExpr(Scope *S,
break;
case LookupResult::NotFound:
- case LookupResult::NotFoundInCurrentInstantiation:
+ case LookupResult::NotFoundInCurrentInstantiation: {
+ ParameterPackValidatorCCC CCC{};
if (TypoCorrection Corrected =
CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), S, nullptr,
- llvm::make_unique<ParameterPackValidatorCCC>(),
- CTK_ErrorRecovery)) {
+ CCC, CTK_ErrorRecovery)) {
diagnoseTypo(Corrected,
PDiag(diag::err_sizeof_pack_no_pack_name_suggest) << &Name,
PDiag(diag::note_parameter_pack_here));
ParameterPack = Corrected.getCorrectionDecl();
}
break;
-
+ }
case LookupResult::FoundOverloaded:
case LookupResult::FoundUnresolvedValue:
break;
@@ -1081,7 +1089,7 @@ Optional<unsigned> Sema::getFullyPackExpandedSize(TemplateArgument Arg) {
dyn_cast<SubstNonTypeTemplateParmPackExpr>(Arg.getAsExpr()))
Pack = Subst->getArgumentPack();
else if (auto *Subst = dyn_cast<FunctionParmPackExpr>(Arg.getAsExpr())) {
- for (ParmVarDecl *PD : *Subst)
+ for (VarDecl *PD : *Subst)
if (PD->isParameterPack())
return None;
return Subst->getNumExpansions();
@@ -1173,15 +1181,18 @@ ExprResult Sema::ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
}
BinaryOperatorKind Opc = ConvertTokenKindToBinaryOpcode(Operator);
- return BuildCXXFoldExpr(LParenLoc, LHS, Opc, EllipsisLoc, RHS, RParenLoc);
+ return BuildCXXFoldExpr(LParenLoc, LHS, Opc, EllipsisLoc, RHS, RParenLoc,
+ None);
}
ExprResult Sema::BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
- SourceLocation RParenLoc) {
+ SourceLocation RParenLoc,
+ Optional<unsigned> NumExpansions) {
return new (Context) CXXFoldExpr(Context.DependentTy, LParenLoc, LHS,
- Operator, EllipsisLoc, RHS, RParenLoc);
+ Operator, EllipsisLoc, RHS, RParenLoc,
+ NumExpansions);
}
ExprResult Sema::BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
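
Taken together, the SemaTemplateVariadic.cpp changes above widen pack handling from ParmVarDecl to VarDecl, so C++20 init-capture packs are treated like function parameter packs, and BuildCXXFoldExpr now carries an expansion count. A small example combining the two features:

    // C++20: '...xs = ts' declares an init-capture pack of VarDecls; the fold
    // expression then expands it.
    template <typename... Ts> auto sum_later(Ts... ts) {
      return [...xs = ts] { return (xs + ... + 0); };
    }
    int six = sum_later(1, 2, 3)();  // 6
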
diff --git a/lib/Sema/SemaType.cpp b/lib/Sema/SemaType.cpp
index b4c075e9c46d..29acf6177eb9 100644
--- a/lib/Sema/SemaType.cpp
+++ b/lib/Sema/SemaType.cpp
@@ -1,9 +1,8 @@
//===--- SemaType.cpp - Semantic Analysis for Types -----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -183,6 +182,10 @@ namespace {
SmallVector<TypeAttrPair, 8> AttrsForTypes;
bool AttrsForTypesSorted = true;
+ /// MacroQualifiedTypes mapping to macro expansion locations that will be
+ /// stored in a MacroQualifiedTypeLoc.
+ llvm::DenseMap<const MacroQualifiedType *, SourceLocation> LocsForMacros;
+
/// Flag to indicate we parsed a noderef attribute. This is used for
/// validating that noderef was used on a pointer or array.
bool parsedNoDeref;
@@ -256,13 +259,27 @@ namespace {
return T;
}
+ /// Completely replace the \c auto in \p TypeWithAuto by
+ /// \p Replacement. Also replace \p TypeWithAuto in \c TypeAttrPair if
+ /// necessary.
+ QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement) {
+ QualType T = sema.ReplaceAutoType(TypeWithAuto, Replacement);
+ if (auto *AttrTy = TypeWithAuto->getAs<AttributedType>()) {
+ // Attributed type still should be an attributed type after replacement.
+ auto *NewAttrTy = cast<AttributedType>(T.getTypePtr());
+ for (TypeAttrPair &A : AttrsForTypes) {
+ if (A.first == AttrTy)
+ A.first = NewAttrTy;
+ }
+ AttrsForTypesSorted = false;
+ }
+ return T;
+ }
+
/// Extract and remove the Attr* for a given attributed type.
const Attr *takeAttrForAttributedType(const AttributedType *AT) {
if (!AttrsForTypesSorted) {
- std::stable_sort(AttrsForTypes.begin(), AttrsForTypes.end(),
- [](const TypeAttrPair &A, const TypeAttrPair &B) {
- return A.first < B.first;
- });
+ llvm::stable_sort(AttrsForTypes, llvm::less_first());
AttrsForTypesSorted = true;
}
@@ -282,6 +299,19 @@ namespace {
llvm_unreachable("no Attr* for AttributedType*");
}
+ SourceLocation
+ getExpansionLocForMacroQualifiedType(const MacroQualifiedType *MQT) const {
+ auto FoundLoc = LocsForMacros.find(MQT);
+ assert(FoundLoc != LocsForMacros.end() &&
+ "Unable to find macro expansion location for MacroQualifedType");
+ return FoundLoc->second;
+ }
+
+ void setExpansionLocForMacroQualifiedType(const MacroQualifiedType *MQT,
+ SourceLocation Loc) {
+ LocsForMacros[MQT] = Loc;
+ }
+
void setParsedNoDeref(bool parsed) { parsedNoDeref = parsed; }
bool didParseNoDeref() const { return parsedNoDeref; }
@@ -518,8 +548,8 @@ static void distributeObjCPointerTypeAttrFromDeclarator(
// attribute from being applied multiple times and gives
// the source-location-filler something to work with.
state.saveDeclSpecAttrs();
- moveAttrFromListToList(attr, declarator.getAttributes(),
- declarator.getMutableDeclSpec().getAttributes());
+ declarator.getMutableDeclSpec().getAttributes().takeOneFrom(
+ declarator.getAttributes(), &attr);
return;
}
}
@@ -721,7 +751,7 @@ static void maybeSynthesizeBlockSignature(TypeProcessingState &state,
/*IsAmbiguous=*/false,
/*LParenLoc=*/NoLoc,
/*ArgInfo=*/nullptr,
- /*NumArgs=*/0,
+ /*NumParams=*/0,
/*EllipsisLoc=*/NoLoc,
/*RParenLoc=*/NoLoc,
/*RefQualifierIsLvalueRef=*/true,
@@ -1326,7 +1356,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// "At least one type specifier shall be given in the declaration
// specifiers in each declaration, and in the specifier-qualifier list in
// each struct declaration and type name."
- if (S.getLangOpts().CPlusPlus) {
+ if (S.getLangOpts().CPlusPlus && !DS.isTypeSpecPipe()) {
S.Diag(DeclLoc, diag::err_missing_type_specifier)
<< DS.getSourceRange();
@@ -1334,7 +1364,9 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// value being declared, poison it as invalid so we don't get chains of
// errors.
declarator.setInvalidType(true);
- } else if (S.getLangOpts().OpenCLVersion >= 200 && DS.isTypeSpecPipe()){
+ } else if ((S.getLangOpts().OpenCLVersion >= 200 ||
+ S.getLangOpts().OpenCLCPlusPlus) &&
+ DS.isTypeSpecPipe()) {
S.Diag(DeclLoc, diag::err_missing_actual_pipe_type)
<< DS.getSourceRange();
declarator.setInvalidType(true);
@@ -1434,7 +1466,8 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
break;
}
case DeclSpec::TST_int128:
- if (!S.Context.getTargetInfo().hasInt128Type())
+ if (!S.Context.getTargetInfo().hasInt128Type() &&
+ !(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsDevice))
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
<< "__int128";
if (DS.getTypeSpecSign() == DeclSpec::TSS_unsigned)
@@ -1442,7 +1475,16 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
else
Result = Context.Int128Ty;
break;
- case DeclSpec::TST_float16: Result = Context.Float16Ty; break;
+ case DeclSpec::TST_float16:
+ // CUDA host and device may have different _Float16 support, so do not
+ // diagnose _Float16 usage here to avoid false alarms.
+ // ToDo: more precise diagnostics for CUDA.
+ if (!S.Context.getTargetInfo().hasFloat16Type() && !S.getLangOpts().CUDA &&
+ !(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsDevice))
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
+ << "_Float16";
+ Result = Context.Float16Ty;
+ break;
case DeclSpec::TST_half: Result = Context.HalfTy; break;
case DeclSpec::TST_float: Result = Context.FloatTy; break;
case DeclSpec::TST_double:
@@ -1452,7 +1494,8 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
Result = Context.DoubleTy;
break;
case DeclSpec::TST_float128:
- if (!S.Context.getTargetInfo().hasFloat128Type())
+ if (!S.Context.getTargetInfo().hasFloat128Type() &&
+ !(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsDevice))
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
<< "__float128";
Result = Context.Float128Ty;
@@ -1868,7 +1911,7 @@ static QualType inferARCLifetimeForPointee(Sema &S, QualType type,
}
static std::string getFunctionQualifiersAsString(const FunctionProtoType *FnTy){
- std::string Quals = FnTy->getTypeQuals().getAsString();
+ std::string Quals = FnTy->getMethodQuals().getAsString();
switch (FnTy->getRefQualifier()) {
case RQ_None:
@@ -1910,7 +1953,7 @@ static bool checkQualifiedFunction(Sema &S, QualType T, SourceLocation Loc,
QualifiedFunctionKind QFK) {
// Does T refer to a function type with a cv-qualifier or a ref-qualifier?
const FunctionProtoType *FPT = T->getAs<FunctionProtoType>();
- if (!FPT || (FPT->getTypeQuals().empty() && FPT->getRefQualifier() == RQ_None))
+ if (!FPT || (FPT->getMethodQuals().empty() && FPT->getRefQualifier() == RQ_None))
return false;
S.Diag(Loc, diag::err_compound_qualified_function_type)
@@ -2243,15 +2286,13 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
}
if (T->isVariableArrayType() && !Context.getTargetInfo().isVLASupported()) {
- if (getLangOpts().CUDA) {
- // CUDA device code doesn't support VLAs.
- CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget();
- } else if (!getLangOpts().OpenMP ||
- shouldDiagnoseTargetSupportFromOpenMP()) {
- // Some targets don't support VLAs.
- Diag(Loc, diag::err_vla_unsupported);
- return QualType();
- }
+ // CUDA device code and some other targets don't support VLAs.
+ targetDiag(Loc, (getLangOpts().CUDA && getLangOpts().CUDAIsDevice)
+ ? diag::err_cuda_vla
+ : diag::err_vla_unsupported)
+ << ((getLangOpts().CUDA && getLangOpts().CUDAIsDevice)
+ ? CurrentCUDATarget()
+ : CFT_InvalidTarget);
}
// If this is not C99, extwarn about VLA's and C99 array size modifiers.
@@ -2415,6 +2456,11 @@ bool Sema::CheckFunctionReturnType(QualType T, SourceLocation Loc) {
return true;
}
+ if (T.hasNonTrivialToPrimitiveDestructCUnion() ||
+ T.hasNonTrivialToPrimitiveCopyCUnion())
+ checkNonTrivialCUnion(T, Loc, NTCUC_FunctionReturn,
+ NTCUK_Destruct|NTCUK_Copy);
+
return false;
}
@@ -2913,7 +2959,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
sema::LambdaScopeInfo *LSI = SemaRef.getCurLambda();
assert(LSI && "No LambdaScopeInfo on the stack!");
const unsigned TemplateParameterDepth = LSI->AutoTemplateParameterDepth;
- const unsigned AutoParameterPosition = LSI->AutoTemplateParams.size();
+ const unsigned AutoParameterPosition = LSI->TemplateParams.size();
const bool IsParameterPack = D.hasEllipsis();
// Create the TemplateTypeParmDecl here to retrieve the corresponding
@@ -2925,12 +2971,13 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
/*KeyLoc*/ SourceLocation(), /*NameLoc*/ D.getBeginLoc(),
TemplateParameterDepth, AutoParameterPosition,
/*Identifier*/ nullptr, false, IsParameterPack);
- LSI->AutoTemplateParams.push_back(CorrespondingTemplateParam);
+ CorrespondingTemplateParam->setImplicit();
+ LSI->TemplateParams.push_back(CorrespondingTemplateParam);
// Replace the 'auto' in the function parameter with this invented
// template type parameter.
// FIXME: Retain some type sugar to indicate that this was written
// as 'auto'.
- T = SemaRef.ReplaceAutoType(
+ T = state.ReplaceAutoType(
T, QualType(CorrespondingTemplateParam->getTypeForDecl(), 0));
}
break;
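
The hunk above is part of turning each auto parameter of a generic lambda into an invented template type parameter; those parameters are now marked implicit, collected in LSI->TemplateParams, and the auto is replaced through the TypeProcessingState so attribute bookkeeping stays consistent. For reference, the source-level feature involved:

    // Each 'auto' below becomes an invented template parameter of the call operator.
    auto add = [](auto a, auto b) { return a + b; };
    int three = add(1, 2);
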
@@ -3916,6 +3963,25 @@ static Attr *createNullabilityAttr(ASTContext &Ctx, ParsedAttr &Attr,
llvm_unreachable("unknown NullabilityKind");
}
+// Diagnose whether this is a case with multiple address-space qualifiers.
+// Returns true if this is an invalid case.
+// ISO/IEC TR 18037 S5.3 (amending C99 6.7.3): "No type shall be qualified
+// by qualifiers for two or more different address spaces."
+static bool DiagnoseMultipleAddrSpaceAttributes(Sema &S, LangAS ASOld,
+ LangAS ASNew,
+ SourceLocation AttrLoc) {
+ if (ASOld != LangAS::Default) {
+ if (ASOld != ASNew) {
+ S.Diag(AttrLoc, diag::err_attribute_address_multiple_qualifiers);
+ return true;
+ }
+ // Emit a warning if they are identical; it's likely unintended.
+ S.Diag(AttrLoc,
+ diag::warn_attribute_address_multiple_identical_qualifiers);
+ }
+ return false;
+}
+
static TypeSourceInfo *
GetTypeSourceInfoForDeclarator(TypeProcessingState &State,
QualType T, TypeSourceInfo *ReturnTypeInfo);
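
DiagnoseMultipleAddrSpaceAttributes centralizes the TR 18037 rule quoted above and is reused by the address_space handling later in this file. Concretely, with the GNU address_space attribute (the numbers are arbitrary):

    __attribute__((address_space(1))) __attribute__((address_space(2))) int bad;  // error: different spaces
    __attribute__((address_space(1))) __attribute__((address_space(1))) int dup;  // warning: identical, likely unintended
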
@@ -3944,7 +4010,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Does T refer to a function type with a cv-qualifier or a ref-qualifier?
bool IsQualifiedFunction = T->isFunctionProtoType() &&
- (!T->castAs<FunctionProtoType>()->getTypeQuals().empty() ||
+ (!T->castAs<FunctionProtoType>()->getMethodQuals().empty() ||
T->castAs<FunctionProtoType>()->getRefQualifier() != RQ_None);
// If T is 'decltype(auto)', the only declarators we can have are parens
@@ -4194,7 +4260,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
auto inferPointerNullability =
[&](SimplePointerKind pointerKind, SourceLocation pointerLoc,
SourceLocation pointerEndLoc,
- ParsedAttributesView &attrs) -> ParsedAttr * {
+ ParsedAttributesView &attrs, AttributePool &Pool) -> ParsedAttr * {
// We've seen a pointer.
if (NumPointersRemaining > 0)
--NumPointersRemaining;
@@ -4208,11 +4274,9 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
ParsedAttr::Syntax syntax = inferNullabilityCS
? ParsedAttr::AS_ContextSensitiveKeyword
: ParsedAttr::AS_Keyword;
- ParsedAttr *nullabilityAttr =
- state.getDeclarator().getAttributePool().create(
- S.getNullabilityKeyword(*inferNullability),
- SourceRange(pointerLoc), nullptr, SourceLocation(), nullptr, 0,
- syntax);
+ ParsedAttr *nullabilityAttr = Pool.create(
+ S.getNullabilityKeyword(*inferNullability), SourceRange(pointerLoc),
+ nullptr, SourceLocation(), nullptr, 0, syntax);
attrs.addAtEnd(nullabilityAttr);
@@ -4271,7 +4335,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
if (auto *attr = inferPointerNullability(
pointerKind, D.getDeclSpec().getTypeSpecTypeLoc(),
D.getDeclSpec().getEndLoc(),
- D.getMutableDeclSpec().getAttributes())) {
+ D.getMutableDeclSpec().getAttributes(),
+ D.getMutableDeclSpec().getAttributePool())) {
T = state.getAttributedType(
createNullabilityAttr(Context, *attr, *inferNullability), T, T);
}
@@ -4311,7 +4376,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Handle pointer nullability.
inferPointerNullability(SimplePointerKind::BlockPointer, DeclType.Loc,
- DeclType.EndLoc, DeclType.getAttrs());
+ DeclType.EndLoc, DeclType.getAttrs(),
+ state.getDeclarator().getAttributePool());
T = S.BuildBlockPointerType(T, D.getIdentifierLoc(), Name);
if (DeclType.Cls.TypeQuals || LangOpts.OpenCL) {
@@ -4333,7 +4399,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Handle pointer nullability
inferPointerNullability(SimplePointerKind::Pointer, DeclType.Loc,
- DeclType.EndLoc, DeclType.getAttrs());
+ DeclType.EndLoc, DeclType.getAttrs(),
+ state.getDeclarator().getAttributePool());
if (LangOpts.ObjC && T->getAs<ObjCObjectType>()) {
T = Context.getObjCObjectPointerType(T);
@@ -4454,7 +4521,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// If the function declarator has a prototype (i.e. it is not () and
// does not have a K&R-style identifier list), then the arguments are part
// of the type, otherwise the argument list is ().
- const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
+ DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
IsQualifiedFunction =
FTI.hasMethodTypeQualifiers() || FTI.hasRefQualifier();
@@ -4558,7 +4625,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
if (FTI.isVariadic &&
!(D.getIdentifier() &&
((D.getIdentifier()->getName() == "printf" &&
- LangOpts.OpenCLVersion >= 120) ||
+ (LangOpts.OpenCLCPlusPlus || LangOpts.OpenCLVersion >= 120)) ||
D.getIdentifier()->getName().startswith("__")))) {
S.Diag(D.getIdentifierLoc(), diag::err_opencl_variadic_function);
D.setInvalidType(true);
@@ -4823,18 +4890,35 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
Exceptions,
EPI.ExceptionSpec);
- const auto &Spec = D.getCXXScopeSpec();
+ // FIXME: Set address space from attrs for C++ mode here.
// OpenCLCPlusPlus: A class member function has an address space.
- if (state.getSema().getLangOpts().OpenCLCPlusPlus &&
- ((!Spec.isEmpty() &&
- Spec.getScopeRep()->getKind() == NestedNameSpecifier::TypeSpec) ||
- state.getDeclarator().getContext() ==
- DeclaratorContext::MemberContext)) {
- LangAS CurAS = EPI.TypeQuals.getAddressSpace();
+ auto IsClassMember = [&]() {
+ return (!state.getDeclarator().getCXXScopeSpec().isEmpty() &&
+ state.getDeclarator()
+ .getCXXScopeSpec()
+ .getScopeRep()
+ ->getKind() == NestedNameSpecifier::TypeSpec) ||
+ state.getDeclarator().getContext() ==
+ DeclaratorContext::MemberContext;
+ };
+
+ if (state.getSema().getLangOpts().OpenCLCPlusPlus && IsClassMember()) {
+ LangAS ASIdx = LangAS::Default;
+ // Take address space attr if any and mark as invalid to avoid adding
+ // them later while creating QualType.
+ if (FTI.MethodQualifiers)
+ for (ParsedAttr &attr : FTI.MethodQualifiers->getAttributes()) {
+ LangAS ASIdxNew = attr.asOpenCLLangAS();
+ if (DiagnoseMultipleAddrSpaceAttributes(S, ASIdx, ASIdxNew,
+ attr.getLoc()))
+ D.setInvalidType(true);
+ else
+ ASIdx = ASIdxNew;
+ }
// If a class member function's address space is not set, set it to
// __generic.
LangAS AS =
- (CurAS == LangAS::Default ? LangAS::opencl_generic : CurAS);
+ (ASIdx == LangAS::Default ? LangAS::opencl_generic : ASIdx);
EPI.TypeQuals.addAddressSpace(AS);
}
T = Context.getFunctionType(T, ParamTys, EPI);
@@ -4848,7 +4932,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Handle pointer nullability.
inferPointerNullability(SimplePointerKind::MemberPointer, DeclType.Loc,
- DeclType.EndLoc, DeclType.getAttrs());
+ DeclType.EndLoc, DeclType.getAttrs(),
+ state.getDeclarator().getAttributePool());
if (SS.isInvalid()) {
// Avoid emitting extra errors if we already errored on the scope.
@@ -4918,11 +5003,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
processTypeAttrs(state, T, TAL_DeclChunk, DeclType.getAttrs());
if (DeclType.Kind != DeclaratorChunk::Paren) {
- if (ExpectNoDerefChunk) {
- if (!IsNoDerefableChunk(DeclType))
- S.Diag(DeclType.Loc, diag::warn_noderef_on_non_pointer_or_array);
- ExpectNoDerefChunk = false;
- }
+ if (ExpectNoDerefChunk && !IsNoDerefableChunk(DeclType))
+ S.Diag(DeclType.Loc, diag::warn_noderef_on_non_pointer_or_array);
ExpectNoDerefChunk = state.didParseNoDeref();
}
@@ -4949,7 +5031,10 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
break;
case DeclaratorChunk::Function: {
const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
- if (FTI.NumParams == 0 && !FTI.isVariadic)
+ // We suppress the warning when there's no LParen location, as this
+ // indicates the declaration was an implicit declaration, which gets
+ // warned about separately via -Wimplicit-function-declaration.
+ if (FTI.NumParams == 0 && !FTI.isVariadic && FTI.getLParenLoc().isValid())
S.Diag(DeclType.Loc, diag::warn_strict_prototypes)
<< IsBlock
<< FixItHint::CreateInsertion(FTI.getRParenLoc(), "void");
@@ -5066,7 +5151,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// C++0x [dcl.constexpr]p9:
// A constexpr specifier used in an object declaration declares the object
// as const.
- if (D.getDeclSpec().isConstexprSpecified() && T->isObjectType()) {
+ if (D.getDeclSpec().hasConstexprSpecifier() && T->isObjectType()) {
T.addConst();
}
@@ -5310,6 +5395,11 @@ namespace {
Visit(TL.getModifiedLoc());
fillAttributedTypeLoc(TL, State);
}
+ void VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc TL) {
+ Visit(TL.getInnerLoc());
+ TL.setExpansionLoc(
+ State.getExpansionLocForMacroQualifiedType(TL.getTypePtr()));
+ }
void VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
Visit(TL.getUnqualifiedLoc());
}
@@ -5583,6 +5673,9 @@ namespace {
assert(Chunk.Kind == DeclaratorChunk::Pipe);
TL.setKWLoc(Chunk.Loc);
}
+ void VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc TL) {
+ TL.setExpansionLoc(Chunk.Loc);
+ }
void VisitTypeLoc(TypeLoc TL) {
llvm_unreachable("unsupported TypeLoc kind in declarator!");
@@ -5661,6 +5754,12 @@ GetTypeSourceInfoForDeclarator(TypeProcessingState &State,
CurrTL = ATL.getValueLoc().getUnqualifiedLoc();
}
+ while (MacroQualifiedTypeLoc TL = CurrTL.getAs<MacroQualifiedTypeLoc>()) {
+ TL.setExpansionLoc(
+ State.getExpansionLocForMacroQualifiedType(TL.getTypePtr()));
+ CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
+ }
+
while (AttributedTypeLoc TL = CurrTL.getAs<AttributedTypeLoc>()) {
fillAttributedTypeLoc(TL, State);
CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
@@ -5752,28 +5851,27 @@ ParsedType Sema::ActOnObjCInstanceType(SourceLocation Loc) {
// Type Attribute Processing
//===----------------------------------------------------------------------===//
-/// BuildAddressSpaceAttr - Builds a DependentAddressSpaceType if an expression
-/// is uninstantiated. If instantiated it will apply the appropriate address space
-/// to the type. This function allows dependent template variables to be used in
-/// conjunction with the address_space attribute
-QualType Sema::BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
- SourceLocation AttrLoc) {
+/// Build an AddressSpace index from a constant expression and diagnose any
+/// errors related to invalid address_spaces. Returns true on successfully
+/// building an AddressSpace index.
+static bool BuildAddressSpaceIndex(Sema &S, LangAS &ASIdx,
+ const Expr *AddrSpace,
+ SourceLocation AttrLoc) {
if (!AddrSpace->isValueDependent()) {
-
llvm::APSInt addrSpace(32);
- if (!AddrSpace->isIntegerConstantExpr(addrSpace, Context)) {
- Diag(AttrLoc, diag::err_attribute_argument_type)
+ if (!AddrSpace->isIntegerConstantExpr(addrSpace, S.Context)) {
+ S.Diag(AttrLoc, diag::err_attribute_argument_type)
<< "'address_space'" << AANT_ArgumentIntegerConstant
<< AddrSpace->getSourceRange();
- return QualType();
+ return false;
}
// Bounds checking.
if (addrSpace.isSigned()) {
if (addrSpace.isNegative()) {
- Diag(AttrLoc, diag::err_attribute_address_space_negative)
+ S.Diag(AttrLoc, diag::err_attribute_address_space_negative)
<< AddrSpace->getSourceRange();
- return QualType();
+ return false;
}
addrSpace.setIsSigned(false);
}
@@ -5782,27 +5880,31 @@ QualType Sema::BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
max =
Qualifiers::MaxAddressSpace - (unsigned)LangAS::FirstTargetAddressSpace;
if (addrSpace > max) {
- Diag(AttrLoc, diag::err_attribute_address_space_too_high)
+ S.Diag(AttrLoc, diag::err_attribute_address_space_too_high)
<< (unsigned)max.getZExtValue() << AddrSpace->getSourceRange();
- return QualType();
+ return false;
}
- LangAS ASIdx =
+ ASIdx =
getLangASFromTargetAS(static_cast<unsigned>(addrSpace.getZExtValue()));
+ return true;
+ }
- // If this type is already address space qualified with a different
- // address space, reject it.
- // ISO/IEC TR 18037 S5.3 (amending C99 6.7.3): "No type shall be qualified
- // by qualifiers for two or more different address spaces."
- if (T.getAddressSpace() != LangAS::Default) {
- if (T.getAddressSpace() != ASIdx) {
- Diag(AttrLoc, diag::err_attribute_address_multiple_qualifiers);
- return QualType();
- } else
- // Emit a warning if they are identical; it's likely unintended.
- Diag(AttrLoc,
- diag::warn_attribute_address_multiple_identical_qualifiers);
- }
+ // Default value for DependentAddressSpaceTypes
+ ASIdx = LangAS::Default;
+ return true;
+}
+
+/// BuildAddressSpaceAttr - Builds a DependentAddressSpaceType if an expression
+/// is uninstantiated. If instantiated it will apply the appropriate address
+/// space to the type. This function allows dependent template variables to be
+/// used in conjunction with the address_space attribute
+QualType Sema::BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
+ SourceLocation AttrLoc) {
+ if (!AddrSpace->isValueDependent()) {
+ if (DiagnoseMultipleAddrSpaceAttributes(*this, T.getAddressSpace(), ASIdx,
+ AttrLoc))
+ return QualType();
return Context.getAddrSpaceQualType(T, ASIdx);
}
@@ -5820,6 +5922,14 @@ QualType Sema::BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
return Context.getDependentAddressSpaceType(T, AddrSpace, AttrLoc);
}
+QualType Sema::BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
+ SourceLocation AttrLoc) {
+ LangAS ASIdx;
+ if (!BuildAddressSpaceIndex(*this, ASIdx, AddrSpace, AttrLoc))
+ return QualType();
+ return BuildAddressSpaceAttr(T, ASIdx, AddrSpace, AttrLoc);
+}
+
/// HandleAddressSpaceTypeAttribute - Process an address_space attribute on the
/// specified type. The attribute contains 1 argument, the id of the address
/// space for the type.
@@ -5856,7 +5966,8 @@ static void HandleAddressSpaceTypeAttribute(QualType &Type,
id.setIdentifier(Attr.getArgAsIdent(0)->Ident, Attr.getLoc());
ExprResult AddrSpace = S.ActOnIdExpression(
- S.getCurScope(), SS, TemplateKWLoc, id, false, false);
+ S.getCurScope(), SS, TemplateKWLoc, id, /*HasTrailingLParen=*/false,
+ /*IsAddressOfOperand=*/false);
if (AddrSpace.isInvalid())
return;
@@ -5865,49 +5976,51 @@ static void HandleAddressSpaceTypeAttribute(QualType &Type,
ASArgExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
}
- // Create the DependentAddressSpaceType or append an address space onto
- // the type.
- QualType T = S.BuildAddressSpaceAttr(Type, ASArgExpr, Attr.getLoc());
+ LangAS ASIdx;
+ if (!BuildAddressSpaceIndex(S, ASIdx, ASArgExpr, Attr.getLoc())) {
+ Attr.setInvalid();
+ return;
+ }
- if (!T.isNull()) {
- ASTContext &Ctx = S.Context;
- auto *ASAttr = ::new (Ctx) AddressSpaceAttr(
- Attr.getRange(), Ctx, Attr.getAttributeSpellingListIndex(),
- static_cast<unsigned>(T.getQualifiers().getAddressSpace()));
- Type = State.getAttributedType(ASAttr, T, T);
+ ASTContext &Ctx = S.Context;
+ auto *ASAttr = ::new (Ctx) AddressSpaceAttr(
+ Attr.getRange(), Ctx, Attr.getAttributeSpellingListIndex(),
+ static_cast<unsigned>(ASIdx));
+
+ // If the expression is not value dependent (not templated), then we can
+ // apply the address space qualifiers just to the equivalent type.
+ // Otherwise, we make an AttributedType with the modified and equivalent
+ // type the same, and wrap it in a DependentAddressSpaceType. When this
+ // dependent type is resolved, the qualifier is added to the equivalent type
+ // later.
+ QualType T;
+ if (!ASArgExpr->isValueDependent()) {
+ QualType EquivType =
+ S.BuildAddressSpaceAttr(Type, ASIdx, ASArgExpr, Attr.getLoc());
+ if (EquivType.isNull()) {
+ Attr.setInvalid();
+ return;
+ }
+ T = State.getAttributedType(ASAttr, Type, EquivType);
} else {
- Attr.setInvalid();
+ T = State.getAttributedType(ASAttr, Type, Type);
+ T = S.BuildAddressSpaceAttr(T, ASIdx, ASArgExpr, Attr.getLoc());
}
+
+ if (!T.isNull())
+ Type = T;
+ else
+ Attr.setInvalid();
} else {
// The keyword-based type attributes imply which address space to use.
- switch (Attr.getKind()) {
- case ParsedAttr::AT_OpenCLGlobalAddressSpace:
- ASIdx = LangAS::opencl_global; break;
- case ParsedAttr::AT_OpenCLLocalAddressSpace:
- ASIdx = LangAS::opencl_local; break;
- case ParsedAttr::AT_OpenCLConstantAddressSpace:
- ASIdx = LangAS::opencl_constant; break;
- case ParsedAttr::AT_OpenCLGenericAddressSpace:
- ASIdx = LangAS::opencl_generic; break;
- case ParsedAttr::AT_OpenCLPrivateAddressSpace:
- ASIdx = LangAS::opencl_private; break;
- default:
+ ASIdx = Attr.asOpenCLLangAS();
+ if (ASIdx == LangAS::Default)
llvm_unreachable("Invalid address space");
- }
- // If this type is already address space qualified with a different
- // address space, reject it.
- // ISO/IEC TR 18037 S5.3 (amending C99 6.7.3): "No type shall be qualified by
- // qualifiers for two or more different address spaces."
- if (Type.getAddressSpace() != LangAS::Default) {
- if (Type.getAddressSpace() != ASIdx) {
- S.Diag(Attr.getLoc(), diag::err_attribute_address_multiple_qualifiers);
- Attr.setInvalid();
- return;
- } else
- // Emit a warning if they are identical; it's likely unintended.
- S.Diag(Attr.getLoc(),
- diag::warn_attribute_address_multiple_identical_qualifiers);
+ if (DiagnoseMultipleAddrSpaceAttributes(S, Type.getAddressSpace(), ASIdx,
+ Attr.getLoc())) {
+ Attr.setInvalid();
+ return;
}
Type = S.Context.getAddrSpaceQualType(Type, ASIdx);
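For orientation, a minimal user-level sketch of the attribute this path handles; both declarations are illustrative and not part of the patch:
typedef int __attribute__((address_space(1))) as1_int;   // qualifies 'int' with AS 1
// int __attribute__((address_space(1))) __attribute__((address_space(2))) bad;
//   rejected: a type may not be qualified with two different address spaces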
@@ -6837,6 +6950,57 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
return true;
}
+ if (attr.getKind() == ParsedAttr::AT_NoThrow) {
+ // Delay if this is not a function type.
+ if (!unwrapped.isFunctionType())
+ return false;
+
+ if (S.CheckAttrNoArgs(attr)) {
+ attr.setInvalid();
+ return true;
+ }
+
+ // Otherwise we can process right away.
+ auto *Proto = unwrapped.get()->castAs<FunctionProtoType>();
+
+ // MSVC ignores nothrow if it is in conflict with an explicit exception
+ // specification.
+ if (Proto->hasExceptionSpec()) {
+ switch (Proto->getExceptionSpecType()) {
+ case EST_None:
+ llvm_unreachable("This doesn't have an exception spec!");
+
+ case EST_DynamicNone:
+ case EST_BasicNoexcept:
+ case EST_NoexceptTrue:
+ case EST_NoThrow:
+ // Exception spec doesn't conflict with nothrow, so don't warn.
+ LLVM_FALLTHROUGH;
+ case EST_Unparsed:
+ case EST_Uninstantiated:
+ case EST_DependentNoexcept:
+ case EST_Unevaluated:
+ // We don't have enough information to properly determine if there is a
+ // conflict, so suppress the warning.
+ break;
+ case EST_Dynamic:
+ case EST_MSAny:
+ case EST_NoexceptFalse:
+ S.Diag(attr.getLoc(), diag::warn_nothrow_attribute_ignored);
+ break;
+ }
+ return true;
+ }
+
+ type = unwrapped.wrap(
+ S, S.Context
+ .getFunctionTypeWithExceptionSpec(
+ QualType{Proto, 0},
+ FunctionProtoType::ExceptionSpecInfo{EST_NoThrow})
+ ->getAs<FunctionType>());
+ return true;
+ }
+
// Delay if the type didn't work out to a function.
if (!unwrapped.isFunctionType()) return false;
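A hedged sketch of the user-visible effect of the NoThrow handling above (assuming -fms-extensions for the __declspec spelling):
__declspec(nothrow) void f();                  // in C++, recorded as a non-throwing
                                               // exception specification (EST_NoThrow)
__declspec(nothrow) void g() noexcept(false);  // conflicting explicit spec wins; the
                                               // attribute is ignored with a warning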
@@ -6871,19 +7035,16 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
if (!supportsVariadicCall(CC)) {
const FunctionProtoType *FnP = dyn_cast<FunctionProtoType>(fn);
if (FnP && FnP->isVariadic()) {
- unsigned DiagID = diag::err_cconv_varargs;
-
// stdcall and fastcall are ignored with a warning for GCC and MS
// compatibility.
- bool IsInvalid = true;
- if (CC == CC_X86StdCall || CC == CC_X86FastCall) {
- DiagID = diag::warn_cconv_varargs;
- IsInvalid = false;
- }
+ if (CC == CC_X86StdCall || CC == CC_X86FastCall)
+ return S.Diag(attr.getLoc(), diag::warn_cconv_unsupported)
+ << FunctionType::getNameForCallConv(CC)
+ << (int)Sema::CallingConventionIgnoredReason::VariadicFunction;
- S.Diag(attr.getLoc(), DiagID) << FunctionType::getNameForCallConv(CC);
- if (IsInvalid) attr.setInvalid();
- return true;
+ attr.setInvalid();
+ return S.Diag(attr.getLoc(), diag::err_cconv_varargs)
+ << FunctionType::getNameForCallConv(CC);
}
}
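Hypothetical declarations showing the two diagnostics split above (x86 spellings assumed):
void __attribute__((stdcall)) f(int, ...);     // warning: convention ignored on a
                                               // variadic function (GCC/MS compat)
void __attribute__((vectorcall)) g(int, ...);  // error: variadic functions cannot
                                               // use this calling convention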
@@ -6910,12 +7071,16 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
return true;
}
-bool Sema::hasExplicitCallingConv(QualType &T) {
- QualType R = T.IgnoreParens();
- while (const AttributedType *AT = dyn_cast<AttributedType>(R)) {
+bool Sema::hasExplicitCallingConv(QualType T) {
+ const AttributedType *AT;
+
+ // Stop if we'd be stripping off a typedef sugar node to reach the
+ // AttributedType.
+ while ((AT = T->getAs<AttributedType>()) &&
+ AT->getAs<TypedefType>() == T->getAs<TypedefType>()) {
if (AT->isCallingConv())
return true;
- R = AT->getModifiedType().IgnoreParens();
+ T = AT->getModifiedType();
}
return false;
}
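A hedged reading of the typedef guard above, with illustrative declarations:
typedef void cb_t(int) __attribute__((stdcall));
cb_t handler;                                // the convention sits behind typedef
                                             // sugar, so it is not treated as
                                             // explicitly written here
void __attribute__((stdcall)) direct(int);   // explicit on the type itself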
@@ -6938,8 +7103,9 @@ void Sema::adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
// Issue a warning on ignored calling convention -- except of __stdcall.
// Again, this is what MS compiler does.
if (CurCC != CC_X86StdCall)
- Diag(Loc, diag::warn_cconv_structors)
- << FunctionType::getNameForCallConv(CurCC);
+ Diag(Loc, diag::warn_cconv_unsupported)
+ << FunctionType::getNameForCallConv(CurCC)
+ << (int)Sema::CallingConventionIgnoredReason::ConstructorDestructor;
// Default adjustment.
} else {
// Only adjust types with the default convention. For example, on Windows
@@ -6986,7 +7152,8 @@ static void HandleVectorSizeAttr(QualType &CurType, const ParsedAttr &Attr,
Id.setIdentifier(Attr.getArgAsIdent(0)->Ident, Attr.getLoc());
ExprResult Size = S.ActOnIdExpression(S.getCurScope(), SS, TemplateKWLoc,
- Id, false, false);
+ Id, /*HasTrailingLParen=*/false,
+ /*IsAddressOfOperand=*/false);
if (Size.isInvalid())
return;
@@ -7023,7 +7190,8 @@ static void HandleExtVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr,
id.setIdentifier(Attr.getArgAsIdent(0)->Ident, Attr.getLoc());
ExprResult Size = S.ActOnIdExpression(S.getCurScope(), SS, TemplateKWLoc,
- id, false, false);
+ id, /*HasTrailingLParen=*/false,
+ /*IsAddressOfOperand=*/false);
if (Size.isInvalid())
return;
@@ -7234,8 +7402,13 @@ static void deduceOpenCLImplicitAddrSpace(TypeProcessingState &State,
// otherwise it will fail some sema check.
IsFuncReturnType || IsFuncType ||
// Do not deduce addr space for member types of struct, except the pointee
- // type of a pointer member type.
- (D.getContext() == DeclaratorContext::MemberContext && !IsPointee) ||
+ // type of a pointer member type or static data members.
+ (D.getContext() == DeclaratorContext::MemberContext &&
+ (!IsPointee &&
+ D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static)) ||
+ // Do not deduce addr space of non-pointee in type alias because it
+ // doesn't define any object.
+ (D.getContext() == DeclaratorContext::AliasDeclContext && !IsPointee) ||
// Do not deduce addr space for types used to define a typedef and the
// typedef itself, except the pointee type of a pointer type which is used
// to define the typedef.
@@ -7244,9 +7417,28 @@ static void deduceOpenCLImplicitAddrSpace(TypeProcessingState &State,
// Do not deduce addr space of the void type, e.g. in f(void), otherwise
// it will fail some sema check.
(T->isVoidType() && !IsPointee) ||
- // Do not deduce address spaces for dependent types because they might end
+ // Do not deduce addr spaces for dependent types because they might end
// up instantiating to a type with an explicit address space qualifier.
- T->isDependentType())
+ // Except for pointer or reference types because the addr space in
+ // template argument can only belong to a pointee.
+ (T->isDependentType() && !T->isPointerType() && !T->isReferenceType()) ||
+ // Do not deduce addr space of decltype because it will be taken from
+ // its argument.
+ T->isDecltypeType() ||
+ // OpenCL spec v2.0 s6.9.b:
+ // The sampler type cannot be used with the __local and __global address
+ // space qualifiers.
+ // OpenCL spec v2.0 s6.13.14:
+ // Samplers can also be declared as global constants in the program
+ // source using the following syntax.
+ // const sampler_t <sampler name> = <value>
+ // In codegen, file-scope sampler type variable has special handing and
+ // does not rely on address space qualifier. On the other hand, deducing
+ // address space of const sampler file-scope variable as global address
+ // space causes spurious diagnostic about __global address space
+ // qualifier, therefore do not deduce address space of file-scope sampler
+ // type variable.
+ (D.getContext() == DeclaratorContext::FileContext && T->isSamplerT()))
return;
LangAS ImpAddr = LangAS::Default;
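Hedged OpenCL snippets for two of the deduction exceptions listed above:
const sampler_t smp = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP |
                      CLK_FILTER_NEAREST;   // file-scope sampler: no deduction
struct S { int *p; };                       // the member itself is not deduced, but
                                            // the pointee of a pointer member is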
@@ -7338,9 +7530,11 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
if (!IsTypeAttr)
continue;
}
- } else if (TAL != TAL_DeclChunk) {
+ } else if (TAL != TAL_DeclChunk &&
+ attr.getKind() != ParsedAttr::AT_AddressSpace) {
// Otherwise, only consider type processing for a C++11 attribute if
// it's actually been applied to a type.
+ // We also allow C++11 address_space attributes to pass through.
continue;
}
}
@@ -7444,7 +7638,7 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
state.getDeclarator().isPrototypeContext() &&
!hasOuterPointerLikeChunk(state.getDeclarator(), endIndex);
if (checkNullabilityTypeSpecifier(
- state,
+ state,
type,
attr,
allowOnArrayType)) {
@@ -7477,6 +7671,12 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
attr.setInvalid();
break;
+ case ParsedAttr::AT_NoThrow:
+ // Exception Specifications aren't generally supported in C mode throughout
+ // clang, so revert to attribute-based handling for C.
+ if (!state.getSema().getLangOpts().CPlusPlus)
+ break;
+ LLVM_FALLTHROUGH;
FUNCTION_TYPE_ATTRS_CASELIST:
attr.setUsedAsTypeAttr();
@@ -7490,6 +7690,20 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
distributeFunctionTypeAttr(state, attr, type);
break;
}
+
+ // Handle attributes that are defined in a macro. We do not want this to be
+ // applied to ObjC builtin attributes.
+ if (isa<AttributedType>(type) && attr.hasMacroIdentifier() &&
+ !type.getQualifiers().hasObjCLifetime() &&
+ !type.getQualifiers().hasObjCGCAttr() &&
+ attr.getKind() != ParsedAttr::AT_ObjCGC &&
+ attr.getKind() != ParsedAttr::AT_ObjCOwnership) {
+ const IdentifierInfo *MacroII = attr.getMacroIdentifier();
+ type = state.getSema().Context.getMacroQualifiedType(type, MacroII);
+ state.setExpansionLocForMacroQualifiedType(
+ cast<MacroQualifiedType>(type.getTypePtr()),
+ attr.getMacroExpansionLoc());
+ }
}
if (!state.getSema().getLangOpts().OpenCL ||
diff --git a/lib/Sema/TreeTransform.h b/lib/Sema/TreeTransform.h
index df14768cbe81..8df18b5c2784 100644
--- a/lib/Sema/TreeTransform.h
+++ b/lib/Sema/TreeTransform.h
@@ -1,9 +1,8 @@
//===------- TreeTransform.h - Semantic Tree Transformation -----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//===----------------------------------------------------------------------===//
//
// This file implements a semantic tree transformation that takes a given
@@ -149,6 +148,11 @@ public:
/// statement node appears at most once in its containing declaration.
bool AlwaysRebuild() { return SemaRef.ArgumentPackSubstitutionIndex != -1; }
+ /// Whether the transformation is forming an expression or statement that
+ /// replaces the original. In this case, we'll reuse mangling numbers from
+ /// existing lambdas.
+ bool ReplacingOriginal() { return false; }
+
/// Returns the location of the entity being transformed, if that
/// information was not available elsewhere in the AST.
///
@@ -319,6 +323,13 @@ public:
TypeSourceInfo *TransformTypeWithDeducedTST(TypeSourceInfo *DI);
/// @}
+ /// The reason why the value of a statement is not discarded, if any.
+ enum StmtDiscardKind {
+ SDK_Discarded,
+ SDK_NotDiscarded,
+ SDK_StmtExprResult,
+ };
+
/// Transform the given statement.
///
/// By default, this routine transforms a statement by delegating to the
@@ -328,7 +339,7 @@ public:
/// other mechanism.
///
/// \returns the transformed statement.
- StmtResult TransformStmt(Stmt *S, bool DiscardedValue = false);
+ StmtResult TransformStmt(Stmt *S, StmtDiscardKind SDK = SDK_Discarded);
/// Transform the given statement.
///
@@ -444,8 +455,10 @@ public:
/// TransformDefinition. However, in some cases (e.g., lambda expressions),
/// the transformer itself has to transform the declarations. This routine
/// can be overridden by a subclass that keeps track of such mappings.
- void transformedLocalDecl(Decl *Old, Decl *New) {
- TransformedLocalDecls[Old] = New;
+ void transformedLocalDecl(Decl *Old, ArrayRef<Decl *> New) {
+ assert(New.size() == 1 &&
+ "must override transformedLocalDecl if performing pack expansion");
+ TransformedLocalDecls[Old] = New.front();
}
/// Transform the definition of the given declaration.
@@ -646,6 +659,12 @@ public:
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
+ /// Transform the body of a lambda-expression.
+ StmtResult TransformLambdaBody(LambdaExpr *E, Stmt *Body);
+ /// Alternative implementation of TransformLambdaBody that skips transforming
+ /// the body.
+ StmtResult SkipLambdaBody(LambdaExpr *E, Stmt *Body);
+
QualType TransformReferenceType(TypeLocBuilder &TLB, ReferenceTypeLoc TL);
StmtResult TransformCompoundStmt(CompoundStmt *S, bool IsStmtExpr);
@@ -673,6 +692,9 @@ public:
#define STMT(Node, Parent) \
LLVM_ATTRIBUTE_NOINLINE \
StmtResult Transform##Node(Node *S);
+#define VALUESTMT(Node, Parent) \
+ LLVM_ATTRIBUTE_NOINLINE \
+ StmtResult Transform##Node(Node *S, StmtDiscardKind SDK);
#define EXPR(Node, Parent) \
LLVM_ATTRIBUTE_NOINLINE \
ExprResult Transform##Node(Node *E);
@@ -874,6 +896,12 @@ public:
return SemaRef.Context.getTypeDeclType(Typedef);
}
+ /// Build a new MacroDefined type.
+ QualType RebuildMacroQualifiedType(QualType T,
+ const IdentifierInfo *MacroII) {
+ return SemaRef.Context.getMacroQualifiedType(T, MacroII);
+ }
+
/// Build a new class/struct/union type.
QualType RebuildRecordType(RecordDecl *Record) {
return SemaRef.Context.getTypeDeclType(Record);
@@ -1356,10 +1384,11 @@ public:
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
+ unsigned NumLabels,
SourceLocation RParenLoc) {
return getSema().ActOnGCCAsmStmt(AsmLoc, IsSimple, IsVolatile, NumOutputs,
NumInputs, Names, Constraints, Exprs,
- AsmString, Clobbers, RParenLoc);
+ AsmString, Clobbers, NumLabels, RParenLoc);
}
/// Build a new MS style inline asm statement.
@@ -1545,6 +1574,16 @@ public:
return getSema().ActOnOpenMPSimdlenClause(Len, StartLoc, LParenLoc, EndLoc);
}
+ /// Build a new OpenMP 'allocator' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPAllocatorClause(Expr *A, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPAllocatorClause(A, StartLoc, LParenLoc, EndLoc);
+ }
+
/// Build a new OpenMP 'collapse' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
@@ -1795,17 +1834,30 @@ public:
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
- OMPClause *
- RebuildOMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
- ArrayRef<SourceLocation> MapTypeModifiersLoc,
- OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
- SourceLocation MapLoc, SourceLocation ColonLoc,
- ArrayRef<Expr *> VarList, SourceLocation StartLoc,
- SourceLocation LParenLoc, SourceLocation EndLoc) {
+ OMPClause *RebuildOMPMapClause(
+ ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
+ ArrayRef<SourceLocation> MapTypeModifiersLoc,
+ CXXScopeSpec MapperIdScopeSpec, DeclarationNameInfo MapperId,
+ OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
+ SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers) {
return getSema().ActOnOpenMPMapClause(MapTypeModifiers, MapTypeModifiersLoc,
- MapType, IsMapTypeImplicit, MapLoc,
- ColonLoc, VarList, StartLoc,
- LParenLoc, EndLoc);
+ MapperIdScopeSpec, MapperId, MapType,
+ IsMapTypeImplicit, MapLoc, ColonLoc,
+ VarList, Locs, UnresolvedMappers);
+ }
+
+ /// Build a new OpenMP 'allocate' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPAllocateClause(Expr *Allocate, ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation ColonLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPAllocateClause(Allocate, VarList, StartLoc,
+ LParenLoc, ColonLoc, EndLoc);
}
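A hedged OpenMP 5.0 usage sketch for the new allocator/allocate rebuilds (compile with -fopenmp; omp.h provides the predefined allocator handle):
#include <omp.h>
int a;
#pragma omp allocate(a) allocator(omp_high_bw_mem_alloc)
void work(void) {
  #pragma omp parallel private(a) allocate(omp_high_bw_mem_alloc : a)
  { a = 0; }
}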
/// Build a new OpenMP 'num_teams' clause.
@@ -1892,10 +1944,12 @@ public:
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
OMPClause *RebuildOMPToClause(ArrayRef<Expr *> VarList,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
- return getSema().ActOnOpenMPToClause(VarList, StartLoc, LParenLoc, EndLoc);
+ CXXScopeSpec &MapperIdScopeSpec,
+ DeclarationNameInfo &MapperId,
+ const OMPVarListLocTy &Locs,
+ ArrayRef<Expr *> UnresolvedMappers) {
+ return getSema().ActOnOpenMPToClause(VarList, MapperIdScopeSpec, MapperId,
+ Locs, UnresolvedMappers);
}
/// Build a new OpenMP 'from' clause.
@@ -1903,11 +1957,12 @@ public:
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
OMPClause *RebuildOMPFromClause(ArrayRef<Expr *> VarList,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
- return getSema().ActOnOpenMPFromClause(VarList, StartLoc, LParenLoc,
- EndLoc);
+ CXXScopeSpec &MapperIdScopeSpec,
+ DeclarationNameInfo &MapperId,
+ const OMPVarListLocTy &Locs,
+ ArrayRef<Expr *> UnresolvedMappers) {
+ return getSema().ActOnOpenMPFromClause(VarList, MapperIdScopeSpec, MapperId,
+ Locs, UnresolvedMappers);
}
/// Build a new OpenMP 'use_device_ptr' clause.
@@ -1915,11 +1970,8 @@ public:
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
OMPClause *RebuildOMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
- return getSema().ActOnOpenMPUseDevicePtrClause(VarList, StartLoc, LParenLoc,
- EndLoc);
+ const OMPVarListLocTy &Locs) {
+ return getSema().ActOnOpenMPUseDevicePtrClause(VarList, Locs);
}
/// Build a new OpenMP 'is_device_ptr' clause.
@@ -1927,11 +1979,8 @@ public:
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
OMPClause *RebuildOMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
- return getSema().ActOnOpenMPIsDevicePtrClause(VarList, StartLoc, LParenLoc,
- EndLoc);
+ const OMPVarListLocTy &Locs) {
+ return getSema().ActOnOpenMPIsDevicePtrClause(VarList, Locs);
}
/// Rebuild the operand to an Objective-C \@synchronized statement.
@@ -2232,8 +2281,8 @@ public:
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig = nullptr) {
- return getSema().ActOnCallExpr(/*Scope=*/nullptr, Callee, LParenLoc,
- Args, RParenLoc, ExecConfig);
+ return getSema().BuildCallExpr(/*Scope=*/nullptr, Callee, LParenLoc, Args,
+ RParenLoc, ExecConfig);
}
/// Build a new member access expression.
@@ -2603,6 +2652,16 @@ public:
ListInitialization);
}
+ /// Build a new C++ __builtin_bit_cast expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildBuiltinBitCastExpr(SourceLocation KWLoc,
+ TypeSourceInfo *TSI, Expr *Sub,
+ SourceLocation RParenLoc) {
+ return getSema().BuildBuiltinBitCastExpr(KWLoc, TSI, Sub, RParenLoc);
+ }
+
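Hedged usage of the builtin this hook forwards to (assuming 32-bit float and unsigned):
constexpr float f = 1.0f;
constexpr unsigned u = __builtin_bit_cast(unsigned, f);   // 0x3f800000, usable in
                                                          // constant evaluation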
/// Build a new C++ typeid(type) expression.
///
/// By default, performs semantic analysis to build the new expression.
@@ -2660,8 +2719,7 @@ public:
ExprResult RebuildCXXThisExpr(SourceLocation ThisLoc,
QualType ThisType,
bool isImplicit) {
- getSema().CheckCXXThisCapture(ThisLoc);
- return new (getSema().Context) CXXThisExpr(ThisLoc, ThisType, isImplicit);
+ return getSema().BuildCXXThisExpr(ThisLoc, ThisType, isImplicit);
}
/// Build a new C++ throw expression.
@@ -2678,9 +2736,9 @@ public:
/// By default, builds a new default-argument expression, which does not
/// require any semantic analysis. Subclasses may override this routine to
/// provide different behavior.
- ExprResult RebuildCXXDefaultArgExpr(SourceLocation Loc,
- ParmVarDecl *Param) {
- return CXXDefaultArgExpr::Create(getSema().Context, Loc, Param);
+ ExprResult RebuildCXXDefaultArgExpr(SourceLocation Loc, ParmVarDecl *Param) {
+ return CXXDefaultArgExpr::Create(getSema().Context, Loc, Param,
+ getSema().CurContext);
}
/// Build a new C++11 default-initialization expression.
@@ -2690,7 +2748,8 @@ public:
/// routine to provide different behavior.
ExprResult RebuildCXXDefaultInitExpr(SourceLocation Loc,
FieldDecl *Field) {
- return CXXDefaultInitExpr::Create(getSema().Context, Loc, Field);
+ return CXXDefaultInitExpr::Create(getSema().Context, Loc, Field,
+ getSema().CurContext);
}
/// Build a new C++ zero-initialization expression.
@@ -2716,7 +2775,7 @@ public:
SourceRange TypeIdParens,
QualType AllocatedType,
TypeSourceInfo *AllocatedTypeInfo,
- Expr *ArraySize,
+ Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer) {
return getSema().BuildCXXNew(StartLoc, UseGlobal,
@@ -2944,6 +3003,18 @@ public:
RParenLoc, Length, PartialArgs);
}
+ /// Build a new expression representing a call to a source location
+ /// builtin.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
+ SourceLocation BuiltinLoc,
+ SourceLocation RPLoc,
+ DeclContext *ParentContext) {
+ return getSema().BuildSourceLocExpr(Kind, BuiltinLoc, RPLoc, ParentContext);
+ }
+
/// Build a new Objective-C boxed expression.
///
/// By default, performs semantic analysis to build the new expression.
@@ -3219,9 +3290,10 @@ public:
ExprResult RebuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
- SourceLocation RParenLoc) {
+ SourceLocation RParenLoc,
+ Optional<unsigned> NumExpansions) {
return getSema().BuildCXXFoldExpr(LParenLoc, LHS, Operator, EllipsisLoc,
- RHS, RParenLoc);
+ RHS, RParenLoc, NumExpansions);
}
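An illustrative fold-expression; NumExpansions records how many elements the pack is known to expand to:
template <class... Ts> auto sum(Ts... ts) { return (0 + ... + ts); }
int six = sum(1, 2, 3);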
/// Build an empty C++1z fold-expression with the given operator.
@@ -3270,7 +3342,7 @@ private:
};
template <typename Derived>
-StmtResult TreeTransform<Derived>::TransformStmt(Stmt *S, bool DiscardedValue) {
+StmtResult TreeTransform<Derived>::TransformStmt(Stmt *S, StmtDiscardKind SDK) {
if (!S)
return S;
@@ -3278,8 +3350,12 @@ StmtResult TreeTransform<Derived>::TransformStmt(Stmt *S, bool DiscardedValue) {
case Stmt::NoStmtClass: break;
// Transform individual statement nodes
+ // Pass SDK into statements that can produce a value
#define STMT(Node, Parent) \
case Stmt::Node##Class: return getDerived().Transform##Node(cast<Node>(S));
+#define VALUESTMT(Node, Parent) \
+ case Stmt::Node##Class: \
+ return getDerived().Transform##Node(cast<Node>(S), SDK);
#define ABSTRACT_STMT(Node)
#define EXPR(Node, Parent)
#include "clang/AST/StmtNodes.inc"
@@ -3291,10 +3367,10 @@ StmtResult TreeTransform<Derived>::TransformStmt(Stmt *S, bool DiscardedValue) {
#include "clang/AST/StmtNodes.inc"
{
ExprResult E = getDerived().TransformExpr(cast<Expr>(S));
- if (E.isInvalid())
- return StmtError();
- return getSema().ActOnExprStmt(E, DiscardedValue);
+ if (SDK == SDK_StmtExprResult)
+ E = getSema().ActOnStmtExprResult(E);
+ return getSema().ActOnExprStmt(E, SDK == SDK_Discarded);
}
}
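A hedged GNU statement-expression example of what SDK_StmtExprResult marks:
int next = ({ int v = 41; v + 1; });   // 'v + 1' is the result expression; its
                                       // value must not be discarded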
@@ -3879,10 +3955,6 @@ template<typename Derived>
bool TreeTransform<Derived>::TransformTemplateArgument(
const TemplateArgumentLoc &Input,
TemplateArgumentLoc &Output, bool Uneval) {
- EnterExpressionEvaluationContext EEEC(
- SemaRef, Sema::ExpressionEvaluationContext::ConstantEvaluated,
- /*LambdaContextDecl=*/nullptr, /*ExprContext=*/
- Sema::ExpressionEvaluationContextRecord::EK_TemplateArgument);
const TemplateArgument &Arg = Input.getArgument();
switch (Arg.getKind()) {
case TemplateArgument::Null:
@@ -3931,9 +4003,11 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
case TemplateArgument::Expression: {
// Template argument expressions are constant expressions.
EnterExpressionEvaluationContext Unevaluated(
- getSema(), Uneval
- ? Sema::ExpressionEvaluationContext::Unevaluated
- : Sema::ExpressionEvaluationContext::ConstantEvaluated);
+ getSema(),
+ Uneval ? Sema::ExpressionEvaluationContext::Unevaluated
+ : Sema::ExpressionEvaluationContext::ConstantEvaluated,
+ /*LambdaContextDecl=*/nullptr, /*ExprContext=*/
+ Sema::ExpressionEvaluationContextRecord::EK_TemplateArgument);
Expr *InputExpr = Input.getSourceExpression();
if (!InputExpr) InputExpr = Input.getArgument().getAsExpr();
@@ -4462,6 +4536,14 @@ QualType TreeTransform<Derived>::TransformDecayedType(TypeLocBuilder &TLB,
return Result;
}
+/// Helper to deduce addr space of a pointee type in OpenCL mode.
+/// If the type is updated it will be overwritten in PointeeType param.
+static void deduceOpenCLPointeeAddrSpace(Sema &SemaRef, QualType &PointeeType) {
+ if (PointeeType.getAddressSpace() == LangAS::Default)
+ PointeeType = SemaRef.Context.getAddrSpaceQualType(PointeeType,
+ LangAS::opencl_generic);
+}
+
template<typename Derived>
QualType TreeTransform<Derived>::TransformPointerType(TypeLocBuilder &TLB,
PointerTypeLoc TL) {
@@ -4470,6 +4552,9 @@ QualType TreeTransform<Derived>::TransformPointerType(TypeLocBuilder &TLB,
if (PointeeType.isNull())
return QualType();
+ if (SemaRef.getLangOpts().OpenCL)
+ deduceOpenCLPointeeAddrSpace(SemaRef, PointeeType);
+
QualType Result = TL.getType();
if (PointeeType->getAs<ObjCObjectType>()) {
// A dependent pointer type 'T *' is being transformed such
@@ -4508,6 +4593,9 @@ TreeTransform<Derived>::TransformBlockPointerType(TypeLocBuilder &TLB,
if (PointeeType.isNull())
return QualType();
+ if (SemaRef.getLangOpts().OpenCL)
+ deduceOpenCLPointeeAddrSpace(SemaRef, PointeeType);
+
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() ||
PointeeType != TL.getPointeeLoc().getType()) {
@@ -4537,6 +4625,9 @@ TreeTransform<Derived>::TransformReferenceType(TypeLocBuilder &TLB,
if (PointeeType.isNull())
return QualType();
+ if (SemaRef.getLangOpts().OpenCL)
+ deduceOpenCLPointeeAddrSpace(SemaRef, PointeeType);
+
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() ||
PointeeType != T->getPointeeTypeAsWritten()) {
@@ -5301,13 +5392,6 @@ QualType TreeTransform<Derived>::TransformFunctionProtoType(
if (ResultType.isNull())
return QualType();
- // Return type can not be qualified with an address space.
- if (ResultType.getAddressSpace() != LangAS::Default) {
- SemaRef.Diag(TL.getReturnLoc().getBeginLoc(),
- diag::err_attribute_address_function_type);
- return QualType();
- }
-
if (getDerived().TransformFunctionTypeParams(
TL.getBeginLoc(), TL.getParams(),
TL.getTypePtr()->param_type_begin(),
@@ -6160,6 +6244,27 @@ TreeTransform<Derived>::TransformParenType(TypeLocBuilder &TLB,
return Result;
}
+template <typename Derived>
+QualType
+TreeTransform<Derived>::TransformMacroQualifiedType(TypeLocBuilder &TLB,
+ MacroQualifiedTypeLoc TL) {
+ QualType Inner = getDerived().TransformType(TLB, TL.getInnerLoc());
+ if (Inner.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() || Inner != TL.getInnerLoc().getType()) {
+ Result =
+ getDerived().RebuildMacroQualifiedType(Inner, TL.getMacroIdentifier());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ MacroQualifiedTypeLoc NewTL = TLB.push<MacroQualifiedTypeLoc>(Result);
+ NewTL.setExpansionLoc(TL.getExpansionLoc());
+ return Result;
+}
+
template<typename Derived>
QualType TreeTransform<Derived>::TransformDependentNameType(
TypeLocBuilder &TLB, DependentNameTypeLoc TL) {
@@ -6517,12 +6622,13 @@ TreeTransform<Derived>::TransformCompoundStmt(CompoundStmt *S,
bool IsStmtExpr) {
Sema::CompoundScopeRAII CompoundScope(getSema());
+ const Stmt *ExprResult = S->getStmtExprResult();
bool SubStmtInvalid = false;
bool SubStmtChanged = false;
SmallVector<Stmt*, 8> Statements;
for (auto *B : S->body()) {
- StmtResult Result =
- getDerived().TransformStmt(B, !IsStmtExpr || B != S->body_back());
+ StmtResult Result = getDerived().TransformStmt(
+ B, IsStmtExpr && B == ExprResult ? SDK_StmtExprResult : SDK_Discarded);
if (Result.isInvalid()) {
// Immediately fail if this was a DeclStmt, since it's very
@@ -6585,7 +6691,8 @@ TreeTransform<Derived>::TransformCaseStmt(CaseStmt *S) {
return StmtError();
// Transform the statement following the case
- StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt());
+ StmtResult SubStmt =
+ getDerived().TransformStmt(S->getSubStmt());
if (SubStmt.isInvalid())
return StmtError();
@@ -6593,11 +6700,11 @@ TreeTransform<Derived>::TransformCaseStmt(CaseStmt *S) {
return getDerived().RebuildCaseStmtBody(Case.get(), SubStmt.get());
}
-template<typename Derived>
-StmtResult
-TreeTransform<Derived>::TransformDefaultStmt(DefaultStmt *S) {
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformDefaultStmt(DefaultStmt *S) {
// Transform the statement following the default case
- StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt());
+ StmtResult SubStmt =
+ getDerived().TransformStmt(S->getSubStmt());
if (SubStmt.isInvalid())
return StmtError();
@@ -6608,8 +6715,8 @@ TreeTransform<Derived>::TransformDefaultStmt(DefaultStmt *S) {
template<typename Derived>
StmtResult
-TreeTransform<Derived>::TransformLabelStmt(LabelStmt *S) {
- StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt());
+TreeTransform<Derived>::TransformLabelStmt(LabelStmt *S, StmtDiscardKind SDK) {
+ StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt(), SDK);
if (SubStmt.isInvalid())
return StmtError();
@@ -6618,6 +6725,11 @@ TreeTransform<Derived>::TransformLabelStmt(LabelStmt *S) {
if (!LD)
return StmtError();
+ // If we're transforming "in-place" (we're not creating new local
+ // declarations), assume we're replacing the old label statement
+ // and clear out the reference to it.
+ if (LD == S->getDecl())
+ S->getDecl()->setStmt(nullptr);
// FIXME: Pass the real colon location in.
return getDerived().RebuildLabelStmt(S->getIdentLoc(),
@@ -6643,7 +6755,9 @@ const Attr *TreeTransform<Derived>::TransformAttr(const Attr *R) {
}
template <typename Derived>
-StmtResult TreeTransform<Derived>::TransformAttributedStmt(AttributedStmt *S) {
+StmtResult
+TreeTransform<Derived>::TransformAttributedStmt(AttributedStmt *S,
+ StmtDiscardKind SDK) {
bool AttrsChanged = false;
SmallVector<const Attr *, 1> Attrs;
@@ -6654,7 +6768,7 @@ StmtResult TreeTransform<Derived>::TransformAttributedStmt(AttributedStmt *S) {
Attrs.push_back(R);
}
- StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt());
+ StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt(), SDK);
if (SubStmt.isInvalid())
return StmtError();
@@ -6967,6 +7081,16 @@ TreeTransform<Derived>::TransformGCCAsmStmt(GCCAsmStmt *S) {
Exprs.push_back(Result.get());
}
+ // Go through the Labels.
+ for (unsigned I = 0, E = S->getNumLabels(); I != E; ++I) {
+ Names.push_back(S->getLabelIdentifier(I));
+
+ ExprResult Result = getDerived().TransformExpr(S->getLabelExpr(I));
+ if (Result.isInvalid())
+ return StmtError();
+ ExprsChanged |= Result.get() != S->getLabelExpr(I);
+ Exprs.push_back(Result.get());
+ }
if (!getDerived().AlwaysRebuild() && !ExprsChanged)
return S;
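A hedged 'asm goto' sketch (x86 assumed); the label operands are what the new loop above transforms:
int is_zero(int x) {
  asm goto("testl %0, %0; jz %l[zero]" : /* no outputs */ : "r"(x) : "cc" : zero);
  return 0;
zero:
  return 1;
}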
@@ -6980,7 +7104,8 @@ TreeTransform<Derived>::TransformGCCAsmStmt(GCCAsmStmt *S) {
S->isVolatile(), S->getNumOutputs(),
S->getNumInputs(), Names.data(),
Constraints, Exprs, AsmString.get(),
- Clobbers, S->getRParenLoc());
+ Clobbers, S->getNumLabels(),
+ S->getRParenLoc());
}
template<typename Derived>
@@ -7039,7 +7164,7 @@ TreeTransform<Derived>::TransformCoroutineBodyStmt(CoroutineBodyStmt *S) {
auto *Promise = SemaRef.buildCoroutinePromise(FD->getLocation());
if (!Promise)
return StmtError();
- getDerived().transformedLocalDecl(S->getPromiseDecl(), Promise);
+ getDerived().transformedLocalDecl(S->getPromiseDecl(), {Promise});
ScopeInfo->CoroutinePromise = Promise;
// Transform the implicit coroutine statements we built during the initial
@@ -7071,13 +7196,22 @@ TreeTransform<Derived>::TransformCoroutineBodyStmt(CoroutineBodyStmt *S) {
Builder.ReturnValue = Res.get();
if (S->hasDependentPromiseType()) {
- assert(!Promise->getType()->isDependentType() &&
- "the promise type must no longer be dependent");
- assert(!S->getFallthroughHandler() && !S->getExceptionHandler() &&
- !S->getReturnStmtOnAllocFailure() && !S->getDeallocate() &&
- "these nodes should not have been built yet");
- if (!Builder.buildDependentStatements())
- return StmtError();
+ // PR41909: We may find a generic coroutine lambda definition within a
+ // template function that is being instantiated. In this case, the lambda
+ // will have a dependent promise type, until it is used in an expression
+ // that creates an instantiation with a non-dependent promise type. We
+ // should not assert or build coroutine dependent statements for such a
+ // generic lambda.
+ auto *MD = dyn_cast_or_null<CXXMethodDecl>(FD);
+ if (!MD || !MD->getParent()->isGenericLambda()) {
+ assert(!Promise->getType()->isDependentType() &&
+ "the promise type must no longer be dependent");
+ assert(!S->getFallthroughHandler() && !S->getExceptionHandler() &&
+ !S->getReturnStmtOnAllocFailure() && !S->getDeallocate() &&
+ "these nodes should not have been built yet");
+ if (!Builder.buildDependentStatements())
+ return StmtError();
+ }
} else {
if (auto *OnFallthrough = S->getFallthroughHandler()) {
StmtResult Res = getDerived().TransformStmt(OnFallthrough);
@@ -7359,7 +7493,8 @@ StmtResult
TreeTransform<Derived>::TransformObjCForCollectionStmt(
ObjCForCollectionStmt *S) {
// Transform the element statement.
- StmtResult Element = getDerived().TransformStmt(S->getElement());
+ StmtResult Element =
+ getDerived().TransformStmt(S->getElement(), SDK_NotDiscarded);
if (Element.isInvalid())
return StmtError();
@@ -8325,6 +8460,16 @@ TreeTransform<Derived>::TransformOMPSafelenClause(OMPSafelenClause *C) {
template <typename Derived>
OMPClause *
+TreeTransform<Derived>::TransformOMPAllocatorClause(OMPAllocatorClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getAllocator());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPAllocatorClause(
+ E.get(), C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *
TreeTransform<Derived>::TransformOMPSimdlenClause(OMPSimdlenClause *C) {
ExprResult E = getDerived().TransformExpr(C->getSimdlen());
if (E.isInvalid())
@@ -8797,8 +8942,85 @@ TreeTransform<Derived>::TransformOMPDeviceClause(OMPDeviceClause *C) {
C->getLParenLoc(), C->getEndLoc());
}
+template <typename Derived, class T>
+bool transformOMPMappableExprListClause(
+ TreeTransform<Derived> &TT, OMPMappableExprListClause<T> *C,
+ llvm::SmallVectorImpl<Expr *> &Vars, CXXScopeSpec &MapperIdScopeSpec,
+ DeclarationNameInfo &MapperIdInfo,
+ llvm::SmallVectorImpl<Expr *> &UnresolvedMappers) {
+ // Transform expressions in the list.
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = TT.getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return true;
+ Vars.push_back(EVar.get());
+ }
+ // Transform mapper scope specifier and identifier.
+ NestedNameSpecifierLoc QualifierLoc;
+ if (C->getMapperQualifierLoc()) {
+ QualifierLoc = TT.getDerived().TransformNestedNameSpecifierLoc(
+ C->getMapperQualifierLoc());
+ if (!QualifierLoc)
+ return true;
+ }
+ MapperIdScopeSpec.Adopt(QualifierLoc);
+ MapperIdInfo = C->getMapperIdInfo();
+ if (MapperIdInfo.getName()) {
+ MapperIdInfo = TT.getDerived().TransformDeclarationNameInfo(MapperIdInfo);
+ if (!MapperIdInfo.getName())
+ return true;
+ }
+ // Build a list of all candidate OMPDeclareMapperDecls, which is provided by
+ // the previous user-defined mapper lookup in dependent environment.
+ for (auto *E : C->mapperlists()) {
+ // Transform all the decls.
+ if (E) {
+ auto *ULE = cast<UnresolvedLookupExpr>(E);
+ UnresolvedSet<8> Decls;
+ for (auto *D : ULE->decls()) {
+ NamedDecl *InstD =
+ cast<NamedDecl>(TT.getDerived().TransformDecl(E->getExprLoc(), D));
+ Decls.addDecl(InstD, InstD->getAccess());
+ }
+ UnresolvedMappers.push_back(UnresolvedLookupExpr::Create(
+ TT.getSema().Context, /*NamingClass=*/nullptr,
+ MapperIdScopeSpec.getWithLocInContext(TT.getSema().Context),
+ MapperIdInfo, /*ADL=*/true, ULE->isOverloaded(), Decls.begin(),
+ Decls.end()));
+ } else {
+ UnresolvedMappers.push_back(nullptr);
+ }
+ }
+ return false;
+}
+
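A hedged OpenMP 5.0 declare-mapper example whose candidate mappers the helper above re-resolves:
struct vec { int len; double *data; };
#pragma omp declare mapper(id : struct vec v) map(v, v.data[0:v.len])
void run(struct vec *p) {
  #pragma omp target map(mapper(id), tofrom : *p)
  { p->data[0] = 1.0; }
}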
template <typename Derived>
OMPClause *TreeTransform<Derived>::TransformOMPMapClause(OMPMapClause *C) {
+ OMPVarListLocTy Locs(C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+ llvm::SmallVector<Expr *, 16> Vars;
+ CXXScopeSpec MapperIdScopeSpec;
+ DeclarationNameInfo MapperIdInfo;
+ llvm::SmallVector<Expr *, 16> UnresolvedMappers;
+ if (transformOMPMappableExprListClause<Derived, OMPMapClause>(
+ *this, C, Vars, MapperIdScopeSpec, MapperIdInfo, UnresolvedMappers))
+ return nullptr;
+ return getDerived().RebuildOMPMapClause(
+ C->getMapTypeModifiers(), C->getMapTypeModifiersLoc(), MapperIdScopeSpec,
+ MapperIdInfo, C->getMapType(), C->isImplicitMapType(), C->getMapLoc(),
+ C->getColonLoc(), Vars, Locs, UnresolvedMappers);
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPAllocateClause(OMPAllocateClause *C) {
+ Expr *Allocator = C->getAllocator();
+ if (Allocator) {
+ ExprResult AllocatorRes = getDerived().TransformExpr(Allocator);
+ if (AllocatorRes.isInvalid())
+ return nullptr;
+ Allocator = AllocatorRes.get();
+ }
llvm::SmallVector<Expr *, 16> Vars;
Vars.reserve(C->varlist_size());
for (auto *VE : C->varlists()) {
@@ -8807,10 +9029,9 @@ OMPClause *TreeTransform<Derived>::TransformOMPMapClause(OMPMapClause *C) {
return nullptr;
Vars.push_back(EVar.get());
}
- return getDerived().RebuildOMPMapClause(
- C->getMapTypeModifiers(), C->getMapTypeModifiersLoc(), C->getMapType(),
- C->isImplicitMapType(), C->getMapLoc(), C->getColonLoc(), Vars,
- C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+ return getDerived().RebuildOMPAllocateClause(
+ Allocator, Vars, C->getBeginLoc(), C->getLParenLoc(), C->getColonLoc(),
+ C->getEndLoc());
}
template <typename Derived>
@@ -8891,30 +9112,30 @@ TreeTransform<Derived>::TransformOMPDefaultmapClause(OMPDefaultmapClause *C) {
template <typename Derived>
OMPClause *TreeTransform<Derived>::TransformOMPToClause(OMPToClause *C) {
+ OMPVarListLocTy Locs(C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
llvm::SmallVector<Expr *, 16> Vars;
- Vars.reserve(C->varlist_size());
- for (auto *VE : C->varlists()) {
- ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
- if (EVar.isInvalid())
- return 0;
- Vars.push_back(EVar.get());
- }
- return getDerived().RebuildOMPToClause(Vars, C->getBeginLoc(),
- C->getLParenLoc(), C->getEndLoc());
+ CXXScopeSpec MapperIdScopeSpec;
+ DeclarationNameInfo MapperIdInfo;
+ llvm::SmallVector<Expr *, 16> UnresolvedMappers;
+ if (transformOMPMappableExprListClause<Derived, OMPToClause>(
+ *this, C, Vars, MapperIdScopeSpec, MapperIdInfo, UnresolvedMappers))
+ return nullptr;
+ return getDerived().RebuildOMPToClause(Vars, MapperIdScopeSpec, MapperIdInfo,
+ Locs, UnresolvedMappers);
}
template <typename Derived>
OMPClause *TreeTransform<Derived>::TransformOMPFromClause(OMPFromClause *C) {
+ OMPVarListLocTy Locs(C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
llvm::SmallVector<Expr *, 16> Vars;
- Vars.reserve(C->varlist_size());
- for (auto *VE : C->varlists()) {
- ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
- if (EVar.isInvalid())
- return 0;
- Vars.push_back(EVar.get());
- }
- return getDerived().RebuildOMPFromClause(Vars, C->getBeginLoc(),
- C->getLParenLoc(), C->getEndLoc());
+ CXXScopeSpec MapperIdScopeSpec;
+ DeclarationNameInfo MapperIdInfo;
+ llvm::SmallVector<Expr *, 16> UnresolvedMappers;
+ if (transformOMPMappableExprListClause<Derived, OMPFromClause>(
+ *this, C, Vars, MapperIdScopeSpec, MapperIdInfo, UnresolvedMappers))
+ return nullptr;
+ return getDerived().RebuildOMPFromClause(
+ Vars, MapperIdScopeSpec, MapperIdInfo, Locs, UnresolvedMappers);
}
template <typename Derived>
@@ -8928,8 +9149,8 @@ OMPClause *TreeTransform<Derived>::TransformOMPUseDevicePtrClause(
return nullptr;
Vars.push_back(EVar.get());
}
- return getDerived().RebuildOMPUseDevicePtrClause(
- Vars, C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+ OMPVarListLocTy Locs(C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+ return getDerived().RebuildOMPUseDevicePtrClause(Vars, Locs);
}
template <typename Derived>
@@ -8943,8 +9164,8 @@ TreeTransform<Derived>::TransformOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
return nullptr;
Vars.push_back(EVar.get());
}
- return getDerived().RebuildOMPIsDevicePtrClause(
- Vars, C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+ OMPVarListLocTy Locs(C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+ return getDerived().RebuildOMPIsDevicePtrClause(Vars, Locs);
}
//===----------------------------------------------------------------------===//
@@ -9072,10 +9293,10 @@ TreeTransform<Derived>::TransformGenericSelectionExpr(GenericSelectionExpr *E) {
SmallVector<Expr *, 4> AssocExprs;
SmallVector<TypeSourceInfo *, 4> AssocTypes;
- for (unsigned i = 0; i != E->getNumAssocs(); ++i) {
- TypeSourceInfo *TS = E->getAssocTypeSourceInfo(i);
- if (TS) {
- TypeSourceInfo *AssocType = getDerived().TransformType(TS);
+ for (const GenericSelectionExpr::Association &Assoc : E->associations()) {
+ TypeSourceInfo *TSI = Assoc.getTypeSourceInfo();
+ if (TSI) {
+ TypeSourceInfo *AssocType = getDerived().TransformType(TSI);
if (!AssocType)
return ExprError();
AssocTypes.push_back(AssocType);
@@ -9083,7 +9304,8 @@ TreeTransform<Derived>::TransformGenericSelectionExpr(GenericSelectionExpr *E) {
AssocTypes.push_back(nullptr);
}
- ExprResult AssocExpr = getDerived().TransformExpr(E->getAssocExpr(i));
+ ExprResult AssocExpr =
+ getDerived().TransformExpr(Assoc.getAssociationExpr());
if (AssocExpr.isInvalid())
return ExprError();
AssocExprs.push_back(AssocExpr.get());
@@ -9974,6 +10196,19 @@ TreeTransform<Derived>::TransformCXXMemberCallExpr(CXXMemberCallExpr *E) {
return getDerived().TransformCallExpr(E);
}
+template <typename Derived>
+ExprResult TreeTransform<Derived>::TransformSourceLocExpr(SourceLocExpr *E) {
+ bool NeedRebuildFunc = E->getIdentKind() == SourceLocExpr::Function &&
+ getSema().CurContext != E->getParentContext();
+
+ if (!getDerived().AlwaysRebuild() && !NeedRebuildFunc)
+ return E;
+
+ return getDerived().RebuildSourceLocExpr(E->getIdentKind(), E->getBeginLoc(),
+ E->getEndLoc(),
+ getSema().CurContext);
+}
+
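A hedged sketch of why the parent context matters: the builtin must name the function where the default argument is actually used.
const char *tag(const char *fn = __builtin_FUNCTION()) { return fn; }
void caller() { tag(); }    // fn == "caller" at this use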
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCUDAKernelCallExpr(CUDAKernelCallExpr *E) {
@@ -10032,6 +10267,22 @@ TreeTransform<Derived>::TransformCXXNamedCastExpr(CXXNamedCastExpr *E) {
template<typename Derived>
ExprResult
+TreeTransform<Derived>::TransformBuiltinBitCastExpr(BuiltinBitCastExpr *BCE) {
+ TypeSourceInfo *TSI =
+ getDerived().TransformType(BCE->getTypeInfoAsWritten());
+ if (!TSI)
+ return ExprError();
+
+ ExprResult Sub = getDerived().TransformExpr(BCE->getSubExpr());
+ if (Sub.isInvalid())
+ return ExprError();
+
+ return getDerived().RebuildBuiltinBitCastExpr(BCE->getBeginLoc(), TSI,
+ Sub.get(), BCE->getEndLoc());
+}
+
+template<typename Derived>
+ExprResult
TreeTransform<Derived>::TransformCXXStaticCastExpr(CXXStaticCastExpr *E) {
return getDerived().TransformCXXNamedCastExpr(E);
}
@@ -10169,8 +10420,9 @@ TreeTransform<Derived>::TransformCXXThisExpr(CXXThisExpr *E) {
QualType T = getSema().getCurrentThisType();
if (!getDerived().AlwaysRebuild() && T == E->getType()) {
- // Make sure that we capture 'this'.
- getSema().CheckCXXThisCapture(E->getBeginLoc());
+ // Mark it referenced in the new context regardless.
+ // FIXME: this is a bit instantiation-specific.
+ getSema().MarkThisReferenced(E);
return E;
}
@@ -10200,8 +10452,8 @@ TreeTransform<Derived>::TransformCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
if (!Param)
return ExprError();
- if (!getDerived().AlwaysRebuild() &&
- Param == E->getParam())
+ if (!getDerived().AlwaysRebuild() && Param == E->getParam() &&
+ E->getUsedContext() == SemaRef.CurContext)
return E;
return getDerived().RebuildCXXDefaultArgExpr(E->getUsedLocation(), Param);
@@ -10215,7 +10467,8 @@ TreeTransform<Derived>::TransformCXXDefaultInitExpr(CXXDefaultInitExpr *E) {
if (!Field)
return ExprError();
- if (!getDerived().AlwaysRebuild() && Field == E->getField())
+ if (!getDerived().AlwaysRebuild() && Field == E->getField() &&
+ E->getUsedContext() == SemaRef.CurContext)
return E;
return getDerived().RebuildCXXDefaultInitExpr(E->getExprLoc(), Field);
@@ -10248,9 +10501,16 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
return ExprError();
// Transform the size of the array we're allocating (if any).
- ExprResult ArraySize = getDerived().TransformExpr(E->getArraySize());
- if (ArraySize.isInvalid())
- return ExprError();
+ Optional<Expr *> ArraySize;
+ if (Optional<Expr *> OldArraySize = E->getArraySize()) {
+ ExprResult NewArraySize;
+ if (*OldArraySize) {
+ NewArraySize = getDerived().TransformExpr(*OldArraySize);
+ if (NewArraySize.isInvalid())
+ return ExprError();
+ }
+ ArraySize = NewArraySize.get();
+ }
// Transform the placement arguments (if any).
bool ArgumentChanged = false;
@@ -10287,7 +10547,7 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
if (!getDerived().AlwaysRebuild() &&
AllocTypeInfo == E->getAllocatedTypeSourceInfo() &&
- ArraySize.get() == E->getArraySize() &&
+ ArraySize == E->getArraySize() &&
NewInit.get() == OldInit &&
OperatorNew == E->getOperatorNew() &&
OperatorDelete == E->getOperatorDelete() &&
@@ -10314,7 +10574,7 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
}
QualType AllocType = AllocTypeInfo->getType();
- if (!ArraySize.get()) {
+ if (!ArraySize) {
// If no array size was specified, but the new expression was
// instantiated with an array type (e.g., "new T" where T is
// instantiated with "int[4]"), extract the outer bound from the
@@ -10342,7 +10602,7 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
E->getBeginLoc(), E->isGlobalNew(),
/*FIXME:*/ E->getBeginLoc(), PlacementArgs,
/*FIXME:*/ E->getBeginLoc(), E->getTypeIdParens(), AllocType,
- AllocTypeInfo, ArraySize.get(), E->getDirectInitRange(), NewInit.get());
+ AllocTypeInfo, ArraySize, E->getDirectInitRange(), NewInit.get());
}
template<typename Derived>
@@ -10984,33 +11244,78 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
// Transform any init-capture expressions before entering the scope of the
// lambda body, because they are not semantically within that scope.
typedef std::pair<ExprResult, QualType> InitCaptureInfoTy;
- SmallVector<InitCaptureInfoTy, 8> InitCaptureExprsAndTypes;
- InitCaptureExprsAndTypes.resize(E->explicit_capture_end() -
- E->explicit_capture_begin());
+ struct TransformedInitCapture {
+ // The location of the ... if the result is retaining a pack expansion.
+ SourceLocation EllipsisLoc;
+ // Zero or more expansions of the init-capture.
+ SmallVector<InitCaptureInfoTy, 4> Expansions;
+ };
+ SmallVector<TransformedInitCapture, 4> InitCaptures;
+ InitCaptures.resize(E->explicit_capture_end() - E->explicit_capture_begin());
for (LambdaExpr::capture_iterator C = E->capture_begin(),
CEnd = E->capture_end();
C != CEnd; ++C) {
if (!E->isInitCapture(C))
continue;
- EnterExpressionEvaluationContext EEEC(
- getSema(), Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
- ExprResult NewExprInitResult = getDerived().TransformInitializer(
- C->getCapturedVar()->getInit(),
- C->getCapturedVar()->getInitStyle() == VarDecl::CallInit);
-
- if (NewExprInitResult.isInvalid())
- return ExprError();
- Expr *NewExprInit = NewExprInitResult.get();
+ TransformedInitCapture &Result = InitCaptures[C - E->capture_begin()];
VarDecl *OldVD = C->getCapturedVar();
- QualType NewInitCaptureType =
- getSema().buildLambdaInitCaptureInitialization(
- C->getLocation(), OldVD->getType()->isReferenceType(),
- OldVD->getIdentifier(),
- C->getCapturedVar()->getInitStyle() != VarDecl::CInit, NewExprInit);
- NewExprInitResult = NewExprInit;
- InitCaptureExprsAndTypes[C - E->capture_begin()] =
- std::make_pair(NewExprInitResult, NewInitCaptureType);
+
+ auto SubstInitCapture = [&](SourceLocation EllipsisLoc,
+ Optional<unsigned> NumExpansions) {
+ ExprResult NewExprInitResult = getDerived().TransformInitializer(
+ OldVD->getInit(), OldVD->getInitStyle() == VarDecl::CallInit);
+
+ if (NewExprInitResult.isInvalid()) {
+ Result.Expansions.push_back(InitCaptureInfoTy(ExprError(), QualType()));
+ return;
+ }
+ Expr *NewExprInit = NewExprInitResult.get();
+
+ QualType NewInitCaptureType =
+ getSema().buildLambdaInitCaptureInitialization(
+ C->getLocation(), OldVD->getType()->isReferenceType(),
+ EllipsisLoc, NumExpansions, OldVD->getIdentifier(),
+ C->getCapturedVar()->getInitStyle() != VarDecl::CInit,
+ NewExprInit);
+ Result.Expansions.push_back(
+ InitCaptureInfoTy(NewExprInit, NewInitCaptureType));
+ };
+
+ // If this is an init-capture pack, consider expanding the pack now.
+ if (OldVD->isParameterPack()) {
+ PackExpansionTypeLoc ExpansionTL = OldVD->getTypeSourceInfo()
+ ->getTypeLoc()
+ .castAs<PackExpansionTypeLoc>();
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ SemaRef.collectUnexpandedParameterPacks(OldVD->getInit(), Unexpanded);
+
+ // Determine whether the set of unexpanded parameter packs can and should
+ // be expanded.
+ bool Expand = true;
+ bool RetainExpansion = false;
+ Optional<unsigned> OrigNumExpansions =
+ ExpansionTL.getTypePtr()->getNumExpansions();
+ Optional<unsigned> NumExpansions = OrigNumExpansions;
+ if (getDerived().TryExpandParameterPacks(
+ ExpansionTL.getEllipsisLoc(),
+ OldVD->getInit()->getSourceRange(), Unexpanded, Expand,
+ RetainExpansion, NumExpansions))
+ return ExprError();
+ if (Expand) {
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
+ SubstInitCapture(SourceLocation(), None);
+ }
+ }
+ if (!Expand || RetainExpansion) {
+ ForgetPartiallySubstitutedPackRAII Forget(getDerived());
+ SubstInitCapture(ExpansionTL.getEllipsisLoc(), NumExpansions);
+ Result.EllipsisLoc = ExpansionTL.getEllipsisLoc();
+ }
+ } else {
+ SubstInitCapture(SourceLocation(), None);
+ }
}
// Transform the template parameters, and add them to the current
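A hedged C++20 init-capture pack that the expansion logic above supports:
#include <utility>
template <class... Ts> auto capture_all(Ts... ts) {
  return [...xs = std::move(ts)] { return (xs + ... + 0); };
}
int nine = capture_all(2, 3, 4)();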
@@ -11048,19 +11353,25 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
LSI->GLTemplateParameterList = TPL;
// Create the local class that will describe the lambda.
+ CXXRecordDecl *OldClass = E->getLambdaClass();
CXXRecordDecl *Class
= getSema().createLambdaClosureType(E->getIntroducerRange(),
NewCallOpTSI,
/*KnownDependent=*/false,
E->getCaptureDefault());
- getDerived().transformedLocalDecl(E->getLambdaClass(), Class);
+ getDerived().transformedLocalDecl(OldClass, {Class});
+
+ Optional<std::pair<unsigned, Decl*>> Mangling;
+ if (getDerived().ReplacingOriginal())
+ Mangling = std::make_pair(OldClass->getLambdaManglingNumber(),
+ OldClass->getLambdaContextDecl());
// Build the call operator.
CXXMethodDecl *NewCallOperator = getSema().startLambdaDefinition(
Class, E->getIntroducerRange(), NewCallOpTSI,
E->getCallOperator()->getEndLoc(),
NewCallOpTSI->getTypeLoc().castAs<FunctionProtoTypeLoc>().getParams(),
- E->getCallOperator()->isConstexpr());
+ E->getCallOperator()->getConstexprKind(), Mangling);
LSI->CallOperator = NewCallOperator;
@@ -11078,7 +11389,7 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
}
getDerived().transformAttrs(E->getCallOperator(), NewCallOperator);
- getDerived().transformedLocalDecl(E->getCallOperator(), NewCallOperator);
+ getDerived().transformedLocalDecl(E->getCallOperator(), {NewCallOperator});
// Introduce the context of the call operator.
Sema::ContextRAII SavedContext(getSema(), NewCallOperator,
@@ -11096,16 +11407,13 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
bool Invalid = false;
// Transform captures.
- bool FinishedExplicitCaptures = false;
for (LambdaExpr::capture_iterator C = E->capture_begin(),
CEnd = E->capture_end();
C != CEnd; ++C) {
// When we hit the first implicit capture, tell Sema that we've finished
// the list of explicit captures.
- if (!FinishedExplicitCaptures && C->isImplicit()) {
- getSema().finishLambdaExplicitCaptures(LSI);
- FinishedExplicitCaptures = true;
- }
+ if (C->isImplicit())
+ break;
// Capturing 'this' is trivial.
if (C->capturesThis()) {
@@ -11121,24 +11429,33 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
// Rebuild init-captures, including the implied field declaration.
if (E->isInitCapture(C)) {
- InitCaptureInfoTy InitExprTypePair =
- InitCaptureExprsAndTypes[C - E->capture_begin()];
- ExprResult Init = InitExprTypePair.first;
- QualType InitQualType = InitExprTypePair.second;
- if (Init.isInvalid() || InitQualType.isNull()) {
- Invalid = true;
- continue;
- }
+ TransformedInitCapture &NewC = InitCaptures[C - E->capture_begin()];
+
VarDecl *OldVD = C->getCapturedVar();
- VarDecl *NewVD = getSema().createLambdaInitCaptureVarDecl(
- OldVD->getLocation(), InitExprTypePair.second, OldVD->getIdentifier(),
- OldVD->getInitStyle(), Init.get());
- if (!NewVD)
- Invalid = true;
- else {
- getDerived().transformedLocalDecl(OldVD, NewVD);
+ llvm::SmallVector<Decl*, 4> NewVDs;
+
+ for (InitCaptureInfoTy &Info : NewC.Expansions) {
+ ExprResult Init = Info.first;
+ QualType InitQualType = Info.second;
+ if (Init.isInvalid() || InitQualType.isNull()) {
+ Invalid = true;
+ break;
+ }
+ VarDecl *NewVD = getSema().createLambdaInitCaptureVarDecl(
+ OldVD->getLocation(), InitQualType, NewC.EllipsisLoc,
+ OldVD->getIdentifier(), OldVD->getInitStyle(), Init.get());
+ if (!NewVD) {
+ Invalid = true;
+ break;
+ }
+ NewVDs.push_back(NewVD);
+ getSema().addInitCapture(LSI, NewVD);
}
- getSema().buildInitCaptureField(LSI, NewVD);
+
+ if (Invalid)
+ break;
+
+ getDerived().transformedLocalDecl(OldVD, NewVDs);
continue;
}
@@ -11205,17 +11522,16 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
getSema().tryCaptureVariable(CapturedVar, C->getLocation(), Kind,
EllipsisLoc);
}
- if (!FinishedExplicitCaptures)
- getSema().finishLambdaExplicitCaptures(LSI);
+ getSema().finishLambdaExplicitCaptures(LSI);
- // Enter a new evaluation context to insulate the lambda from any
- // cleanups from the enclosing full-expression.
+ // FIXME: Sema's lambda-building mechanism expects us to push an expression
+ // evaluation context even if we're not transforming the function body.
getSema().PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
// Instantiate the body of the lambda expression.
StmtResult Body =
- Invalid ? StmtError() : getDerived().TransformStmt(E->getBody());
+ Invalid ? StmtError() : getDerived().TransformLambdaBody(E, E->getBody());
// ActOnLambda* will pop the function scope for us.
FuncScopeCleanup.disable();
@@ -11240,6 +11556,52 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
}
template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformLambdaBody(LambdaExpr *E, Stmt *S) {
+ return TransformStmt(S);
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::SkipLambdaBody(LambdaExpr *E, Stmt *S) {
+ // Transform captures.
+ for (LambdaExpr::capture_iterator C = E->capture_begin(),
+ CEnd = E->capture_end();
+ C != CEnd; ++C) {
+ // Only the implicit captures need handling here; the explicit captures were
+ // already transformed when the lambda itself was rebuilt.
+ if (!C->isImplicit())
+ continue;
+
+ // Capturing 'this' is trivial.
+ if (C->capturesThis()) {
+ getSema().CheckCXXThisCapture(C->getLocation(), C->isExplicit(),
+ /*BuildAndDiagnose*/ true, nullptr,
+ C->getCaptureKind() == LCK_StarThis);
+ continue;
+ }
+ // Captured expression will be recaptured during captured variables
+ // rebuilding.
+ if (C->capturesVLAType())
+ continue;
+
+ assert(C->capturesVariable() && "unexpected kind of lambda capture");
+ assert(!E->isInitCapture(C) && "implicit init-capture?");
+
+ // Transform the captured variable.
+ VarDecl *CapturedVar = cast_or_null<VarDecl>(
+ getDerived().TransformDecl(C->getLocation(), C->getCapturedVar()));
+ if (!CapturedVar || CapturedVar->isInvalidDecl())
+ return StmtError();
+
+ // Capture the transformed variable.
+ getSema().tryCaptureVariable(CapturedVar, C->getLocation());
+ }
+
+ return S;
+}
+
+template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCXXUnresolvedConstructExpr(
CXXUnresolvedConstructExpr *E) {
@@ -11659,7 +12021,8 @@ TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) {
// be expanded.
bool Expand = true;
bool RetainExpansion = false;
- Optional<unsigned> NumExpansions;
+ Optional<unsigned> OrigNumExpansions = E->getNumExpansions(),
+ NumExpansions = OrigNumExpansions;
if (getDerived().TryExpandParameterPacks(E->getEllipsisLoc(),
Pattern->getSourceRange(),
Unexpanded,
@@ -11688,7 +12051,7 @@ TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) {
return getDerived().RebuildCXXFoldExpr(
E->getBeginLoc(), LHS.get(), E->getOperator(), E->getEllipsisLoc(),
- RHS.get(), E->getEndLoc());
+ RHS.get(), E->getEndLoc(), NumExpansions);
}
// The transform has determined that we should perform an elementwise
@@ -11709,7 +12072,7 @@ TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) {
Result = getDerived().RebuildCXXFoldExpr(
E->getBeginLoc(), Out.get(), E->getOperator(), E->getEllipsisLoc(),
- Result.get(), E->getEndLoc());
+ Result.get(), E->getEndLoc(), OrigNumExpansions);
if (Result.isInvalid())
return true;
}
@@ -11726,7 +12089,8 @@ TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) {
Result = getDerived().RebuildCXXFoldExpr(
E->getBeginLoc(), LeftFold ? Result.get() : Out.get(),
E->getOperator(), E->getEllipsisLoc(),
- LeftFold ? Out.get() : Result.get(), E->getEndLoc());
+ LeftFold ? Out.get() : Result.get(), E->getEndLoc(),
+ OrigNumExpansions);
} else if (Result.isUsable()) {
// We've got down to a single element; build a binary operator.
Result = getDerived().RebuildBinaryOperator(
@@ -11751,7 +12115,7 @@ TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) {
Result = getDerived().RebuildCXXFoldExpr(
E->getBeginLoc(), Result.get(), E->getOperator(), E->getEllipsisLoc(),
- Out.get(), E->getEndLoc());
+ Out.get(), E->getEndLoc(), OrigNumExpansions);
if (Result.isInvalid())
return true;
}
@@ -12277,8 +12641,7 @@ TreeTransform<Derived>::TransformBlockExpr(BlockExpr *E) {
VarDecl *oldCapture = I.getVariable();
// Ignore parameter packs.
- if (isa<ParmVarDecl>(oldCapture) &&
- cast<ParmVarDecl>(oldCapture)->isParameterPack())
+ if (oldCapture->isParameterPack())
continue;
VarDecl *newCapture =
diff --git a/lib/Sema/TypeLocBuilder.cpp b/lib/Sema/TypeLocBuilder.cpp
index 340b7fae78aa..b451403544ed 100644
--- a/lib/Sema/TypeLocBuilder.cpp
+++ b/lib/Sema/TypeLocBuilder.cpp
@@ -1,9 +1,8 @@
//===--- TypeLocBuilder.cpp - Type Source Info collector ------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Sema/TypeLocBuilder.h b/lib/Sema/TypeLocBuilder.h
index 536ea1c07f49..1e6883926a80 100644
--- a/lib/Sema/TypeLocBuilder.h
+++ b/lib/Sema/TypeLocBuilder.h
@@ -1,9 +1,8 @@
//===--- TypeLocBuilder.h - Type Source Info collector ----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Serialization/ASTCommon.cpp b/lib/Serialization/ASTCommon.cpp
index ca826d83d471..aa3477a7d35e 100644
--- a/lib/Serialization/ASTCommon.cpp
+++ b/lib/Serialization/ASTCommon.cpp
@@ -1,9 +1,8 @@
//===--- ASTCommon.cpp - Common stuff for ASTReader/ASTWriter----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -388,12 +387,15 @@ bool serialization::isRedeclarableDeclKind(unsigned Kind) {
case Decl::ClassScopeFunctionSpecialization:
case Decl::Import:
case Decl::OMPThreadPrivate:
+ case Decl::OMPAllocate:
case Decl::OMPRequires:
case Decl::OMPCapturedExpr:
case Decl::OMPDeclareReduction:
+ case Decl::OMPDeclareMapper:
case Decl::BuiltinTemplate:
case Decl::Decomposition:
case Decl::Binding:
+ case Decl::Concept:
return false;
// These indirectly derive from Redeclarable<T> but are not actually
diff --git a/lib/Serialization/ASTCommon.h b/lib/Serialization/ASTCommon.h
index 12e26c1fc2b9..296642e3674a 100644
--- a/lib/Serialization/ASTCommon.h
+++ b/lib/Serialization/ASTCommon.h
@@ -1,9 +1,8 @@
//===- ASTCommon.h - Common stuff for ASTReader/ASTWriter -*- C++ -*-=========//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -39,6 +38,7 @@ enum DeclUpdateKind {
UPD_MANGLING_NUMBER,
UPD_STATIC_LOCAL_NUMBER,
UPD_DECL_MARKED_OPENMP_THREADPRIVATE,
+ UPD_DECL_MARKED_OPENMP_ALLOCATE,
UPD_DECL_MARKED_OPENMP_DECLARETARGET,
UPD_DECL_EXPORTED,
UPD_ADDED_ATTR_TO_RECORD
@@ -109,6 +109,21 @@ template<typename Fn> void numberAnonymousDeclsWithin(const DeclContext *DC,
}
}
+/// Determine whether the given declaration will be included in the per-module
+/// initializer if it needs to be eagerly handed to the AST consumer. If so, we
+/// should not hand it to the consumer when deserializing it, nor include it in
+/// the list of eagerly deserialized declarations.
+inline bool isPartOfPerModuleInitializer(const Decl *D) {
+ if (isa<ImportDecl>(D))
+ return true;
+ // Template instantiations are notionally in an "instantiation unit" rather
+ // than in any particular translation unit, so they need not be part of any
+ // particular (sub)module's per-module initializer.
+ if (auto *VD = dyn_cast<VarDecl>(D))
+ return !isTemplateInstantiation(VD->getTemplateSpecializationKind());
+ return false;
+}
+
} // namespace serialization
} // namespace clang
diff --git a/lib/Serialization/ASTReader.cpp b/lib/Serialization/ASTReader.cpp
index e0b2b24a0d32..7f2c7f09e8a3 100644
--- a/lib/Serialization/ASTReader.cpp
+++ b/lib/Serialization/ASTReader.cpp
@@ -1,9 +1,8 @@
//===- ASTReader.cpp - AST File Reader ------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -47,7 +46,6 @@
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
-#include "clang/Basic/MemoryBufferCache.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/OperatorKinds.h"
@@ -77,6 +75,7 @@
#include "clang/Serialization/ASTDeserializationListener.h"
#include "clang/Serialization/ContinuousRangeMap.h"
#include "clang/Serialization/GlobalModuleIndex.h"
+#include "clang/Serialization/InMemoryModuleCache.h"
#include "clang/Serialization/Module.h"
#include "clang/Serialization/ModuleFileExtension.h"
#include "clang/Serialization/ModuleManager.h"
@@ -93,6 +92,7 @@
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
@@ -101,7 +101,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/iterator_range.h"
-#include "llvm/Bitcode/BitstreamReader.h"
+#include "llvm/Bitstream/BitstreamReader.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Compression.h"
@@ -1148,12 +1148,26 @@ bool ASTReader::ReadLexicalDeclContextStorage(ModuleFile &M,
assert(Offset != 0);
SavedStreamPosition SavedPosition(Cursor);
- Cursor.JumpToBit(Offset);
+ if (llvm::Error Err = Cursor.JumpToBit(Offset)) {
+ Error(std::move(Err));
+ return true;
+ }
RecordData Record;
StringRef Blob;
- unsigned Code = Cursor.ReadCode();
- unsigned RecCode = Cursor.readRecord(Code, Record, &Blob);
+ Expected<unsigned> MaybeCode = Cursor.ReadCode();
+ if (!MaybeCode) {
+ Error(MaybeCode.takeError());
+ return true;
+ }
+ unsigned Code = MaybeCode.get();
+
+ Expected<unsigned> MaybeRecCode = Cursor.readRecord(Code, Record, &Blob);
+ if (!MaybeRecCode) {
+ Error(MaybeRecCode.takeError());
+ return true;
+ }
+ unsigned RecCode = MaybeRecCode.get();
if (RecCode != DECL_CONTEXT_LEXICAL) {
Error("Expected lexical block");
return true;
@@ -1184,12 +1198,26 @@ bool ASTReader::ReadVisibleDeclContextStorage(ModuleFile &M,
assert(Offset != 0);
SavedStreamPosition SavedPosition(Cursor);
- Cursor.JumpToBit(Offset);
+ if (llvm::Error Err = Cursor.JumpToBit(Offset)) {
+ Error(std::move(Err));
+ return true;
+ }
RecordData Record;
StringRef Blob;
- unsigned Code = Cursor.ReadCode();
- unsigned RecCode = Cursor.readRecord(Code, Record, &Blob);
+ Expected<unsigned> MaybeCode = Cursor.ReadCode();
+ if (!MaybeCode) {
+ Error(MaybeCode.takeError());
+ return true;
+ }
+ unsigned Code = MaybeCode.get();
+
+ Expected<unsigned> MaybeRecCode = Cursor.readRecord(Code, Record, &Blob);
+ if (!MaybeRecCode) {
+ Error(MaybeRecCode.takeError());
+ return true;
+ }
+ unsigned RecCode = MaybeRecCode.get();
if (RecCode != DECL_CONTEXT_VISIBLE) {
Error("Expected visible lookup table block");
return true;
@@ -1219,6 +1247,10 @@ void ASTReader::Error(unsigned DiagID,
Diag(DiagID) << Arg1 << Arg2;
}
+void ASTReader::Error(llvm::Error &&Err) const {
+ Error(toString(std::move(Err)));
+}
+
//===----------------------------------------------------------------------===//
// Source Manager Deserialization
//===----------------------------------------------------------------------===//
@@ -1282,20 +1314,27 @@ bool ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
SLocEntryCursor = F.Stream;
// The stream itself is going to skip over the source manager block.
- if (F.Stream.SkipBlock()) {
- Error("malformed block record in AST file");
+ if (llvm::Error Err = F.Stream.SkipBlock()) {
+ Error(std::move(Err));
return true;
}
// Enter the source manager block.
- if (SLocEntryCursor.EnterSubBlock(SOURCE_MANAGER_BLOCK_ID)) {
- Error("malformed source manager block record in AST file");
+ if (llvm::Error Err =
+ SLocEntryCursor.EnterSubBlock(SOURCE_MANAGER_BLOCK_ID)) {
+ Error(std::move(Err));
return true;
}
RecordData Record;
while (true) {
- llvm::BitstreamEntry E = SLocEntryCursor.advanceSkippingSubblocks();
+ Expected<llvm::BitstreamEntry> MaybeE =
+ SLocEntryCursor.advanceSkippingSubblocks();
+ if (!MaybeE) {
+ Error(MaybeE.takeError());
+ return true;
+ }
+ llvm::BitstreamEntry E = MaybeE.get();
switch (E.Kind) {
case llvm::BitstreamEntry::SubBlock: // Handled for us already.
@@ -1312,7 +1351,13 @@ bool ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
// Read a record.
Record.clear();
StringRef Blob;
- switch (SLocEntryCursor.readRecord(E.ID, Record, &Blob)) {
+ Expected<unsigned> MaybeRecord =
+ SLocEntryCursor.readRecord(E.ID, Record, &Blob);
+ if (!MaybeRecord) {
+ Error(MaybeRecord.takeError());
+ return true;
+ }
+ switch (MaybeRecord.get()) {
default: // Default behavior: ignore.
break;
@@ -1376,8 +1421,20 @@ bool ASTReader::ReadSLocEntry(int ID) {
StringRef Name) -> std::unique_ptr<llvm::MemoryBuffer> {
RecordData Record;
StringRef Blob;
- unsigned Code = SLocEntryCursor.ReadCode();
- unsigned RecCode = SLocEntryCursor.readRecord(Code, Record, &Blob);
+ Expected<unsigned> MaybeCode = SLocEntryCursor.ReadCode();
+ if (!MaybeCode) {
+ Error(MaybeCode.takeError());
+ return nullptr;
+ }
+ unsigned Code = MaybeCode.get();
+
+ Expected<unsigned> MaybeRecCode =
+ SLocEntryCursor.readRecord(Code, Record, &Blob);
+ if (!MaybeRecCode) {
+ Error(MaybeRecCode.takeError());
+ return nullptr;
+ }
+ unsigned RecCode = MaybeRecCode.get();
if (RecCode == SM_SLOC_BUFFER_BLOB_COMPRESSED) {
if (!llvm::zlib::isAvailable()) {
@@ -1401,12 +1458,23 @@ bool ASTReader::ReadSLocEntry(int ID) {
};
ModuleFile *F = GlobalSLocEntryMap.find(-ID)->second;
- F->SLocEntryCursor.JumpToBit(F->SLocEntryOffsets[ID - F->SLocEntryBaseID]);
+ if (llvm::Error Err = F->SLocEntryCursor.JumpToBit(
+ F->SLocEntryOffsets[ID - F->SLocEntryBaseID])) {
+ Error(std::move(Err));
+ return true;
+ }
+
BitstreamCursor &SLocEntryCursor = F->SLocEntryCursor;
unsigned BaseOffset = F->SLocEntryBaseOffset;
++NumSLocEntriesRead;
- llvm::BitstreamEntry Entry = SLocEntryCursor.advance();
+ Expected<llvm::BitstreamEntry> MaybeEntry = SLocEntryCursor.advance();
+ if (!MaybeEntry) {
+ Error(MaybeEntry.takeError());
+ return true;
+ }
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
+
if (Entry.Kind != llvm::BitstreamEntry::Record) {
Error("incorrectly-formatted source location entry in AST file");
return true;
@@ -1414,7 +1482,13 @@ bool ASTReader::ReadSLocEntry(int ID) {
RecordData Record;
StringRef Blob;
- switch (SLocEntryCursor.readRecord(Entry.ID, Record, &Blob)) {
+ Expected<unsigned> MaybeSLOC =
+ SLocEntryCursor.readRecord(Entry.ID, Record, &Blob);
+ if (!MaybeSLOC) {
+ Error(MaybeSLOC.takeError());
+ return true;
+ }
+ switch (MaybeSLOC.get()) {
default:
Error("incorrectly-formatted source location entry in AST file");
return true;
@@ -1538,23 +1612,40 @@ SourceLocation ASTReader::getImportLocation(ModuleFile *F) {
return F->ImportedBy[0]->FirstLoc;
}
-/// ReadBlockAbbrevs - Enter a subblock of the specified BlockID with the
-/// specified cursor. Read the abbreviations that are at the top of the block
-/// and then leave the cursor pointing into the block.
+/// Enter a subblock of the specified BlockID with the specified cursor. Read
+/// the abbreviations that are at the top of the block and then leave the cursor
+/// pointing into the block.
bool ASTReader::ReadBlockAbbrevs(BitstreamCursor &Cursor, unsigned BlockID) {
- if (Cursor.EnterSubBlock(BlockID))
+ if (llvm::Error Err = Cursor.EnterSubBlock(BlockID)) {
+ // FIXME this drops errors on the floor.
+ consumeError(std::move(Err));
return true;
+ }
while (true) {
uint64_t Offset = Cursor.GetCurrentBitNo();
- unsigned Code = Cursor.ReadCode();
+ Expected<unsigned> MaybeCode = Cursor.ReadCode();
+ if (!MaybeCode) {
+ // FIXME this drops errors on the floor.
+ consumeError(MaybeCode.takeError());
+ return true;
+ }
+ unsigned Code = MaybeCode.get();
// We expect all abbrevs to be at the start of the block.
if (Code != llvm::bitc::DEFINE_ABBREV) {
- Cursor.JumpToBit(Offset);
+ if (llvm::Error Err = Cursor.JumpToBit(Offset)) {
+ // FIXME this drops errors on the floor.
+ consumeError(std::move(Err));
+ return true;
+ }
return false;
}
- Cursor.ReadAbbrevRecord();
+ if (llvm::Error Err = Cursor.ReadAbbrevRecord()) {
+ // FIXME this drops errors on the floor.
+ consumeError(std::move(Err));
+ return true;
+ }
}
}
@@ -1578,7 +1669,11 @@ MacroInfo *ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) {
// after reading this macro.
SavedStreamPosition SavedPosition(Stream);
- Stream.JumpToBit(Offset);
+ if (llvm::Error Err = Stream.JumpToBit(Offset)) {
+ // FIXME this drops errors on the floor.
+ consumeError(std::move(Err));
+ return nullptr;
+ }
RecordData Record;
SmallVector<IdentifierInfo*, 16> MacroParams;
MacroInfo *Macro = nullptr;
@@ -1588,7 +1683,13 @@ MacroInfo *ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) {
// pop it (removing all the abbreviations from the cursor) since we want to
// be able to reseek within the block and read entries.
unsigned Flags = BitstreamCursor::AF_DontPopBlockAtEnd;
- llvm::BitstreamEntry Entry = Stream.advanceSkippingSubblocks(Flags);
+ Expected<llvm::BitstreamEntry> MaybeEntry =
+ Stream.advanceSkippingSubblocks(Flags);
+ if (!MaybeEntry) {
+ Error(MaybeEntry.takeError());
+ return Macro;
+ }
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
switch (Entry.Kind) {
case llvm::BitstreamEntry::SubBlock: // Handled for us already.
@@ -1604,8 +1705,13 @@ MacroInfo *ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) {
// Read a record.
Record.clear();
- PreprocessorRecordTypes RecType =
- (PreprocessorRecordTypes)Stream.readRecord(Entry.ID, Record);
+ PreprocessorRecordTypes RecType;
+ if (Expected<unsigned> MaybeRecType = Stream.readRecord(Entry.ID, Record))
+ RecType = (PreprocessorRecordTypes)MaybeRecType.get();
+ else {
+ Error(MaybeRecType.takeError());
+ return Macro;
+ }
switch (RecType) {
case PP_MODULE_MACRO:
case PP_MACRO_DIRECTIVE_HISTORY:
@@ -1828,11 +1934,19 @@ void ASTReader::ReadDefinedMacros() {
continue;
BitstreamCursor Cursor = MacroCursor;
- Cursor.JumpToBit(I.MacroStartOffset);
+ if (llvm::Error Err = Cursor.JumpToBit(I.MacroStartOffset)) {
+ Error(std::move(Err));
+ return;
+ }
RecordData Record;
while (true) {
- llvm::BitstreamEntry E = Cursor.advanceSkippingSubblocks();
+ Expected<llvm::BitstreamEntry> MaybeE = Cursor.advanceSkippingSubblocks();
+ if (!MaybeE) {
+ Error(MaybeE.takeError());
+ return;
+ }
+ llvm::BitstreamEntry E = MaybeE.get();
switch (E.Kind) {
case llvm::BitstreamEntry::SubBlock: // Handled for us already.
@@ -1842,9 +1956,14 @@ void ASTReader::ReadDefinedMacros() {
case llvm::BitstreamEntry::EndBlock:
goto NextCursor;
- case llvm::BitstreamEntry::Record:
+ case llvm::BitstreamEntry::Record: {
Record.clear();
- switch (Cursor.readRecord(E.ID, Record)) {
+ Expected<unsigned> MaybeRecord = Cursor.readRecord(E.ID, Record);
+ if (!MaybeRecord) {
+ Error(MaybeRecord.takeError());
+ return;
+ }
+ switch (MaybeRecord.get()) {
default: // Default behavior: ignore.
break;
@@ -1862,6 +1981,7 @@ void ASTReader::ReadDefinedMacros() {
}
break;
}
+ }
}
NextCursor: ;
}
@@ -1962,7 +2082,10 @@ void ASTReader::resolvePendingMacro(IdentifierInfo *II,
BitstreamCursor &Cursor = M.MacroCursor;
SavedStreamPosition SavedPosition(Cursor);
- Cursor.JumpToBit(PMInfo.MacroDirectivesOffset);
+ if (llvm::Error Err = Cursor.JumpToBit(PMInfo.MacroDirectivesOffset)) {
+ Error(std::move(Err));
+ return;
+ }
struct ModuleMacroRecord {
SubmoduleID SubModID;
@@ -1976,15 +2099,26 @@ void ASTReader::resolvePendingMacro(IdentifierInfo *II,
// macro history.
RecordData Record;
while (true) {
- llvm::BitstreamEntry Entry =
+ Expected<llvm::BitstreamEntry> MaybeEntry =
Cursor.advance(BitstreamCursor::AF_DontPopBlockAtEnd);
+ if (!MaybeEntry) {
+ Error(MaybeEntry.takeError());
+ return;
+ }
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
+
if (Entry.Kind != llvm::BitstreamEntry::Record) {
Error("malformed block record in AST file");
return;
}
Record.clear();
- switch ((PreprocessorRecordTypes)Cursor.readRecord(Entry.ID, Record)) {
+ Expected<unsigned> MaybePP = Cursor.readRecord(Entry.ID, Record);
+ if (!MaybePP) {
+ Error(MaybePP.takeError());
+ return;
+ }
+ switch ((PreprocessorRecordTypes)MaybePP.get()) {
case PP_MACRO_DIRECTIVE_HISTORY:
break;
@@ -2070,16 +2204,27 @@ ASTReader::readInputFileInfo(ModuleFile &F, unsigned ID) {
// Go find this input file.
BitstreamCursor &Cursor = F.InputFilesCursor;
SavedStreamPosition SavedPosition(Cursor);
- Cursor.JumpToBit(F.InputFileOffsets[ID-1]);
+ if (llvm::Error Err = Cursor.JumpToBit(F.InputFileOffsets[ID - 1])) {
+ // FIXME this drops errors on the floor.
+ consumeError(std::move(Err));
+ }
- unsigned Code = Cursor.ReadCode();
+ Expected<unsigned> MaybeCode = Cursor.ReadCode();
+ if (!MaybeCode) {
+ // FIXME this drops errors on the floor.
+ consumeError(MaybeCode.takeError());
+ }
+ unsigned Code = MaybeCode.get();
RecordData Record;
StringRef Blob;
- unsigned Result = Cursor.readRecord(Code, Record, &Blob);
- assert(static_cast<InputFileRecordTypes>(Result) == INPUT_FILE &&
- "invalid record type for input file");
- (void)Result;
+ if (Expected<unsigned> Maybe = Cursor.readRecord(Code, Record, &Blob))
+ assert(static_cast<InputFileRecordTypes>(Maybe.get()) == INPUT_FILE &&
+ "invalid record type for input file");
+ else {
+ // FIXME this drops errors on the floor.
+ consumeError(Maybe.takeError());
+ }
assert(Record[0] == ID && "Bogus stored ID or offset");
InputFileInfo R;
@@ -2109,7 +2254,10 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
// Go find this input file.
BitstreamCursor &Cursor = F.InputFilesCursor;
SavedStreamPosition SavedPosition(Cursor);
- Cursor.JumpToBit(F.InputFileOffsets[ID-1]);
+ if (llvm::Error Err = Cursor.JumpToBit(F.InputFileOffsets[ID - 1])) {
+ // FIXME this drops errors on the floor.
+ consumeError(std::move(Err));
+ }
InputFileInfo FI = readInputFileInfo(F, ID);
off_t StoredSize = FI.StoredSize;
@@ -2258,14 +2406,23 @@ ASTReader::ASTReadResult ASTReader::ReadOptionsBlock(
BitstreamCursor &Stream, unsigned ClientLoadCapabilities,
bool AllowCompatibleConfigurationMismatch, ASTReaderListener &Listener,
std::string &SuggestedPredefines) {
- if (Stream.EnterSubBlock(OPTIONS_BLOCK_ID))
+ if (llvm::Error Err = Stream.EnterSubBlock(OPTIONS_BLOCK_ID)) {
+ // FIXME this drops errors on the floor.
+ consumeError(std::move(Err));
return Failure;
+ }
// Read all of the records in the options block.
RecordData Record;
ASTReadResult Result = Success;
while (true) {
- llvm::BitstreamEntry Entry = Stream.advance();
+ Expected<llvm::BitstreamEntry> MaybeEntry = Stream.advance();
+ if (!MaybeEntry) {
+ // FIXME this drops errors on the floor.
+ consumeError(MaybeEntry.takeError());
+ return Failure;
+ }
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
switch (Entry.Kind) {
case llvm::BitstreamEntry::Error:
@@ -2282,7 +2439,13 @@ ASTReader::ASTReadResult ASTReader::ReadOptionsBlock(
// Read and process a record.
Record.clear();
- switch ((OptionsRecordTypes)Stream.readRecord(Entry.ID, Record)) {
+ Expected<unsigned> MaybeRecordType = Stream.readRecord(Entry.ID, Record);
+ if (!MaybeRecordType) {
+ // FIXME this drops errors on the floor.
+ consumeError(MaybeRecordType.takeError());
+ return Failure;
+ }
+ switch ((OptionsRecordTypes)MaybeRecordType.get()) {
case LANGUAGE_OPTIONS: {
bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
if (ParseLanguageOptions(Record, Complain, Listener,
@@ -2334,8 +2497,8 @@ ASTReader::ReadControlBlock(ModuleFile &F,
BitstreamCursor &Stream = F.Stream;
ASTReadResult Result = Success;
- if (Stream.EnterSubBlock(CONTROL_BLOCK_ID)) {
- Error("malformed block record in AST file");
+ if (llvm::Error Err = Stream.EnterSubBlock(CONTROL_BLOCK_ID)) {
+ Error(std::move(Err));
return Failure;
}
@@ -2360,8 +2523,14 @@ ASTReader::ReadControlBlock(ModuleFile &F,
RecordData Record;
unsigned NumInputs = 0;
unsigned NumUserInputs = 0;
+ StringRef BaseDirectoryAsWritten;
while (true) {
- llvm::BitstreamEntry Entry = Stream.advance();
+ Expected<llvm::BitstreamEntry> MaybeEntry = Stream.advance();
+ if (!MaybeEntry) {
+ Error(MaybeEntry.takeError());
+ return Failure;
+ }
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
switch (Entry.Kind) {
case llvm::BitstreamEntry::Error:
@@ -2424,9 +2593,11 @@ ASTReader::ReadControlBlock(ModuleFile &F,
switch (Entry.ID) {
case INPUT_FILES_BLOCK_ID:
F.InputFilesCursor = Stream;
- if (Stream.SkipBlock() || // Skip with the main cursor
- // Read the abbreviations
- ReadBlockAbbrevs(F.InputFilesCursor, INPUT_FILES_BLOCK_ID)) {
+ if (llvm::Error Err = Stream.SkipBlock()) {
+ Error(std::move(Err));
+ return Failure;
+ }
+ if (ReadBlockAbbrevs(F.InputFilesCursor, INPUT_FILES_BLOCK_ID)) {
Error("malformed block record in AST file");
return Failure;
}
@@ -2462,15 +2633,15 @@ ASTReader::ReadControlBlock(ModuleFile &F,
// middle of a block.
if (Result != Success)
return Result;
- } else if (Stream.SkipBlock()) {
- Error("malformed block record in AST file");
+ } else if (llvm::Error Err = Stream.SkipBlock()) {
+ Error(std::move(Err));
return Failure;
}
continue;
default:
- if (Stream.SkipBlock()) {
- Error("malformed block record in AST file");
+ if (llvm::Error Err = Stream.SkipBlock()) {
+ Error(std::move(Err));
return Failure;
}
continue;
@@ -2484,7 +2655,13 @@ ASTReader::ReadControlBlock(ModuleFile &F,
// Read and process a record.
Record.clear();
StringRef Blob;
- switch ((ControlRecordTypes)Stream.readRecord(Entry.ID, Record, &Blob)) {
+ Expected<unsigned> MaybeRecordType =
+ Stream.readRecord(Entry.ID, Record, &Blob);
+ if (!MaybeRecordType) {
+ Error(MaybeRecordType.takeError());
+ return Failure;
+ }
+ switch ((ControlRecordTypes)MaybeRecordType.get()) {
case METADATA: {
if (Record[0] != VERSION_MAJOR && !DisableValidation) {
if ((ClientLoadCapabilities & ARR_VersionMismatch) == 0)
@@ -2560,7 +2737,9 @@ ASTReader::ReadControlBlock(ModuleFile &F,
ImportedName, /*FileMapOnly*/ true);
if (ImportedFile.empty())
- ImportedFile = ReadPath(F, Record, Idx);
+ // Use BaseDirectoryAsWritten to ensure we use the same path in the
+ // ModuleCache as when writing.
+ ImportedFile = ReadPath(BaseDirectoryAsWritten, Record, Idx);
else
SkipPath(Record, Idx);
@@ -2611,6 +2790,9 @@ ASTReader::ReadControlBlock(ModuleFile &F,
case MODULE_NAME:
F.ModuleName = Blob;
+ Diag(diag::remark_module_import)
+ << F.ModuleName << F.FileName << (ImportedBy ? true : false)
+ << (ImportedBy ? StringRef(ImportedBy->ModuleName) : StringRef());
if (Listener)
Listener->ReadModuleName(F.ModuleName);
@@ -2622,6 +2804,9 @@ ASTReader::ReadControlBlock(ModuleFile &F,
break;
case MODULE_DIRECTORY: {
+ // Save the BaseDirectory as written in the PCM for computing the module
+ // filename for the ModuleCache.
+ BaseDirectoryAsWritten = Blob;
assert(!F.ModuleName.empty() &&
"MODULE_DIRECTORY found before MODULE_NAME");
// If we've already loaded a module map file covering this module, we may
@@ -2673,15 +2858,20 @@ ASTReader::ASTReadResult
ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
BitstreamCursor &Stream = F.Stream;
- if (Stream.EnterSubBlock(AST_BLOCK_ID)) {
- Error("malformed block record in AST file");
+ if (llvm::Error Err = Stream.EnterSubBlock(AST_BLOCK_ID)) {
+ Error(std::move(Err));
return Failure;
}
// Read all of the records and blocks for the AST file.
RecordData Record;
while (true) {
- llvm::BitstreamEntry Entry = Stream.advance();
+ Expected<llvm::BitstreamEntry> MaybeEntry = Stream.advance();
+ if (!MaybeEntry) {
+ Error(MaybeEntry.takeError());
+ return Failure;
+ }
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
switch (Entry.Kind) {
case llvm::BitstreamEntry::Error:
@@ -2708,9 +2898,11 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
// cursor to it, enter the block and read the abbrevs in that block.
// With the main cursor, we just skip over it.
F.DeclsCursor = Stream;
- if (Stream.SkipBlock() || // Skip with the main cursor.
- // Read the abbrevs.
- ReadBlockAbbrevs(F.DeclsCursor, DECLTYPES_BLOCK_ID)) {
+ if (llvm::Error Err = Stream.SkipBlock()) {
+ Error(std::move(Err));
+ return Failure;
+ }
+ if (ReadBlockAbbrevs(F.DeclsCursor, DECLTYPES_BLOCK_ID)) {
Error("malformed block record in AST file");
return Failure;
}
@@ -2721,8 +2913,11 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
if (!PP.getExternalSource())
PP.setExternalSource(this);
- if (Stream.SkipBlock() ||
- ReadBlockAbbrevs(F.MacroCursor, PREPROCESSOR_BLOCK_ID)) {
+ if (llvm::Error Err = Stream.SkipBlock()) {
+ Error(std::move(Err));
+ return Failure;
+ }
+ if (ReadBlockAbbrevs(F.MacroCursor, PREPROCESSOR_BLOCK_ID)) {
Error("malformed block record in AST file");
return Failure;
}
@@ -2731,12 +2926,16 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
case PREPROCESSOR_DETAIL_BLOCK_ID:
F.PreprocessorDetailCursor = Stream;
- if (Stream.SkipBlock() ||
- ReadBlockAbbrevs(F.PreprocessorDetailCursor,
+
+ if (llvm::Error Err = Stream.SkipBlock()) {
+ Error(std::move(Err));
+ return Failure;
+ }
+ if (ReadBlockAbbrevs(F.PreprocessorDetailCursor,
PREPROCESSOR_DETAIL_BLOCK_ID)) {
- Error("malformed preprocessor detail record in AST file");
- return Failure;
- }
+ Error("malformed preprocessor detail record in AST file");
+ return Failure;
+ }
F.PreprocessorDetailStartOffset
= F.PreprocessorDetailCursor.GetCurrentBitNo();
@@ -2759,8 +2958,12 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
case COMMENTS_BLOCK_ID: {
BitstreamCursor C = Stream;
- if (Stream.SkipBlock() ||
- ReadBlockAbbrevs(C, COMMENTS_BLOCK_ID)) {
+
+ if (llvm::Error Err = Stream.SkipBlock()) {
+ Error(std::move(Err));
+ return Failure;
+ }
+ if (ReadBlockAbbrevs(C, COMMENTS_BLOCK_ID)) {
Error("malformed comments block in AST file");
return Failure;
}
@@ -2769,8 +2972,8 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
default:
- if (Stream.SkipBlock()) {
- Error("malformed block record in AST file");
+ if (llvm::Error Err = Stream.SkipBlock()) {
+ Error(std::move(Err));
return Failure;
}
break;
@@ -2785,8 +2988,13 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
// Read and process a record.
Record.clear();
StringRef Blob;
- auto RecordType =
- (ASTRecordTypes)Stream.readRecord(Entry.ID, Record, &Blob);
+ Expected<unsigned> MaybeRecordType =
+ Stream.readRecord(Entry.ID, Record, &Blob);
+ if (!MaybeRecordType) {
+ Error(MaybeRecordType.takeError());
+ return Failure;
+ }
+ ASTRecordTypes RecordType = (ASTRecordTypes)MaybeRecordType.get();
// If we're not loading an AST context, we don't care about most records.
if (!ContextObj) {
@@ -3805,10 +4013,13 @@ bool ASTReader::loadGlobalIndex() {
TriedLoadingGlobalIndex = true;
StringRef ModuleCachePath
= getPreprocessor().getHeaderSearchInfo().getModuleCachePath();
- std::pair<GlobalModuleIndex *, GlobalModuleIndex::ErrorCode> Result
- = GlobalModuleIndex::readIndex(ModuleCachePath);
- if (!Result.first)
+ std::pair<GlobalModuleIndex *, llvm::Error> Result =
+ GlobalModuleIndex::readIndex(ModuleCachePath);
+ if (llvm::Error Err = std::move(Result.second)) {
+ assert(!Result.first);
+ consumeError(std::move(Err)); // FIXME this drops errors on the floor.
return true;
+ }
GlobalIndex.reset(Result.first);
ModuleMgr.setGlobalIndex(GlobalIndex.get());
@@ -3837,7 +4048,14 @@ static void updateModuleTimestamp(ModuleFile &MF) {
/// true on failure.
static bool SkipCursorToBlock(BitstreamCursor &Cursor, unsigned BlockID) {
while (true) {
- llvm::BitstreamEntry Entry = Cursor.advance();
+ Expected<llvm::BitstreamEntry> MaybeEntry = Cursor.advance();
+ if (!MaybeEntry) {
+ // FIXME this drops errors on the floor.
+ consumeError(MaybeEntry.takeError());
+ return true;
+ }
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
+
switch (Entry.Kind) {
case llvm::BitstreamEntry::Error:
case llvm::BitstreamEntry::EndBlock:
@@ -3845,19 +4063,30 @@ static bool SkipCursorToBlock(BitstreamCursor &Cursor, unsigned BlockID) {
case llvm::BitstreamEntry::Record:
// Ignore top-level records.
- Cursor.skipRecord(Entry.ID);
- break;
+ if (Expected<unsigned> Skipped = Cursor.skipRecord(Entry.ID))
+ break;
+ else {
+ // FIXME this drops errors on the floor.
+ consumeError(Skipped.takeError());
+ return true;
+ }
case llvm::BitstreamEntry::SubBlock:
if (Entry.ID == BlockID) {
- if (Cursor.EnterSubBlock(BlockID))
+ if (llvm::Error Err = Cursor.EnterSubBlock(BlockID)) {
+ // FIXME this drops the error on the floor.
+ consumeError(std::move(Err));
return true;
+ }
// Found it!
return false;
}
- if (Cursor.SkipBlock())
+ if (llvm::Error Err = Cursor.SkipBlock()) {
+ // FIXME this drops the error on the floor.
+ consumeError(std::move(Err));
return true;
+ }
}
}
}
@@ -4099,13 +4328,23 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
static ASTFileSignature readASTFileSignature(StringRef PCH);
-/// Whether \p Stream starts with the AST/PCH file magic number 'CPCH'.
-static bool startsWithASTFileMagic(BitstreamCursor &Stream) {
- return Stream.canSkipToPos(4) &&
- Stream.Read(8) == 'C' &&
- Stream.Read(8) == 'P' &&
- Stream.Read(8) == 'C' &&
- Stream.Read(8) == 'H';
+/// Whether \p Stream doesn't start with the AST/PCH file magic number 'CPCH'.
+static llvm::Error doesntStartWithASTFileMagic(BitstreamCursor &Stream) {
+ // FIXME checking magic headers is done in other places such as
+ // SerializedDiagnosticReader and GlobalModuleIndex, but error handling isn't
+ // always done the same. Unify it all with a helper.
+ if (!Stream.canSkipToPos(4))
+ return llvm::createStringError(std::errc::illegal_byte_sequence,
+ "file too small to contain AST file magic");
+ for (unsigned C : {'C', 'P', 'C', 'H'})
+ if (Expected<llvm::SimpleBitstreamCursor::word_t> Res = Stream.Read(8)) {
+ if (Res.get() != C)
+ return llvm::createStringError(
+ std::errc::illegal_byte_sequence,
+ "file doesn't start with AST file magic");
+ } else
+ return Res.takeError();
+ return llvm::Error::success();
}
static unsigned moduleKindForDiagnostic(ModuleKind Kind) {
@@ -4142,6 +4381,9 @@ ASTReader::ReadASTCore(StringRef FileName,
switch (AddResult) {
case ModuleManager::AlreadyLoaded:
+ Diag(diag::remark_module_import)
+ << M->ModuleName << M->FileName << (ImportedBy ? true : false)
+ << (ImportedBy ? StringRef(ImportedBy->ModuleName) : StringRef());
return Success;
case ModuleManager::NewlyLoaded:
@@ -4175,22 +4417,35 @@ ASTReader::ReadASTCore(StringRef FileName,
assert(M && "Missing module file");
+ bool ShouldFinalizePCM = false;
+ auto FinalizeOrDropPCM = llvm::make_scope_exit([&]() {
+ auto &MC = getModuleManager().getModuleCache();
+ if (ShouldFinalizePCM)
+ MC.finalizePCM(FileName);
+ else
+ MC.tryToDropPCM(FileName);
+ });
ModuleFile &F = *M;
BitstreamCursor &Stream = F.Stream;
Stream = BitstreamCursor(PCHContainerRdr.ExtractPCH(*F.Buffer));
F.SizeInBits = F.Buffer->getBufferSize() * 8;
// Sniff for the signature.
- if (!startsWithASTFileMagic(Stream)) {
- Diag(diag::err_module_file_invalid) << moduleKindForDiagnostic(Type)
- << FileName;
+ if (llvm::Error Err = doesntStartWithASTFileMagic(Stream)) {
+ Diag(diag::err_module_file_invalid)
+ << moduleKindForDiagnostic(Type) << FileName << std::move(Err);
return Failure;
}
// This is used for compatibility with older PCH formats.
bool HaveReadControlBlock = false;
while (true) {
- llvm::BitstreamEntry Entry = Stream.advance();
+ Expected<llvm::BitstreamEntry> MaybeEntry = Stream.advance();
+ if (!MaybeEntry) {
+ Error(MaybeEntry.takeError());
+ return Failure;
+ }
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
switch (Entry.Kind) {
case llvm::BitstreamEntry::Error:
@@ -4241,6 +4496,7 @@ ASTReader::ReadASTCore(StringRef FileName,
// Record that we've loaded this module.
Loaded.push_back(ImportedModule(M, ImportedBy, ImportLoc));
+ ShouldFinalizePCM = true;
return Success;
case UNHASHED_CONTROL_BLOCK_ID:
@@ -4250,15 +4506,15 @@ ASTReader::ReadASTCore(StringRef FileName,
return Failure;
default:
- if (Stream.SkipBlock()) {
- Error("malformed block record in AST file");
+ if (llvm::Error Err = Stream.SkipBlock()) {
+ Error(std::move(Err));
return Failure;
}
break;
}
}
- return Success;
+ llvm_unreachable("unexpected break; expected return");
}
ASTReader::ASTReadResult
@@ -4286,7 +4542,7 @@ ASTReader::readUnhashedControlBlock(ModuleFile &F, bool WasImportedBy,
}
if (Result == OutOfDate && F.Kind == MK_ImplicitModule) {
- // If this module has already been finalized in the PCMCache, we're stuck
+ // If this module has already been finalized in the ModuleCache, we're stuck
// with it; we can only load a single version of each module.
//
// This can happen when a module is imported in two contexts: in one, as a
@@ -4304,7 +4560,7 @@ ASTReader::readUnhashedControlBlock(ModuleFile &F, bool WasImportedBy,
// validation will fail during the as-system import since the PCM on disk
// doesn't guarantee that -Werror was respected. However, the -Werror
// flags were checked during the initial as-user import.
- if (PCMCache.isBufferFinal(F.FileName)) {
+ if (getModuleManager().getModuleCache().isPCMFinal(F.FileName)) {
Diag(diag::warn_module_system_bit_conflict) << F.FileName;
return Success;
}
@@ -4321,8 +4577,11 @@ ASTReader::ASTReadResult ASTReader::readUnhashedControlBlockImpl(
BitstreamCursor Stream(StreamData);
// Sniff for the signature.
- if (!startsWithASTFileMagic(Stream))
+ if (llvm::Error Err = doesntStartWithASTFileMagic(Stream)) {
+ // FIXME this drops the error on the floor.
+ consumeError(std::move(Err));
return Failure;
+ }
// Scan for the UNHASHED_CONTROL_BLOCK_ID block.
if (SkipCursorToBlock(Stream, UNHASHED_CONTROL_BLOCK_ID))
@@ -4332,7 +4591,13 @@ ASTReader::ASTReadResult ASTReader::readUnhashedControlBlockImpl(
RecordData Record;
ASTReadResult Result = Success;
while (true) {
- llvm::BitstreamEntry Entry = Stream.advance();
+ Expected<llvm::BitstreamEntry> MaybeEntry = Stream.advance();
+ if (!MaybeEntry) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeEntry.takeError());
+ return Failure;
+ }
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
switch (Entry.Kind) {
case llvm::BitstreamEntry::Error:
@@ -4349,8 +4614,12 @@ ASTReader::ASTReadResult ASTReader::readUnhashedControlBlockImpl(
// Read and process a record.
Record.clear();
- switch (
- (UnhashedControlBlockRecordTypes)Stream.readRecord(Entry.ID, Record)) {
+ Expected<unsigned> MaybeRecordType = Stream.readRecord(Entry.ID, Record);
+ if (!MaybeRecordType) {
+ // FIXME this drops the error.
+ return Failure;
+ }
+ switch ((UnhashedControlBlockRecordTypes)MaybeRecordType.get()) {
case SIGNATURE:
if (F)
std::copy(Record.begin(), Record.end(), F->Signature.data());
@@ -4402,12 +4671,19 @@ ASTReader::ASTReadResult ASTReader::ReadExtensionBlock(ModuleFile &F) {
RecordData Record;
while (true) {
- llvm::BitstreamEntry Entry = Stream.advance();
+ Expected<llvm::BitstreamEntry> MaybeEntry = Stream.advance();
+ if (!MaybeEntry) {
+ Error(MaybeEntry.takeError());
+ return Failure;
+ }
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
+
switch (Entry.Kind) {
case llvm::BitstreamEntry::SubBlock:
- if (Stream.SkipBlock())
+ if (llvm::Error Err = Stream.SkipBlock()) {
+ Error(std::move(Err));
return Failure;
-
+ }
continue;
case llvm::BitstreamEntry::EndBlock:
@@ -4422,8 +4698,13 @@ ASTReader::ASTReadResult ASTReader::ReadExtensionBlock(ModuleFile &F) {
Record.clear();
StringRef Blob;
- unsigned RecCode = Stream.readRecord(Entry.ID, Record, &Blob);
- switch (RecCode) {
+ Expected<unsigned> MaybeRecCode =
+ Stream.readRecord(Entry.ID, Record, &Blob);
+ if (!MaybeRecCode) {
+ Error(MaybeRecCode.takeError());
+ return Failure;
+ }
+ switch (MaybeRecCode.get()) {
case EXTENSION_METADATA: {
ModuleFileExtensionMetadata Metadata;
if (parseModuleFileExtensionMetadata(Record, Blob, Metadata))
@@ -4594,8 +4875,11 @@ void ASTReader::finalizeForWriting() {
/// else returns 0.
static ASTFileSignature readASTFileSignature(StringRef PCH) {
BitstreamCursor Stream(PCH);
- if (!startsWithASTFileMagic(Stream))
+ if (llvm::Error Err = doesntStartWithASTFileMagic(Stream)) {
+ // FIXME this drops the error on the floor.
+ consumeError(std::move(Err));
return ASTFileSignature();
+ }
// Scan for the UNHASHED_CONTROL_BLOCK_ID block.
if (SkipCursorToBlock(Stream, UNHASHED_CONTROL_BLOCK_ID))
@@ -4604,13 +4888,27 @@ static ASTFileSignature readASTFileSignature(StringRef PCH) {
// Scan for SIGNATURE inside the diagnostic options block.
ASTReader::RecordData Record;
while (true) {
- llvm::BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+ Expected<llvm::BitstreamEntry> MaybeEntry =
+ Stream.advanceSkippingSubblocks();
+ if (!MaybeEntry) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeEntry.takeError());
+ return ASTFileSignature();
+ }
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
+
if (Entry.Kind != llvm::BitstreamEntry::Record)
return ASTFileSignature();
Record.clear();
StringRef Blob;
- if (SIGNATURE == Stream.readRecord(Entry.ID, Record, &Blob))
+ Expected<unsigned> MaybeRecord = Stream.readRecord(Entry.ID, Record, &Blob);
+ if (!MaybeRecord) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeRecord.takeError());
+ return ASTFileSignature();
+ }
+ if (SIGNATURE == MaybeRecord.get())
return {{{(uint32_t)Record[0], (uint32_t)Record[1], (uint32_t)Record[2],
(uint32_t)Record[3], (uint32_t)Record[4]}}};
}
@@ -4634,8 +4932,8 @@ std::string ASTReader::getOriginalSourceFile(
BitstreamCursor Stream(PCHContainerRdr.ExtractPCH(**Buffer));
// Sniff for the signature.
- if (!startsWithASTFileMagic(Stream)) {
- Diags.Report(diag::err_fe_not_a_pch_file) << ASTFileName;
+ if (llvm::Error Err = doesntStartWithASTFileMagic(Stream)) {
+ Diags.Report(diag::err_fe_not_a_pch_file) << ASTFileName << std::move(Err);
return std::string();
}
@@ -4648,7 +4946,15 @@ std::string ASTReader::getOriginalSourceFile(
// Scan for ORIGINAL_FILE inside the control block.
RecordData Record;
while (true) {
- llvm::BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+ Expected<llvm::BitstreamEntry> MaybeEntry =
+ Stream.advanceSkippingSubblocks();
+ if (!MaybeEntry) {
+ // FIXME this drops errors on the floor.
+ consumeError(MaybeEntry.takeError());
+ return std::string();
+ }
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
+
if (Entry.Kind == llvm::BitstreamEntry::EndBlock)
return std::string();
@@ -4659,7 +4965,13 @@ std::string ASTReader::getOriginalSourceFile(
Record.clear();
StringRef Blob;
- if (Stream.readRecord(Entry.ID, Record, &Blob) == ORIGINAL_FILE)
+ Expected<unsigned> MaybeRecord = Stream.readRecord(Entry.ID, Record, &Blob);
+ if (!MaybeRecord) {
+ // FIXME this drops the errors on the floor.
+ consumeError(MaybeRecord.takeError());
+ return std::string();
+ }
+ if (ORIGINAL_FILE == MaybeRecord.get())
return Blob.str();
}
}
@@ -4733,8 +5045,10 @@ bool ASTReader::readASTFileControlBlock(
BitstreamCursor Stream(Bytes);
// Sniff for the signature.
- if (!startsWithASTFileMagic(Stream))
+ if (llvm::Error Err = doesntStartWithASTFileMagic(Stream)) {
+ consumeError(std::move(Err)); // FIXME this drops errors on the floor.
return true;
+ }
// Scan for the CONTROL_BLOCK_ID block.
if (SkipCursorToBlock(Stream, CONTROL_BLOCK_ID))
@@ -4749,7 +5063,13 @@ bool ASTReader::readASTFileControlBlock(
std::string ModuleDir;
bool DoneWithControlBlock = false;
while (!DoneWithControlBlock) {
- llvm::BitstreamEntry Entry = Stream.advance();
+ Expected<llvm::BitstreamEntry> MaybeEntry = Stream.advance();
+ if (!MaybeEntry) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeEntry.takeError());
+ return true;
+ }
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
switch (Entry.Kind) {
case llvm::BitstreamEntry::SubBlock: {
@@ -4765,15 +5085,22 @@ bool ASTReader::readASTFileControlBlock(
case INPUT_FILES_BLOCK_ID:
InputFilesCursor = Stream;
- if (Stream.SkipBlock() ||
- (NeedsInputFiles &&
- ReadBlockAbbrevs(InputFilesCursor, INPUT_FILES_BLOCK_ID)))
+ if (llvm::Error Err = Stream.SkipBlock()) {
+ // FIXME this drops the error on the floor.
+ consumeError(std::move(Err));
+ return true;
+ }
+ if (NeedsInputFiles &&
+ ReadBlockAbbrevs(InputFilesCursor, INPUT_FILES_BLOCK_ID))
return true;
break;
default:
- if (Stream.SkipBlock())
+ if (llvm::Error Err = Stream.SkipBlock()) {
+ // FIXME this drops the error on the floor.
+ consumeError(std::move(Err));
return true;
+ }
break;
}
@@ -4795,8 +5122,13 @@ bool ASTReader::readASTFileControlBlock(
Record.clear();
StringRef Blob;
- unsigned RecCode = Stream.readRecord(Entry.ID, Record, &Blob);
- switch ((ControlRecordTypes)RecCode) {
+ Expected<unsigned> MaybeRecCode =
+ Stream.readRecord(Entry.ID, Record, &Blob);
+ if (!MaybeRecCode) {
+ // FIXME this drops the error.
+ return Failure;
+ }
+ switch ((ControlRecordTypes)MaybeRecCode.get()) {
case METADATA:
if (Record[0] != VERSION_MAJOR)
return true;
@@ -4833,13 +5165,28 @@ bool ASTReader::readASTFileControlBlock(
BitstreamCursor &Cursor = InputFilesCursor;
SavedStreamPosition SavedPosition(Cursor);
- Cursor.JumpToBit(InputFileOffs[I]);
+ if (llvm::Error Err = Cursor.JumpToBit(InputFileOffs[I])) {
+ // FIXME this drops errors on the floor.
+ consumeError(std::move(Err));
+ }
+
+ Expected<unsigned> MaybeCode = Cursor.ReadCode();
+ if (!MaybeCode) {
+ // FIXME this drops errors on the floor.
+ consumeError(MaybeCode.takeError());
+ }
+ unsigned Code = MaybeCode.get();
- unsigned Code = Cursor.ReadCode();
RecordData Record;
StringRef Blob;
bool shouldContinue = false;
- switch ((InputFileRecordTypes)Cursor.readRecord(Code, Record, &Blob)) {
+ Expected<unsigned> MaybeRecordType =
+ Cursor.readRecord(Code, Record, &Blob);
+ if (!MaybeRecordType) {
+ // FIXME this drops errors on the floor.
+ consumeError(MaybeRecordType.takeError());
+ }
+ switch ((InputFileRecordTypes)MaybeRecordType.get()) {
case INPUT_FILE:
bool Overridden = static_cast<bool>(Record[3]);
std::string Filename = Blob;
@@ -4882,30 +5229,42 @@ bool ASTReader::readASTFileControlBlock(
while (!SkipCursorToBlock(Stream, EXTENSION_BLOCK_ID)) {
bool DoneWithExtensionBlock = false;
while (!DoneWithExtensionBlock) {
- llvm::BitstreamEntry Entry = Stream.advance();
-
- switch (Entry.Kind) {
- case llvm::BitstreamEntry::SubBlock:
- if (Stream.SkipBlock())
- return true;
-
- continue;
+ Expected<llvm::BitstreamEntry> MaybeEntry = Stream.advance();
+ if (!MaybeEntry) {
+ // FIXME this drops the error.
+ return true;
+ }
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
+
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::SubBlock:
+ if (llvm::Error Err = Stream.SkipBlock()) {
+ // FIXME this drops the error on the floor.
+ consumeError(std::move(Err));
+ return true;
+ }
+ continue;
- case llvm::BitstreamEntry::EndBlock:
- DoneWithExtensionBlock = true;
- continue;
+ case llvm::BitstreamEntry::EndBlock:
+ DoneWithExtensionBlock = true;
+ continue;
- case llvm::BitstreamEntry::Error:
- return true;
+ case llvm::BitstreamEntry::Error:
+ return true;
- case llvm::BitstreamEntry::Record:
- break;
- }
+ case llvm::BitstreamEntry::Record:
+ break;
+ }
Record.clear();
StringRef Blob;
- unsigned RecCode = Stream.readRecord(Entry.ID, Record, &Blob);
- switch (RecCode) {
+ Expected<unsigned> MaybeRecCode =
+ Stream.readRecord(Entry.ID, Record, &Blob);
+ if (!MaybeRecCode) {
+ // FIXME this drops the error.
+ return true;
+ }
+ switch (MaybeRecCode.get()) {
case EXTENSION_METADATA: {
ModuleFileExtensionMetadata Metadata;
if (parseModuleFileExtensionMetadata(Record, Blob, Metadata))
@@ -4947,8 +5306,8 @@ bool ASTReader::isAcceptableASTFile(StringRef Filename, FileManager &FileMgr,
ASTReader::ASTReadResult
ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
// Enter the submodule block.
- if (F.Stream.EnterSubBlock(SUBMODULE_BLOCK_ID)) {
- Error("malformed submodule block record in AST file");
+ if (llvm::Error Err = F.Stream.EnterSubBlock(SUBMODULE_BLOCK_ID)) {
+ Error(std::move(Err));
return Failure;
}
@@ -4957,7 +5316,13 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
Module *CurrentModule = nullptr;
RecordData Record;
while (true) {
- llvm::BitstreamEntry Entry = F.Stream.advanceSkippingSubblocks();
+ Expected<llvm::BitstreamEntry> MaybeEntry =
+ F.Stream.advanceSkippingSubblocks();
+ if (!MaybeEntry) {
+ Error(MaybeEntry.takeError());
+ return Failure;
+ }
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
switch (Entry.Kind) {
case llvm::BitstreamEntry::SubBlock: // Handled for us already.
@@ -4974,7 +5339,12 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
// Read a record.
StringRef Blob;
Record.clear();
- auto Kind = F.Stream.readRecord(Entry.ID, Record, &Blob);
+ Expected<unsigned> MaybeKind = F.Stream.readRecord(Entry.ID, Record, &Blob);
+ if (!MaybeKind) {
+ Error(MaybeKind.takeError());
+ return Failure;
+ }
+ unsigned Kind = MaybeKind.get();
if ((Kind == SUBMODULE_METADATA) != First) {
Error("submodule metadata record should be at beginning of block");
@@ -5451,10 +5821,20 @@ PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) {
}
SavedStreamPosition SavedPosition(M.PreprocessorDetailCursor);
- M.PreprocessorDetailCursor.JumpToBit(PPOffs.BitOffset);
+ if (llvm::Error Err =
+ M.PreprocessorDetailCursor.JumpToBit(PPOffs.BitOffset)) {
+ Error(std::move(Err));
+ return nullptr;
+ }
+
+ Expected<llvm::BitstreamEntry> MaybeEntry =
+ M.PreprocessorDetailCursor.advance(BitstreamCursor::AF_DontPopBlockAtEnd);
+ if (!MaybeEntry) {
+ Error(MaybeEntry.takeError());
+ return nullptr;
+ }
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
- llvm::BitstreamEntry Entry =
- M.PreprocessorDetailCursor.advance(BitstreamCursor::AF_DontPopBlockAtEnd);
if (Entry.Kind != llvm::BitstreamEntry::Record)
return nullptr;
@@ -5464,10 +5844,13 @@ PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) {
PreprocessingRecord &PPRec = *PP.getPreprocessingRecord();
StringRef Blob;
RecordData Record;
- PreprocessorDetailRecordTypes RecType =
- (PreprocessorDetailRecordTypes)M.PreprocessorDetailCursor.readRecord(
- Entry.ID, Record, &Blob);
- switch (RecType) {
+ Expected<unsigned> MaybeRecType =
+ M.PreprocessorDetailCursor.readRecord(Entry.ID, Record, &Blob);
+ if (!MaybeRecType) {
+ Error(MaybeRecType.takeError());
+ return nullptr;
+ }
+ switch ((PreprocessorDetailRecordTypes)MaybeRecType.get()) {
case PPD_MACRO_EXPANSION: {
bool isBuiltin = Record[0];
IdentifierInfo *Name = nullptr;
@@ -5876,10 +6259,24 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
Deserializing AType(this);
unsigned Idx = 0;
- DeclsCursor.JumpToBit(Loc.Offset);
+ if (llvm::Error Err = DeclsCursor.JumpToBit(Loc.Offset)) {
+ Error(std::move(Err));
+ return QualType();
+ }
RecordData Record;
- unsigned Code = DeclsCursor.ReadCode();
- switch ((TypeCode)DeclsCursor.readRecord(Code, Record)) {
+ Expected<unsigned> MaybeCode = DeclsCursor.ReadCode();
+ if (!MaybeCode) {
+ Error(MaybeCode.takeError());
+ return QualType();
+ }
+ unsigned Code = MaybeCode.get();
+
+ Expected<unsigned> MaybeTypeCode = DeclsCursor.readRecord(Code, Record);
+ if (!MaybeTypeCode) {
+ Error(MaybeTypeCode.takeError());
+ return QualType();
+ }
+ switch ((TypeCode)MaybeTypeCode.get()) {
case TYPE_EXT_QUAL: {
if (Record.size() != 2) {
Error("Incorrect encoding of extended qualifier type");
@@ -6120,8 +6517,13 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
case TYPE_AUTO: {
QualType Deduced = readType(*Loc.F, Record, Idx);
AutoTypeKeyword Keyword = (AutoTypeKeyword)Record[Idx++];
- bool IsDependent = Deduced.isNull() ? Record[Idx++] : false;
- return Context.getAutoType(Deduced, Keyword, IsDependent);
+ bool IsDependent = false, IsPack = false;
+ if (Deduced.isNull()) {
+ IsDependent = Record[Idx] > 0;
+ IsPack = Record[Idx] > 1;
+ ++Idx;
+ }
+ return Context.getAutoType(Deduced, Keyword, IsDependent, IsPack);
}
case TYPE_DEDUCED_TEMPLATE_SPECIALIZATION: {
@@ -6179,6 +6581,16 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
return Context.getParenType(InnerType);
}
+ case TYPE_MACRO_QUALIFIED: {
+ if (Record.size() != 2) {
+ Error("incorrect encoding of macro defined type");
+ return QualType();
+ }
+ QualType UnderlyingTy = readType(*Loc.F, Record, Idx);
+ IdentifierInfo *MacroII = GetIdentifierInfo(*Loc.F, Record, Idx);
+ return Context.getMacroQualifiedType(UnderlyingTy, MacroII);
+ }
+
case TYPE_PACK_EXPANSION: {
if (Record.size() != 2) {
Error("incorrect encoding of pack expansion type");
@@ -6500,6 +6912,10 @@ void TypeLocReader::VisitAdjustedTypeLoc(AdjustedTypeLoc TL) {
// nothing to do
}
+void TypeLocReader::VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc TL) {
+ TL.setExpansionLoc(ReadSourceLocation());
+}
+
void TypeLocReader::VisitBlockPointerTypeLoc(BlockPointerTypeLoc TL) {
TL.setCaretLoc(ReadSourceLocation());
}
@@ -7165,13 +7581,26 @@ ASTReader::GetExternalCXXCtorInitializers(uint64_t Offset) {
RecordLocation Loc = getLocalBitOffset(Offset);
BitstreamCursor &Cursor = Loc.F->DeclsCursor;
SavedStreamPosition SavedPosition(Cursor);
- Cursor.JumpToBit(Loc.Offset);
+ if (llvm::Error Err = Cursor.JumpToBit(Loc.Offset)) {
+ Error(std::move(Err));
+ return nullptr;
+ }
ReadingKindTracker ReadingKind(Read_Decl, *this);
RecordData Record;
- unsigned Code = Cursor.ReadCode();
- unsigned RecCode = Cursor.readRecord(Code, Record);
- if (RecCode != DECL_CXX_CTOR_INITIALIZERS) {
+ Expected<unsigned> MaybeCode = Cursor.ReadCode();
+ if (!MaybeCode) {
+ Error(MaybeCode.takeError());
+ return nullptr;
+ }
+ unsigned Code = MaybeCode.get();
+
+ Expected<unsigned> MaybeRecCode = Cursor.readRecord(Code, Record);
+ if (!MaybeRecCode) {
+ Error(MaybeRecCode.takeError());
+ return nullptr;
+ }
+ if (MaybeRecCode.get() != DECL_CXX_CTOR_INITIALIZERS) {
Error("malformed AST file: missing C++ ctor initializers");
return nullptr;
}
@@ -7187,11 +7616,27 @@ CXXBaseSpecifier *ASTReader::GetExternalCXXBaseSpecifiers(uint64_t Offset) {
RecordLocation Loc = getLocalBitOffset(Offset);
BitstreamCursor &Cursor = Loc.F->DeclsCursor;
SavedStreamPosition SavedPosition(Cursor);
- Cursor.JumpToBit(Loc.Offset);
+ if (llvm::Error Err = Cursor.JumpToBit(Loc.Offset)) {
+ Error(std::move(Err));
+ return nullptr;
+ }
ReadingKindTracker ReadingKind(Read_Decl, *this);
RecordData Record;
- unsigned Code = Cursor.ReadCode();
- unsigned RecCode = Cursor.readRecord(Code, Record);
+
+ Expected<unsigned> MaybeCode = Cursor.ReadCode();
+ if (!MaybeCode) {
+ Error(MaybeCode.takeError());
+ return nullptr;
+ }
+ unsigned Code = MaybeCode.get();
+
+ Expected<unsigned> MaybeRecCode = Cursor.readRecord(Code, Record);
+ if (!MaybeRecCode) {
+ Error(MaybeRecCode.takeError());
+ return nullptr;
+ }
+ unsigned RecCode = MaybeRecCode.get();
+
if (RecCode != DECL_CXX_BASE_SPECIFIERS) {
Error("malformed AST file: missing C++ base specifiers");
return nullptr;
@@ -7399,7 +7844,10 @@ Stmt *ASTReader::GetExternalDeclStmt(uint64_t Offset) {
// Offset here is a global offset across the entire chain.
RecordLocation Loc = getLocalBitOffset(Offset);
- Loc.F->DeclsCursor.JumpToBit(Loc.Offset);
+ if (llvm::Error Err = Loc.F->DeclsCursor.JumpToBit(Loc.Offset)) {
+ Error(std::move(Err));
+ return nullptr;
+ }
assert(NumCurrentElementsDeserializing == 0 &&
"should not be called while already deserializing");
Deserializing D(this);
@@ -7500,9 +7948,8 @@ void ASTReader::FindFileRegionDecls(FileID File,
SourceLocation EndLoc = BeginLoc.getLocWithOffset(Length);
DeclIDComp DIDComp(*this, *DInfo.Mod);
- ArrayRef<serialization::LocalDeclID>::iterator
- BeginIt = std::lower_bound(DInfo.Decls.begin(), DInfo.Decls.end(),
- BeginLoc, DIDComp);
+ ArrayRef<serialization::LocalDeclID>::iterator BeginIt =
+ llvm::lower_bound(DInfo.Decls, BeginLoc, DIDComp);
if (BeginIt != DInfo.Decls.begin())
--BeginIt;
@@ -7514,9 +7961,8 @@ void ASTReader::FindFileRegionDecls(FileID File,
->isTopLevelDeclInObjCContainer())
--BeginIt;
- ArrayRef<serialization::LocalDeclID>::iterator
- EndIt = std::upper_bound(DInfo.Decls.begin(), DInfo.Decls.end(),
- EndLoc, DIDComp);
+ ArrayRef<serialization::LocalDeclID>::iterator EndIt =
+ llvm::upper_bound(DInfo.Decls, EndLoc, DIDComp);
if (EndIt != DInfo.Decls.end())
++EndIt;
@@ -7806,7 +8252,7 @@ void ASTReader::UpdateSema() {
// Update the state of pragmas. Use the same API as if we had encountered the
// pragma in the source.
if(OptimizeOffPragmaLocation.isValid())
- SemaObj->ActOnPragmaOptimize(/* IsOn = */ false, OptimizeOffPragmaLocation);
+ SemaObj->ActOnPragmaOptimize(/* On = */ false, OptimizeOffPragmaLocation);
if (PragmaMSStructState != -1)
SemaObj->ActOnPragmaMSStruct((PragmaMSStructKind)PragmaMSStructState);
if (PointersToMembersPragmaLocation.isValid()) {
@@ -8507,7 +8953,7 @@ unsigned ASTReader::getModuleFileID(ModuleFile *F) {
return ((F->BaseSubmoduleID + NUM_PREDEF_SUBMODULE_IDS) << 1) | 1;
auto PCHModules = getModuleManager().pch_modules();
- auto I = std::find(PCHModules.begin(), PCHModules.end(), F);
+ auto I = llvm::find(PCHModules, F);
assert(I != PCHModules.end() && "emitting reference to unknown file");
return (I - PCHModules.end()) << 1;
}
@@ -8710,6 +9156,11 @@ ASTReader::ReadTemplateName(ModuleFile &F, const RecordData &Record,
return Context.getOverloadedTemplateName(Decls.begin(), Decls.end());
}
+ case TemplateName::AssumedTemplate: {
+ DeclarationName Name = ReadDeclarationName(F, Record, Idx);
+ return Context.getAssumedTemplateName(Name);
+ }
+
case TemplateName::QualifiedTemplate: {
NestedNameSpecifier *NNS = ReadNestedNameSpecifier(F, Record, Idx);
bool hasTemplKeyword = Record[Idx++];
@@ -9057,6 +9508,62 @@ ASTReader::ReadSourceRange(ModuleFile &F, const RecordData &Record,
return SourceRange(beg, end);
}
+static FixedPointSemantics
+ReadFixedPointSemantics(const SmallVectorImpl<uint64_t> &Record,
+ unsigned &Idx) {
+ unsigned Width = Record[Idx++];
+ unsigned Scale = Record[Idx++];
+ uint64_t Tmp = Record[Idx++];
+ bool IsSigned = Tmp & 0x1;
+ bool IsSaturated = Tmp & 0x2;
+ bool HasUnsignedPadding = Tmp & 0x4;
+ return FixedPointSemantics(Width, Scale, IsSigned, IsSaturated,
+ HasUnsignedPadding);
+}
+
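+/// Read an APValue from the record. Only the scalar kinds (None,
+/// Indeterminate, Int, Float, FixedPoint, ComplexInt, ComplexFloat) are
+/// handled so far; the aggregate kinds below are still TODO and are read
+/// back as an empty APValue.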
+APValue ASTReader::ReadAPValue(const RecordData &Record, unsigned &Idx) {
+ unsigned Kind = Record[Idx++];
+ switch (Kind) {
+ case APValue::None:
+ return APValue();
+ case APValue::Indeterminate:
+ return APValue::IndeterminateValue();
+ case APValue::Int:
+ return APValue(ReadAPSInt(Record, Idx));
+ case APValue::Float: {
+ const llvm::fltSemantics &FloatSema = llvm::APFloatBase::EnumToSemantics(
+ static_cast<llvm::APFloatBase::Semantics>(Record[Idx++]));
+ return APValue(ReadAPFloat(Record, FloatSema, Idx));
+ }
+ case APValue::FixedPoint: {
+ FixedPointSemantics FPSema = ReadFixedPointSemantics(Record, Idx);
+ return APValue(APFixedPoint(ReadAPInt(Record, Idx), FPSema));
+ }
+ case APValue::ComplexInt: {
+ llvm::APSInt First = ReadAPSInt(Record, Idx);
+ return APValue(std::move(First), ReadAPSInt(Record, Idx));
+ }
+ case APValue::ComplexFloat: {
+ const llvm::fltSemantics &FloatSema1 = llvm::APFloatBase::EnumToSemantics(
+ static_cast<llvm::APFloatBase::Semantics>(Record[Idx++]));
+ llvm::APFloat First = ReadAPFloat(Record, FloatSema1, Idx);
+ const llvm::fltSemantics &FloatSema2 = llvm::APFloatBase::EnumToSemantics(
+ static_cast<llvm::APFloatBase::Semantics>(Record[Idx++]));
+ return APValue(std::move(First), ReadAPFloat(Record, FloatSema2, Idx));
+ }
+ case APValue::LValue:
+ case APValue::Vector:
+ case APValue::Array:
+ case APValue::Struct:
+ case APValue::Union:
+ case APValue::MemberPointer:
+ case APValue::AddrLabelDiff:
+ // TODO: Handle the remaining APValue::ValueKinds.
+ return APValue();
+ }
+ llvm_unreachable("Invalid APValue::ValueKind");
+}
+
/// Read an integral value
llvm::APInt ASTReader::ReadAPInt(const RecordData &Record, unsigned &Idx) {
unsigned BitWidth = Record[Idx++];
@@ -9094,6 +9601,14 @@ std::string ASTReader::ReadPath(ModuleFile &F, const RecordData &Record,
return Filename;
}
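+/// Read a string path from the record and resolve it against the given
+/// base directory, rather than against a module file's base directory.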
+std::string ASTReader::ReadPath(StringRef BaseDirectory,
+ const RecordData &Record, unsigned &Idx) {
+ std::string Filename = ReadString(Record, Idx);
+ if (!BaseDirectory.empty())
+ ResolveImportedPath(Filename, BaseDirectory);
+ return Filename;
+}
+
VersionTuple ASTReader::ReadVersionTuple(const RecordData &Record,
unsigned &Idx) {
unsigned Major = Record[Idx++];
@@ -9160,8 +9675,14 @@ void ASTReader::ReadComments() {
RecordData Record;
while (true) {
- llvm::BitstreamEntry Entry =
- Cursor.advanceSkippingSubblocks(BitstreamCursor::AF_DontPopBlockAtEnd);
+ Expected<llvm::BitstreamEntry> MaybeEntry =
+ Cursor.advanceSkippingSubblocks(
+ BitstreamCursor::AF_DontPopBlockAtEnd);
+ if (!MaybeEntry) {
+ Error(MaybeEntry.takeError());
+ return;
+ }
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
switch (Entry.Kind) {
case llvm::BitstreamEntry::SubBlock: // Handled for us already.
@@ -9177,7 +9698,12 @@ void ASTReader::ReadComments() {
// Read a record.
Record.clear();
- switch ((CommentRecordTypes)Cursor.readRecord(Entry.ID, Record)) {
+ Expected<unsigned> MaybeComment = Cursor.readRecord(Entry.ID, Record);
+ if (!MaybeComment) {
+ Error(MaybeComment.takeError());
+ return;
+ }
+ switch ((CommentRecordTypes)MaybeComment.get()) {
case COMMENTS_RAW_COMMENT: {
unsigned Idx = 0;
SourceRange SR = ReadSourceRange(F, Record, Idx);
@@ -11601,7 +12127,8 @@ void ASTReader::pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name) {
}
}
-ASTReader::ASTReader(Preprocessor &PP, ASTContext *Context,
+ASTReader::ASTReader(Preprocessor &PP, InMemoryModuleCache &ModuleCache,
+ ASTContext *Context,
const PCHContainerReader &PCHContainerRdr,
ArrayRef<std::shared_ptr<ModuleFileExtension>> Extensions,
StringRef isysroot, bool DisableValidation,
@@ -11614,11 +12141,9 @@ ASTReader::ASTReader(Preprocessor &PP, ASTContext *Context,
: cast<ASTReaderListener>(new PCHValidator(PP, *this))),
SourceMgr(PP.getSourceManager()), FileMgr(PP.getFileManager()),
PCHContainerRdr(PCHContainerRdr), Diags(PP.getDiagnostics()), PP(PP),
- ContextObj(Context),
- ModuleMgr(PP.getFileManager(), PP.getPCMCache(), PCHContainerRdr,
- PP.getHeaderSearchInfo()),
- PCMCache(PP.getPCMCache()), DummyIdResolver(PP),
- ReadTimer(std::move(ReadTimer)), isysroot(isysroot),
+ ContextObj(Context), ModuleMgr(PP.getFileManager(), ModuleCache,
+ PCHContainerRdr, PP.getHeaderSearchInfo()),
+ DummyIdResolver(PP), ReadTimer(std::move(ReadTimer)), isysroot(isysroot),
DisableValidation(DisableValidation),
AllowASTWithCompilerErrors(AllowASTWithCompilerErrors),
AllowConfigurationMismatch(AllowConfigurationMismatch),
@@ -11648,8 +12173,8 @@ IdentifierResolver &ASTReader::getIdResolver() {
return SemaObj ? SemaObj->IdResolver : DummyIdResolver;
}
-unsigned ASTRecordReader::readRecord(llvm::BitstreamCursor &Cursor,
- unsigned AbbrevID) {
+Expected<unsigned> ASTRecordReader::readRecord(llvm::BitstreamCursor &Cursor,
+ unsigned AbbrevID) {
Idx = 0;
Record.clear();
return Cursor.readRecord(AbbrevID, Record);
@@ -11676,6 +12201,9 @@ OMPClause *OMPClauseReader::readClause() {
case OMPC_simdlen:
C = new (Context) OMPSimdlenClause();
break;
+ case OMPC_allocator:
+ C = new (Context) OMPAllocatorClause();
+ break;
case OMPC_collapse:
C = new (Context) OMPCollapseClause();
break;
@@ -11785,12 +12313,12 @@ OMPClause *OMPClauseReader::readClause() {
C = new (Context) OMPDeviceClause();
break;
case OMPC_map: {
- unsigned NumVars = Record.readInt();
- unsigned NumDeclarations = Record.readInt();
- unsigned NumLists = Record.readInt();
- unsigned NumComponents = Record.readInt();
- C = OMPMapClause::CreateEmpty(Context, NumVars, NumDeclarations, NumLists,
- NumComponents);
+ OMPMappableExprListSizeTy Sizes;
+ Sizes.NumVars = Record.readInt();
+ Sizes.NumUniqueDeclarations = Record.readInt();
+ Sizes.NumComponentLists = Record.readInt();
+ Sizes.NumComponents = Record.readInt();
+ C = OMPMapClause::CreateEmpty(Context, Sizes);
break;
}
case OMPC_num_teams:
@@ -11818,41 +12346,44 @@ OMPClause *OMPClauseReader::readClause() {
C = new (Context) OMPDefaultmapClause();
break;
case OMPC_to: {
- unsigned NumVars = Record.readInt();
- unsigned NumDeclarations = Record.readInt();
- unsigned NumLists = Record.readInt();
- unsigned NumComponents = Record.readInt();
- C = OMPToClause::CreateEmpty(Context, NumVars, NumDeclarations, NumLists,
- NumComponents);
+ OMPMappableExprListSizeTy Sizes;
+ Sizes.NumVars = Record.readInt();
+ Sizes.NumUniqueDeclarations = Record.readInt();
+ Sizes.NumComponentLists = Record.readInt();
+ Sizes.NumComponents = Record.readInt();
+ C = OMPToClause::CreateEmpty(Context, Sizes);
break;
}
case OMPC_from: {
- unsigned NumVars = Record.readInt();
- unsigned NumDeclarations = Record.readInt();
- unsigned NumLists = Record.readInt();
- unsigned NumComponents = Record.readInt();
- C = OMPFromClause::CreateEmpty(Context, NumVars, NumDeclarations, NumLists,
- NumComponents);
+ OMPMappableExprListSizeTy Sizes;
+ Sizes.NumVars = Record.readInt();
+ Sizes.NumUniqueDeclarations = Record.readInt();
+ Sizes.NumComponentLists = Record.readInt();
+ Sizes.NumComponents = Record.readInt();
+ C = OMPFromClause::CreateEmpty(Context, Sizes);
break;
}
case OMPC_use_device_ptr: {
- unsigned NumVars = Record.readInt();
- unsigned NumDeclarations = Record.readInt();
- unsigned NumLists = Record.readInt();
- unsigned NumComponents = Record.readInt();
- C = OMPUseDevicePtrClause::CreateEmpty(Context, NumVars, NumDeclarations,
- NumLists, NumComponents);
+ OMPMappableExprListSizeTy Sizes;
+ Sizes.NumVars = Record.readInt();
+ Sizes.NumUniqueDeclarations = Record.readInt();
+ Sizes.NumComponentLists = Record.readInt();
+ Sizes.NumComponents = Record.readInt();
+ C = OMPUseDevicePtrClause::CreateEmpty(Context, Sizes);
break;
}
case OMPC_is_device_ptr: {
- unsigned NumVars = Record.readInt();
- unsigned NumDeclarations = Record.readInt();
- unsigned NumLists = Record.readInt();
- unsigned NumComponents = Record.readInt();
- C = OMPIsDevicePtrClause::CreateEmpty(Context, NumVars, NumDeclarations,
- NumLists, NumComponents);
+ OMPMappableExprListSizeTy Sizes;
+ Sizes.NumVars = Record.readInt();
+ Sizes.NumUniqueDeclarations = Record.readInt();
+ Sizes.NumComponentLists = Record.readInt();
+ Sizes.NumComponents = Record.readInt();
+ C = OMPIsDevicePtrClause::CreateEmpty(Context, Sizes);
break;
}
+ case OMPC_allocate:
+ C = OMPAllocateClause::CreateEmpty(Context, Record.readInt());
+ break;
}
Visit(C);
C->setLocStart(Record.readSourceLocation());
@@ -11901,6 +12432,11 @@ void OMPClauseReader::VisitOMPSimdlenClause(OMPSimdlenClause *C) {
C->setLParenLoc(Record.readSourceLocation());
}
+void OMPClauseReader::VisitOMPAllocatorClause(OMPAllocatorClause *C) {
+ C->setAllocator(Record.readExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
void OMPClauseReader::VisitOMPCollapseClause(OMPCollapseClause *C) {
C->setNumForLoops(Record.readSubExpr());
C->setLParenLoc(Record.readSourceLocation());
@@ -12289,6 +12825,10 @@ void OMPClauseReader::VisitOMPMapClause(OMPMapClause *C) {
I, static_cast<OpenMPMapModifierKind>(Record.readInt()));
C->setMapTypeModifierLoc(I, Record.readSourceLocation());
}
+ C->setMapperQualifierLoc(Record.readNestedNameSpecifierLoc());
+ DeclarationNameInfo DNI;
+ Record.readDeclarationNameInfo(DNI);
+ C->setMapperIdInfo(DNI);
C->setMapType(
static_cast<OpenMPMapClauseKind>(Record.readInt()));
C->setMapLoc(Record.readSourceLocation());
@@ -12301,9 +12841,15 @@ void OMPClauseReader::VisitOMPMapClause(OMPMapClause *C) {
SmallVector<Expr *, 16> Vars;
Vars.reserve(NumVars);
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Record.readSubExpr());
+ Vars.push_back(Record.readExpr());
C->setVarRefs(Vars);
+ SmallVector<Expr *, 16> UDMappers;
+ UDMappers.reserve(NumVars);
+ for (unsigned I = 0; I < NumVars; ++I)
+ UDMappers.push_back(Record.readExpr());
+ C->setUDMapperRefs(UDMappers);
+
SmallVector<ValueDecl *, 16> Decls;
Decls.reserve(UniqueDecls);
for (unsigned i = 0; i < UniqueDecls; ++i)
@@ -12325,7 +12871,7 @@ void OMPClauseReader::VisitOMPMapClause(OMPMapClause *C) {
SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
Components.reserve(TotalComponents);
for (unsigned i = 0; i < TotalComponents; ++i) {
- Expr *AssociatedExpr = Record.readSubExpr();
+ Expr *AssociatedExpr = Record.readExpr();
auto *AssociatedDecl = Record.readDeclAs<ValueDecl>();
Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
AssociatedExpr, AssociatedDecl));
@@ -12333,6 +12879,18 @@ void OMPClauseReader::VisitOMPMapClause(OMPMapClause *C) {
C->setComponents(Components, ListSizes);
}
+void OMPClauseReader::VisitOMPAllocateClause(OMPAllocateClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setColonLoc(Record.readSourceLocation());
+ C->setAllocator(Record.readSubExpr());
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+}
+
void OMPClauseReader::VisitOMPNumTeamsClause(OMPNumTeamsClause *C) {
VisitOMPClauseWithPreInit(C);
C->setNumTeams(Record.readSubExpr());
@@ -12387,6 +12945,10 @@ void OMPClauseReader::VisitOMPDefaultmapClause(OMPDefaultmapClause *C) {
void OMPClauseReader::VisitOMPToClause(OMPToClause *C) {
C->setLParenLoc(Record.readSourceLocation());
+ C->setMapperQualifierLoc(Record.readNestedNameSpecifierLoc());
+ DeclarationNameInfo DNI;
+ Record.readDeclarationNameInfo(DNI);
+ C->setMapperIdInfo(DNI);
auto NumVars = C->varlist_size();
auto UniqueDecls = C->getUniqueDeclarationsNum();
auto TotalLists = C->getTotalComponentListNum();
@@ -12398,6 +12960,12 @@ void OMPClauseReader::VisitOMPToClause(OMPToClause *C) {
Vars.push_back(Record.readSubExpr());
C->setVarRefs(Vars);
+ SmallVector<Expr *, 16> UDMappers;
+ UDMappers.reserve(NumVars);
+ for (unsigned I = 0; I < NumVars; ++I)
+ UDMappers.push_back(Record.readSubExpr());
+ C->setUDMapperRefs(UDMappers);
+
SmallVector<ValueDecl *, 16> Decls;
Decls.reserve(UniqueDecls);
for (unsigned i = 0; i < UniqueDecls; ++i)
@@ -12429,6 +12997,10 @@ void OMPClauseReader::VisitOMPToClause(OMPToClause *C) {
void OMPClauseReader::VisitOMPFromClause(OMPFromClause *C) {
C->setLParenLoc(Record.readSourceLocation());
+ C->setMapperQualifierLoc(Record.readNestedNameSpecifierLoc());
+ DeclarationNameInfo DNI;
+ Record.readDeclarationNameInfo(DNI);
+ C->setMapperIdInfo(DNI);
auto NumVars = C->varlist_size();
auto UniqueDecls = C->getUniqueDeclarationsNum();
auto TotalLists = C->getTotalComponentListNum();
@@ -12440,6 +13012,12 @@ void OMPClauseReader::VisitOMPFromClause(OMPFromClause *C) {
Vars.push_back(Record.readSubExpr());
C->setVarRefs(Vars);
+ SmallVector<Expr *, 16> UDMappers;
+ UDMappers.reserve(NumVars);
+ for (unsigned I = 0; I < NumVars; ++I)
+ UDMappers.push_back(Record.readSubExpr());
+ C->setUDMapperRefs(UDMappers);
+
SmallVector<ValueDecl *, 16> Decls;
Decls.reserve(UniqueDecls);
for (unsigned i = 0; i < UniqueDecls; ++i)
diff --git a/lib/Serialization/ASTReaderDecl.cpp b/lib/Serialization/ASTReaderDecl.cpp
index 763ab527570d..3cac82ad421c 100644
--- a/lib/Serialization/ASTReaderDecl.cpp
+++ b/lib/Serialization/ASTReaderDecl.cpp
@@ -1,9 +1,8 @@
//===- ASTReaderDecl.cpp - Decl Deserialization ---------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -58,7 +57,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
-#include "llvm/Bitcode/BitstreamReader.h"
+#include "llvm/Bitstream/BitstreamReader.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
@@ -383,6 +382,7 @@ namespace clang {
void VisitBindingDecl(BindingDecl *BD);
void VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
DeclID VisitTemplateDecl(TemplateDecl *D);
+ void VisitConceptDecl(ConceptDecl *D);
RedeclarableResult VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D);
void VisitClassTemplateDecl(ClassTemplateDecl *D);
void VisitBuiltinTemplateDecl(BuiltinTemplateDecl *D);
@@ -445,7 +445,9 @@ namespace clang {
void VisitObjCPropertyDecl(ObjCPropertyDecl *D);
void VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
void VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D);
+ void VisitOMPAllocateDecl(OMPAllocateDecl *D);
void VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D);
+ void VisitOMPDeclareMapperDecl(OMPDeclareMapperDecl *D);
void VisitOMPRequiresDecl(OMPRequiresDecl *D);
void VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D);
};
@@ -792,6 +794,9 @@ ASTDeclReader::VisitRecordDeclImpl(RecordDecl *RD) {
RD->setNonTrivialToPrimitiveDefaultInitialize(Record.readInt());
RD->setNonTrivialToPrimitiveCopy(Record.readInt());
RD->setNonTrivialToPrimitiveDestroy(Record.readInt());
+ RD->setHasNonTrivialToPrimitiveDefaultInitializeCUnion(Record.readInt());
+ RD->setHasNonTrivialToPrimitiveDestructCUnion(Record.readInt());
+ RD->setHasNonTrivialToPrimitiveCopyCUnion(Record.readInt());
RD->setParamDestroyedInCallee(Record.readInt());
RD->setArgPassingRestrictions((RecordDecl::ArgPassingKind)Record.readInt());
return Redecl;
@@ -857,7 +862,6 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
FD->setStorageClass(static_cast<StorageClass>(Record.readInt()));
FD->setInlineSpecified(Record.readInt());
FD->setImplicitlyInline(Record.readInt());
- FD->setExplicitSpecified(Record.readInt());
FD->setVirtualAsWritten(Record.readInt());
FD->setPure(Record.readInt());
FD->setHasInheritedPrototype(Record.readInt());
@@ -868,7 +872,7 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
FD->setDefaulted(Record.readInt());
FD->setExplicitlyDefaulted(Record.readInt());
FD->setHasImplicitReturnZero(Record.readInt());
- FD->setConstexpr(Record.readInt());
+ FD->setConstexprKind(static_cast<ConstexprSpecKind>(Record.readInt()));
FD->setUsesSEHTry(Record.readInt());
FD->setHasSkippedBody(Record.readInt());
FD->setIsMultiVersion(Record.readInt());
@@ -927,12 +931,22 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
TemplateArgumentListInfo TemplArgsInfo(LAngleLoc, RAngleLoc);
for (unsigned i = 0, e = TemplArgLocs.size(); i != e; ++i)
TemplArgsInfo.addArgument(TemplArgLocs[i]);
- FunctionTemplateSpecializationInfo *FTInfo
- = FunctionTemplateSpecializationInfo::Create(C, FD, Template, TSK,
- TemplArgList,
- HasTemplateArgumentsAsWritten ? &TemplArgsInfo
- : nullptr,
- POI);
+
+ MemberSpecializationInfo *MSInfo = nullptr;
+ if (Record.readInt()) {
+ auto *FD = ReadDeclAs<FunctionDecl>();
+ auto TSK = (TemplateSpecializationKind)Record.readInt();
+ SourceLocation POI = ReadSourceLocation();
+
+ MSInfo = new (C) MemberSpecializationInfo(FD, TSK);
+ MSInfo->setPointOfInstantiation(POI);
+ }
+
+ FunctionTemplateSpecializationInfo *FTInfo =
+ FunctionTemplateSpecializationInfo::Create(
+ C, FD, Template, TSK, TemplArgList,
+ HasTemplateArgumentsAsWritten ? &TemplArgsInfo : nullptr, POI,
+ MSInfo);
FD->TemplateOrSpecialization = FTInfo;
if (FD->isCanonicalDecl()) { // if canonical add to template's set.
@@ -955,7 +969,7 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
else {
assert(Reader.getContext().getLangOpts().Modules &&
"already deserialized this template specialization");
- mergeRedeclarable(FD, ExistingInfo->Function, Redecl);
+ mergeRedeclarable(FD, ExistingInfo->getFunction(), Redecl);
}
}
break;
@@ -1449,8 +1463,10 @@ void ASTDeclReader::VisitParmVarDecl(ParmVarDecl *PD) {
void ASTDeclReader::VisitDecompositionDecl(DecompositionDecl *DD) {
VisitVarDecl(DD);
auto **BDs = DD->getTrailingObjects<BindingDecl *>();
- for (unsigned I = 0; I != DD->NumBindings; ++I)
+ for (unsigned I = 0; I != DD->NumBindings; ++I) {
BDs[I] = ReadDeclAs<BindingDecl>();
+ BDs[I]->setDecomposedDecl(DD);
+ }
}
void ASTDeclReader::VisitBindingDecl(BindingDecl *BD) {
@@ -1479,6 +1495,7 @@ void ASTDeclReader::VisitBlockDecl(BlockDecl *BD) {
BD->setBlockMissingReturnType(Record.readInt());
BD->setIsConversionFromLambda(Record.readInt());
BD->setDoesNotEscape(Record.readInt());
+ BD->setCanAvoidCopyToHeap(Record.readInt());
bool capturesCXXThis = Record.readInt();
unsigned numCaptures = Record.readInt();
@@ -1965,6 +1982,7 @@ ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) {
}
void ASTDeclReader::VisitCXXDeductionGuideDecl(CXXDeductionGuideDecl *D) {
+ D->setExplicitSpecifier(Record.readExplicitSpec());
VisitFunctionDecl(D);
D->setIsCopyDeductionCandidate(Record.readInt());
}
@@ -1990,6 +2008,7 @@ void ASTDeclReader::VisitCXXMethodDecl(CXXMethodDecl *D) {
void ASTDeclReader::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
// We need the inherited constructor information to merge the declaration,
// so we have to read it before we call VisitCXXMethodDecl.
+ D->setExplicitSpecifier(Record.readExplicitSpec());
if (D->isInheritingConstructor()) {
auto *Shadow = ReadDeclAs<ConstructorUsingShadowDecl>();
auto *Ctor = ReadDeclAs<CXXConstructorDecl>();
@@ -2015,6 +2034,7 @@ void ASTDeclReader::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
}
void ASTDeclReader::VisitCXXConversionDecl(CXXConversionDecl *D) {
+ D->setExplicitSpecifier(Record.readExplicitSpec());
VisitCXXMethodDecl(D);
}
@@ -2073,6 +2093,12 @@ DeclID ASTDeclReader::VisitTemplateDecl(TemplateDecl *D) {
return PatternID;
}
+void ASTDeclReader::VisitConceptDecl(ConceptDecl *D) {
+ VisitTemplateDecl(D);
+ D->ConstraintExpr = Record.readExpr();
+ mergeMergeable(D);
+}
+
ASTDeclReader::RedeclarableResult
ASTDeclReader::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
RedeclarableResult Redecl = VisitRedeclarable(D);
@@ -2242,6 +2268,8 @@ void ASTDeclReader::VisitClassScopeFunctionSpecializationDecl(
ClassScopeFunctionSpecializationDecl *D) {
VisitDecl(D);
D->Specialization = ReadDeclAs<CXXMethodDecl>();
+ if (Record.readInt())
+ D->TemplateArgs = Record.readASTTemplateArgumentListInfo();
}
void ASTDeclReader::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
@@ -2632,6 +2660,24 @@ void ASTDeclReader::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) {
D->setVars(Vars);
}
+void ASTDeclReader::VisitOMPAllocateDecl(OMPAllocateDecl *D) {
+ VisitDecl(D);
+ unsigned NumVars = D->varlist_size();
+ unsigned NumClauses = D->clauselist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i) {
+ Vars.push_back(Record.readExpr());
+ }
+ D->setVars(Vars);
+ SmallVector<OMPClause *, 8> Clauses;
+ Clauses.reserve(NumClauses);
+ OMPClauseReader ClauseReader(Record);
+ for (unsigned I = 0; I != NumClauses; ++I)
+ Clauses.push_back(ClauseReader.readClause());
+ D->setClauses(Clauses);
+}
+
void ASTDeclReader::VisitOMPRequiresDecl(OMPRequiresDecl * D) {
VisitDecl(D);
unsigned NumClauses = D->clauselist_size();
@@ -2660,6 +2706,22 @@ void ASTDeclReader::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) {
D->PrevDeclInScope = ReadDeclID();
}
+void ASTDeclReader::VisitOMPDeclareMapperDecl(OMPDeclareMapperDecl *D) {
+ VisitValueDecl(D);
+ D->setLocation(ReadSourceLocation());
+ Expr *MapperVarRefE = Record.readExpr();
+ D->setMapperVarRef(MapperVarRefE);
+ D->VarName = Record.readDeclarationName();
+ D->PrevDeclInScope = ReadDeclID();
+ unsigned NumClauses = D->clauselist_size();
+ SmallVector<OMPClause *, 8> Clauses;
+ Clauses.reserve(NumClauses);
+ OMPClauseReader ClauseReader(Record);
+ for (unsigned I = 0; I != NumClauses; ++I)
+ Clauses.push_back(ClauseReader.readClause());
+ D->setClauses(Clauses);
+}
+
void ASTDeclReader::VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D) {
VisitVarDecl(D);
}
@@ -2763,7 +2825,7 @@ static bool isConsumerInterestedIn(ASTContext &Ctx, Decl *D, bool HasBody) {
// An ImportDecl or VarDecl imported from a module map module will get
// emitted when we import the relevant module.
- if (isa<ImportDecl>(D) || isa<VarDecl>(D)) {
+ if (isPartOfPerModuleInitializer(D)) {
auto *M = D->getImportedOwningModule();
if (M && M->Kind == Module::ModuleMapModule &&
Ctx.DeclMustBeEmitted(D))
@@ -2777,7 +2839,8 @@ static bool isConsumerInterestedIn(ASTContext &Ctx, Decl *D, bool HasBody) {
isa<PragmaCommentDecl>(D) ||
isa<PragmaDetectMismatchDecl>(D))
return true;
- if (isa<OMPThreadPrivateDecl>(D) || isa<OMPDeclareReductionDecl>(D))
+ if (isa<OMPThreadPrivateDecl>(D) || isa<OMPDeclareReductionDecl>(D) ||
+ isa<OMPDeclareMapperDecl>(D) || isa<OMPAllocateDecl>(D))
return !D->getDeclContext()->isFunctionOrMethod();
if (const auto *Var = dyn_cast<VarDecl>(D))
return Var->isFileVarDecl() &&
@@ -3626,14 +3689,28 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
// Note that we are loading a declaration record.
Deserializing ADecl(this);
- DeclsCursor.JumpToBit(Loc.Offset);
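+ // Failures while locating or reading the declaration record are fatal;
+ // this helper attaches context to the error before aborting.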
+ auto Fail = [](const char *what, llvm::Error &&Err) {
+ llvm::report_fatal_error(Twine("ASTReader::ReadDeclRecord failed ") + what +
+ ": " + toString(std::move(Err)));
+ };
+
+ if (llvm::Error JumpFailed = DeclsCursor.JumpToBit(Loc.Offset))
+ Fail("jumping", std::move(JumpFailed));
ASTRecordReader Record(*this, *Loc.F);
ASTDeclReader Reader(*this, Record, Loc, ID, DeclLoc);
- unsigned Code = DeclsCursor.ReadCode();
+ Expected<unsigned> MaybeCode = DeclsCursor.ReadCode();
+ if (!MaybeCode)
+ Fail("reading code", MaybeCode.takeError());
+ unsigned Code = MaybeCode.get();
ASTContext &Context = getContext();
Decl *D = nullptr;
- switch ((DeclCode)Record.readRecord(DeclsCursor, Code)) {
+ Expected<unsigned> MaybeDeclCode = Record.readRecord(DeclsCursor, Code);
+ if (!MaybeDeclCode)
+ llvm::report_fatal_error(
+ "ASTReader::ReadDeclRecord failed reading decl code: " +
+ toString(MaybeDeclCode.takeError()));
+ switch ((DeclCode)MaybeDeclCode.get()) {
case DECL_CONTEXT_LEXICAL:
case DECL_CONTEXT_VISIBLE:
llvm_unreachable("Record cannot be de-serialized with ReadDeclRecord");
@@ -3701,10 +3778,7 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
D = CXXMethodDecl::CreateDeserialized(Context, ID);
break;
case DECL_CXX_CONSTRUCTOR:
- D = CXXConstructorDecl::CreateDeserialized(Context, ID, false);
- break;
- case DECL_CXX_INHERITED_CONSTRUCTOR:
- D = CXXConstructorDecl::CreateDeserialized(Context, ID, true);
+ D = CXXConstructorDecl::CreateDeserialized(Context, ID, Record.readInt());
break;
case DECL_CXX_DESTRUCTOR:
D = CXXDestructorDecl::CreateDeserialized(Context, ID);
@@ -3765,6 +3839,9 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
case DECL_TYPE_ALIAS_TEMPLATE:
D = TypeAliasTemplateDecl::CreateDeserialized(Context, ID);
break;
+ case DECL_CONCEPT:
+ D = ConceptDecl::CreateDeserialized(Context, ID);
+ break;
case DECL_STATIC_ASSERT:
D = StaticAssertDecl::CreateDeserialized(Context, ID);
break;
@@ -3848,12 +3925,21 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
case DECL_OMP_THREADPRIVATE:
D = OMPThreadPrivateDecl::CreateDeserialized(Context, ID, Record.readInt());
break;
+ case DECL_OMP_ALLOCATE: {
+ unsigned NumVars = Record.readInt();
+ unsigned NumClauses = Record.readInt();
+ D = OMPAllocateDecl::CreateDeserialized(Context, ID, NumVars, NumClauses);
+ break;
+ }
case DECL_OMP_REQUIRES:
D = OMPRequiresDecl::CreateDeserialized(Context, ID, Record.readInt());
break;
case DECL_OMP_DECLARE_REDUCTION:
D = OMPDeclareReductionDecl::CreateDeserialized(Context, ID);
break;
+ case DECL_OMP_DECLARE_MAPPER:
+ D = OMPDeclareMapperDecl::CreateDeserialized(Context, ID, Record.readInt());
+ break;
case DECL_OMP_CAPTUREDEXPR:
D = OMPCapturedExprDecl::CreateDeserialized(Context, ID);
break;
@@ -3966,12 +4052,25 @@ void ASTReader::loadDeclUpdateRecords(PendingUpdateRecord &Record) {
uint64_t Offset = FileAndOffset.second;
llvm::BitstreamCursor &Cursor = F->DeclsCursor;
SavedStreamPosition SavedPosition(Cursor);
- Cursor.JumpToBit(Offset);
- unsigned Code = Cursor.ReadCode();
+ if (llvm::Error JumpFailed = Cursor.JumpToBit(Offset))
+ // FIXME: Don't report a fatal error here.
+ llvm::report_fatal_error(
+ "ASTReader::loadDeclUpdateRecords failed jumping: " +
+ toString(std::move(JumpFailed)));
+ Expected<unsigned> MaybeCode = Cursor.ReadCode();
+ if (!MaybeCode)
+ llvm::report_fatal_error(
+ "ASTReader::loadDeclUpdateRecords failed reading code: " +
+ toString(MaybeCode.takeError()));
+ unsigned Code = MaybeCode.get();
ASTRecordReader Record(*this, *F);
- unsigned RecCode = Record.readRecord(Cursor, Code);
- (void)RecCode;
- assert(RecCode == DECL_UPDATES && "Expected DECL_UPDATES record!");
+ if (Expected<unsigned> MaybeRecCode = Record.readRecord(Cursor, Code))
+ assert(MaybeRecCode.get() == DECL_UPDATES &&
+ "Expected DECL_UPDATES record!");
+ else
+ llvm::report_fatal_error(
+ "ASTReader::loadDeclUpdateRecords failed reading rec code: " +
+ toString(MaybeRecCode.takeError()));
ASTDeclReader Reader(*this, Record, RecordLocation(F, Offset), ID,
SourceLocation());
@@ -4035,13 +4134,25 @@ void ASTReader::loadPendingDeclChain(Decl *FirstLocal, uint64_t LocalOffset) {
llvm::BitstreamCursor &Cursor = M->DeclsCursor;
SavedStreamPosition SavedPosition(Cursor);
- Cursor.JumpToBit(LocalOffset);
+ if (llvm::Error JumpFailed = Cursor.JumpToBit(LocalOffset))
+ llvm::report_fatal_error(
+ "ASTReader::loadPendingDeclChain failed jumping: " +
+ toString(std::move(JumpFailed)));
RecordData Record;
- unsigned Code = Cursor.ReadCode();
- unsigned RecCode = Cursor.readRecord(Code, Record);
- (void)RecCode;
- assert(RecCode == LOCAL_REDECLARATIONS && "expected LOCAL_REDECLARATIONS record!");
+ Expected<unsigned> MaybeCode = Cursor.ReadCode();
+ if (!MaybeCode)
+ llvm::report_fatal_error(
+ "ASTReader::loadPendingDeclChain failed reading code: " +
+ toString(MaybeCode.takeError()));
+ unsigned Code = MaybeCode.get();
+ if (Expected<unsigned> MaybeRecCode = Cursor.readRecord(Code, Record))
+ assert(MaybeRecCode.get() == LOCAL_REDECLARATIONS &&
+ "expected LOCAL_REDECLARATIONS record!");
+ else
+ llvm::report_fatal_error(
+ "ASTReader::loadPendingDeclChain failed reading rec code: " +
+ toString(MaybeRecCode.takeError()));
// FIXME: We have several different dispatches on decl kind here; maybe
// we should instead generate one loop per kind and dispatch up-front?
@@ -4444,6 +4555,16 @@ void ASTDeclReader::UpdateDecl(Decl *D,
ReadSourceRange()));
break;
+ case UPD_DECL_MARKED_OPENMP_ALLOCATE: {
+ auto AllocatorKind =
+ static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(Record.readInt());
+ Expr *Allocator = Record.readExpr();
+ SourceRange SR = ReadSourceRange();
+ D->addAttr(OMPAllocateDeclAttr::CreateImplicit(
+ Reader.getContext(), AllocatorKind, Allocator, SR));
+ break;
+ }
+
case UPD_DECL_EXPORTED: {
unsigned SubmoduleID = readSubmoduleID();
auto *Exported = cast<NamedDecl>(D);
diff --git a/lib/Serialization/ASTReaderInternals.h b/lib/Serialization/ASTReaderInternals.h
index 37a929907dca..265a77fdb215 100644
--- a/lib/Serialization/ASTReaderInternals.h
+++ b/lib/Serialization/ASTReaderInternals.h
@@ -1,9 +1,8 @@
//===- ASTReaderInternals.h - AST Reader Internals --------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Serialization/ASTReaderStmt.cpp b/lib/Serialization/ASTReaderStmt.cpp
index 60abea95bfaf..afaaa543bb27 100644
--- a/lib/Serialization/ASTReaderStmt.cpp
+++ b/lib/Serialization/ASTReaderStmt.cpp
@@ -1,9 +1,8 @@
//===- ASTReaderStmt.cpp - Stmt/Expr Deserialization ----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -53,7 +52,7 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Bitcode/BitstreamReader.h"
+#include "llvm/Bitstream/BitstreamReader.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
@@ -112,7 +111,7 @@ namespace clang {
/// The number of record fields required for the Stmt class
/// itself.
- static const unsigned NumStmtFields = 0;
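+ // The one Stmt field is the IsOMPStructuredBlock bit read back in
+ // VisitStmt().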
+ static const unsigned NumStmtFields = 1;
/// The number of record fields required for the Expr class
/// itself.
@@ -148,6 +147,7 @@ void ASTStmtReader::ReadTemplateKWAndArgsInfo(ASTTemplateKWAndArgsInfo &Args,
}
void ASTStmtReader::VisitStmt(Stmt *S) {
+ S->setIsOMPStructuredBlock(Record.readInt());
assert(Record.getIdx() == NumStmtFields && "Incorrect statement field count");
}
@@ -370,12 +370,14 @@ void ASTStmtReader::VisitAsmStmt(AsmStmt *S) {
void ASTStmtReader::VisitGCCAsmStmt(GCCAsmStmt *S) {
VisitAsmStmt(S);
+ S->NumLabels = Record.readInt();
S->setRParenLoc(ReadSourceLocation());
S->setAsmString(cast_or_null<StringLiteral>(Record.readSubStmt()));
unsigned NumOutputs = S->getNumOutputs();
unsigned NumInputs = S->getNumInputs();
unsigned NumClobbers = S->getNumClobbers();
+ unsigned NumLabels = S->getNumLabels();
// Outputs and inputs
SmallVector<IdentifierInfo *, 16> Names;
@@ -392,9 +394,14 @@ void ASTStmtReader::VisitGCCAsmStmt(GCCAsmStmt *S) {
for (unsigned I = 0; I != NumClobbers; ++I)
Clobbers.push_back(cast_or_null<StringLiteral>(Record.readSubStmt()));
+ // Labels
+ for (unsigned I = 0, N = NumLabels; I != N; ++I)
+ Exprs.push_back(Record.readSubStmt());
+
S->setOutputsAndInputsAndClobbers(Record.getContext(),
Names.data(), Constraints.data(),
Exprs.data(), NumOutputs, NumInputs,
+ NumLabels,
Clobbers.data(), NumClobbers);
}
@@ -526,6 +533,18 @@ void ASTStmtReader::VisitExpr(Expr *E) {
void ASTStmtReader::VisitConstantExpr(ConstantExpr *E) {
VisitExpr(E);
+ E->ConstantExprBits.ResultKind = Record.readInt();
+ switch (E->ConstantExprBits.ResultKind) {
+ case ConstantExpr::RSK_Int64: {
+ E->Int64Result() = Record.readInt();
+ uint64_t tmp = Record.readInt();
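+ // Bit 0 of the packed word is the IsUnsigned flag; the remaining bits
+ // hold the bit width of the stored integer result.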
+ E->ConstantExprBits.IsUnsigned = tmp & 0x1;
+ E->ConstantExprBits.BitWidth = tmp >> 1;
+ break;
+ }
+ case ConstantExpr::RSK_APValue:
+ E->APValueResult() = Record.readAPValue();
+ }
E->setSubExpr(Record.readSubExpr());
}
@@ -547,6 +566,7 @@ void ASTStmtReader::VisitDeclRefExpr(DeclRefExpr *E) {
E->DeclRefExprBits.HasTemplateKWAndArgsInfo = Record.readInt();
E->DeclRefExprBits.HadMultipleCandidates = Record.readInt();
E->DeclRefExprBits.RefersToEnclosingVariableOrCapture = Record.readInt();
+ E->DeclRefExprBits.NonOdrUseReason = Record.readInt();
unsigned NumTemplateArgs = 0;
if (E->hasTemplateKWAndArgsInfo())
NumTemplateArgs = Record.readInt();
@@ -582,7 +602,8 @@ void ASTStmtReader::VisitFixedPointLiteral(FixedPointLiteral *E) {
void ASTStmtReader::VisitFloatingLiteral(FloatingLiteral *E) {
VisitExpr(E);
- E->setRawSemantics(static_cast<Stmt::APFloatSemantics>(Record.readInt()));
+ E->setRawSemantics(
+ static_cast<llvm::APFloatBase::Semantics>(Record.readInt()));
E->setExact(Record.readInt());
E->setValue(Record.getContext(), Record.readAPFloat(E->getSemantics()));
E->setLocation(ReadSourceLocation());
@@ -745,9 +766,47 @@ void ASTStmtReader::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
}
void ASTStmtReader::VisitMemberExpr(MemberExpr *E) {
- // Don't call VisitExpr, this is fully initialized at creation.
- assert(E->getStmtClass() == Stmt::MemberExprClass &&
- "It's a subclass, we must advance Idx!");
+ VisitExpr(E);
+
+ bool HasQualifier = Record.readInt();
+ bool HasFoundDecl = Record.readInt();
+ bool HasTemplateInfo = Record.readInt();
+ unsigned NumTemplateArgs = Record.readInt();
+
+ E->Base = Record.readSubExpr();
+ E->MemberDecl = Record.readDeclAs<ValueDecl>();
+ Record.readDeclarationNameLoc(E->MemberDNLoc, E->MemberDecl->getDeclName());
+ E->MemberLoc = Record.readSourceLocation();
+ E->MemberExprBits.IsArrow = Record.readInt();
+ E->MemberExprBits.HasQualifierOrFoundDecl = HasQualifier || HasFoundDecl;
+ E->MemberExprBits.HasTemplateKWAndArgsInfo = HasTemplateInfo;
+ E->MemberExprBits.HadMultipleCandidates = Record.readInt();
+ E->MemberExprBits.NonOdrUseReason = Record.readInt();
+ E->MemberExprBits.OperatorLoc = Record.readSourceLocation();
+
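+ // The MemberExprNameQualifier trailing object is only allocated when the
+ // expression has a qualifier or an explicitly recorded found declaration;
+ // otherwise the found declaration defaults to the member itself.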
+ if (HasQualifier || HasFoundDecl) {
+ DeclAccessPair FoundDecl;
+ if (HasFoundDecl) {
+ auto *FoundD = Record.readDeclAs<NamedDecl>();
+ auto AS = (AccessSpecifier)Record.readInt();
+ FoundDecl = DeclAccessPair::make(FoundD, AS);
+ } else {
+ FoundDecl = DeclAccessPair::make(E->MemberDecl,
+ E->MemberDecl->getAccess());
+ }
+ E->getTrailingObjects<MemberExprNameQualifier>()->FoundDecl = FoundDecl;
+
+ NestedNameSpecifierLoc QualifierLoc;
+ if (HasQualifier)
+ QualifierLoc = Record.readNestedNameSpecifierLoc();
+ E->getTrailingObjects<MemberExprNameQualifier>()->QualifierLoc =
+ QualifierLoc;
+ }
+
+ if (HasTemplateInfo)
+ ReadTemplateKWAndArgsInfo(
+ *E->getTrailingObjects<ASTTemplateKWAndArgsInfo>(),
+ E->getTrailingObjects<TemplateArgumentLoc>(), NumTemplateArgs);
}
void ASTStmtReader::VisitObjCIsaExpr(ObjCIsaExpr *E) {
@@ -968,6 +1027,15 @@ void ASTStmtReader::VisitVAArgExpr(VAArgExpr *E) {
E->setIsMicrosoftABI(Record.readInt());
}
+void ASTStmtReader::VisitSourceLocExpr(SourceLocExpr *E) {
+ VisitExpr(E);
+ E->ParentContext = ReadDeclAs<DeclContext>();
+ E->BuiltinLoc = ReadSourceLocation();
+ E->RParenLoc = ReadSourceLocation();
+ E->SourceLocExprBits.Kind =
+ static_cast<SourceLocExpr::IdentKind>(Record.readInt());
+}
+
void ASTStmtReader::VisitAddrLabelExpr(AddrLabelExpr *E) {
VisitExpr(E);
E->setAmpAmpLoc(ReadSourceLocation());
@@ -1023,21 +1091,24 @@ void ASTStmtReader::VisitBlockExpr(BlockExpr *E) {
void ASTStmtReader::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
VisitExpr(E);
- E->NumAssocs = Record.readInt();
- E->AssocTypes = new (Record.getContext()) TypeSourceInfo*[E->NumAssocs];
- E->SubExprs =
- new(Record.getContext()) Stmt*[GenericSelectionExpr::END_EXPR+E->NumAssocs];
- E->SubExprs[GenericSelectionExpr::CONTROLLING] = Record.readSubExpr();
- for (unsigned I = 0, N = E->getNumAssocs(); I != N; ++I) {
- E->AssocTypes[I] = GetTypeSourceInfo();
- E->SubExprs[GenericSelectionExpr::END_EXPR+I] = Record.readSubExpr();
- }
+ unsigned NumAssocs = Record.readInt();
+ assert(NumAssocs == E->getNumAssocs() && "Wrong NumAssocs!");
E->ResultIndex = Record.readInt();
-
- E->GenericLoc = ReadSourceLocation();
+ E->GenericSelectionExprBits.GenericLoc = ReadSourceLocation();
E->DefaultLoc = ReadSourceLocation();
E->RParenLoc = ReadSourceLocation();
+
+ Stmt **Stmts = E->getTrailingObjects<Stmt *>();
+ // Add 1 to account for the controlling expression, which is the first
+ // expression in the trailing array of Stmt *. This is not needed for
+ // the trailing array of TypeSourceInfo *.
+ for (unsigned I = 0, N = NumAssocs + 1; I < N; ++I)
+ Stmts[I] = Record.readSubExpr();
+
+ TypeSourceInfo **TSIs = E->getTrailingObjects<TypeSourceInfo *>();
+ for (unsigned I = 0, N = NumAssocs; I < N; ++I)
+ TSIs[I] = GetTypeSourceInfo();
}
void ASTStmtReader::VisitPseudoObjectExpr(PseudoObjectExpr *E) {
@@ -1252,7 +1323,7 @@ void ASTStmtReader::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
}
void ASTStmtReader::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
- VisitStmt(S);
+ VisitStmt(S); // FIXME: no test coverage.
S->setSubStmt(Record.readSubStmt());
S->setAtLoc(ReadSourceLocation());
}
@@ -1272,14 +1343,14 @@ void ASTStmtReader::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
}
void ASTStmtReader::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
- VisitStmt(S);
+ VisitStmt(S); // FIXME: no test coverage.
S->setSynchExpr(Record.readSubStmt());
S->setSynchBody(Record.readSubStmt());
S->setAtSynchronizedLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
- VisitStmt(S);
+ VisitStmt(S); // FIXME: no test coverage.
S->setThrowExpr(Record.readSubStmt());
S->setThrowLoc(ReadSourceLocation());
}
@@ -1439,6 +1510,12 @@ void ASTStmtReader::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E) {
E->setRParenLoc(ReadSourceLocation());
}
+void ASTStmtReader::VisitBuiltinBitCastExpr(BuiltinBitCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ E->KWLoc = ReadSourceLocation();
+ E->RParenLoc = ReadSourceLocation();
+}
+
void ASTStmtReader::VisitUserDefinedLiteral(UserDefinedLiteral *E) {
VisitCallExpr(E);
E->UDSuffixLoc = ReadSourceLocation();
@@ -1484,12 +1561,14 @@ void ASTStmtReader::VisitCXXThrowExpr(CXXThrowExpr *E) {
void ASTStmtReader::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
VisitExpr(E);
E->Param = ReadDeclAs<ParmVarDecl>();
+ E->UsedContext = ReadDeclAs<DeclContext>();
E->CXXDefaultArgExprBits.Loc = ReadSourceLocation();
}
void ASTStmtReader::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) {
VisitExpr(E);
E->Field = ReadDeclAs<FieldDecl>();
+ E->UsedContext = ReadDeclAs<DeclContext>();
E->CXXDefaultInitExprBits.Loc = ReadSourceLocation();
}
@@ -1789,9 +1868,9 @@ void ASTStmtReader::VisitFunctionParmPackExpr(FunctionParmPackExpr *E) {
E->NumParameters = Record.readInt();
E->ParamPack = ReadDeclAs<ParmVarDecl>();
E->NameLoc = ReadSourceLocation();
- auto **Parms = E->getTrailingObjects<ParmVarDecl *>();
+ auto **Parms = E->getTrailingObjects<VarDecl *>();
for (unsigned i = 0, n = E->NumParameters; i != n; ++i)
- Parms[i] = ReadDeclAs<ParmVarDecl>();
+ Parms[i] = ReadDeclAs<VarDecl>();
}
void ASTStmtReader::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
@@ -1807,6 +1886,7 @@ void ASTStmtReader::VisitCXXFoldExpr(CXXFoldExpr *E) {
E->LParenLoc = ReadSourceLocation();
E->EllipsisLoc = ReadSourceLocation();
E->RParenLoc = ReadSourceLocation();
+ E->NumExpansions = Record.readInt();
E->SubExprs[0] = Record.readSubExpr();
E->SubExprs[1] = Record.readSubExpr();
E->Opcode = (BinaryOperatorKind)Record.readInt();
@@ -1889,7 +1969,7 @@ void ASTStmtReader::VisitSEHTryStmt(SEHTryStmt *S) {
void ASTStmtReader::VisitCUDAKernelCallExpr(CUDAKernelCallExpr *E) {
VisitCallExpr(E);
- E->setConfig(cast<CallExpr>(Record.readSubExpr()));
+ E->setPreArg(CUDAKernelCallExpr::CONFIG, Record.readSubExpr());
}
//===----------------------------------------------------------------------===//
@@ -2318,7 +2398,13 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
Stmt::EmptyShell Empty;
while (true) {
- llvm::BitstreamEntry Entry = Cursor.advanceSkippingSubblocks();
+ llvm::Expected<llvm::BitstreamEntry> MaybeEntry =
+ Cursor.advanceSkippingSubblocks();
+ if (!MaybeEntry) {
+ Error(toString(MaybeEntry.takeError()));
+ return nullptr;
+ }
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
switch (Entry.Kind) {
case llvm::BitstreamEntry::SubBlock: // Handled for us already.
@@ -2336,7 +2422,12 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
Stmt *S = nullptr;
bool Finished = false;
bool IsStmtReference = false;
- switch ((StmtCode)Record.readRecord(Cursor, Entry.ID)) {
+ Expected<unsigned> MaybeStmtCode = Record.readRecord(Cursor, Entry.ID);
+ if (!MaybeStmtCode) {
+ Error(toString(MaybeStmtCode.takeError()));
+ return nullptr;
+ }
+ switch ((StmtCode)MaybeStmtCode.get()) {
case STMT_STOP:
Finished = true;
break;
@@ -2449,7 +2540,11 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_CONSTANT:
- S = new (Context) ConstantExpr(Empty);
+ S = ConstantExpr::CreateEmpty(
+ Context,
+ static_cast<ConstantExpr::ResultStorageKind>(
+ Record[ASTStmtReader::NumExprFields]),
+ Empty);
break;
case EXPR_PREDEFINED:
@@ -2465,7 +2560,7 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
/*HasFoundDecl=*/Record[ASTStmtReader::NumExprFields + 1],
/*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields + 2],
/*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields + 2] ?
- Record[ASTStmtReader::NumExprFields + 5] : 0);
+ Record[ASTStmtReader::NumExprFields + 6] : 0);
break;
case EXPR_INTEGER_LITERAL:
@@ -2529,55 +2624,12 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields], Empty);
break;
- case EXPR_MEMBER: {
- // We load everything here and fully initialize it at creation.
- // That way we can use MemberExpr::Create and don't have to duplicate its
- // logic with a MemberExpr::CreateEmpty.
-
- assert(Record.getIdx() == 0);
- NestedNameSpecifierLoc QualifierLoc;
- if (Record.readInt()) { // HasQualifier.
- QualifierLoc = Record.readNestedNameSpecifierLoc();
- }
-
- SourceLocation TemplateKWLoc;
- TemplateArgumentListInfo ArgInfo;
- bool HasTemplateKWAndArgsInfo = Record.readInt();
- if (HasTemplateKWAndArgsInfo) {
- TemplateKWLoc = Record.readSourceLocation();
- unsigned NumTemplateArgs = Record.readInt();
- ArgInfo.setLAngleLoc(Record.readSourceLocation());
- ArgInfo.setRAngleLoc(Record.readSourceLocation());
- for (unsigned i = 0; i != NumTemplateArgs; ++i)
- ArgInfo.addArgument(Record.readTemplateArgumentLoc());
- }
-
- bool HadMultipleCandidates = Record.readInt();
-
- auto *FoundD = Record.readDeclAs<NamedDecl>();
- auto AS = (AccessSpecifier)Record.readInt();
- DeclAccessPair FoundDecl = DeclAccessPair::make(FoundD, AS);
-
- QualType T = Record.readType();
- auto VK = static_cast<ExprValueKind>(Record.readInt());
- auto OK = static_cast<ExprObjectKind>(Record.readInt());
- Expr *Base = ReadSubExpr();
- auto *MemberD = Record.readDeclAs<ValueDecl>();
- SourceLocation MemberLoc = Record.readSourceLocation();
- DeclarationNameInfo MemberNameInfo(MemberD->getDeclName(), MemberLoc);
- bool IsArrow = Record.readInt();
- SourceLocation OperatorLoc = Record.readSourceLocation();
-
- S = MemberExpr::Create(Context, Base, IsArrow, OperatorLoc, QualifierLoc,
- TemplateKWLoc, MemberD, FoundDecl, MemberNameInfo,
- HasTemplateKWAndArgsInfo ? &ArgInfo : nullptr, T,
- VK, OK);
- Record.readDeclarationNameLoc(cast<MemberExpr>(S)->MemberDNLoc,
- MemberD->getDeclName());
- if (HadMultipleCandidates)
- cast<MemberExpr>(S)->setHadMultipleCandidates(true);
+ case EXPR_MEMBER:
+ S = MemberExpr::CreateEmpty(Context, Record[ASTStmtReader::NumExprFields],
+ Record[ASTStmtReader::NumExprFields + 1],
+ Record[ASTStmtReader::NumExprFields + 2],
+ Record[ASTStmtReader::NumExprFields + 3]);
break;
- }
case EXPR_BINARY_OPERATOR:
S = new (Context) BinaryOperator(Empty);
@@ -2647,6 +2699,10 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = new (Context) VAArgExpr(Empty);
break;
+ case EXPR_SOURCE_LOC:
+ S = new (Context) SourceLocExpr(Empty);
+ break;
+
case EXPR_ADDR_LABEL:
S = new (Context) AddrLabelExpr(Empty);
break;
@@ -2676,7 +2732,9 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_GENERIC_SELECTION:
- S = new (Context) GenericSelectionExpr(Empty);
+ S = GenericSelectionExpr::CreateEmpty(
+ Context,
+ /*NumAssocs=*/Record[ASTStmtReader::NumExprFields]);
break;
case EXPR_OBJC_STRING_LITERAL:
@@ -2803,7 +2861,7 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case STMT_CXX_TRY:
S = CXXTryStmt::Create(Context, Empty,
- /*NumHandlers=*/Record[ASTStmtReader::NumStmtFields]);
+ /*numHandlers=*/Record[ASTStmtReader::NumStmtFields]);
break;
case STMT_CXX_FOR_RANGE:
diff --git a/lib/Serialization/ASTWriter.cpp b/lib/Serialization/ASTWriter.cpp
index 37adcb70640d..10946f9b0d98 100644
--- a/lib/Serialization/ASTWriter.cpp
+++ b/lib/Serialization/ASTWriter.cpp
@@ -1,9 +1,8 @@
//===- ASTWriter.cpp - AST File Writer ------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -42,7 +41,6 @@
#include "clang/Basic/LLVM.h"
#include "clang/Basic/Lambda.h"
#include "clang/Basic/LangOptions.h"
-#include "clang/Basic/MemoryBufferCache.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/OpenCLOptions.h"
@@ -66,6 +64,7 @@
#include "clang/Sema/Sema.h"
#include "clang/Sema/Weak.h"
#include "clang/Serialization/ASTReader.h"
+#include "clang/Serialization/InMemoryModuleCache.h"
#include "clang/Serialization/Module.h"
#include "clang/Serialization/ModuleFileExtension.h"
#include "clang/Serialization/SerializationDiagnostic.h"
@@ -84,8 +83,8 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Bitcode/BitCodes.h"
-#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Bitstream/BitCodes.h"
+#include "llvm/Bitstream/BitstreamWriter.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compression.h"
#include "llvm/Support/DJB.h"
@@ -310,7 +309,7 @@ void ASTTypeWriter::VisitFunctionProtoType(const FunctionProtoType *T) {
Record.push_back(T->isVariadic());
Record.push_back(T->hasTrailingReturn());
- Record.push_back(T->getTypeQuals().getAsOpaqueValue());
+ Record.push_back(T->getMethodQuals().getAsOpaqueValue());
Record.push_back(static_cast<unsigned>(T->getRefQualifier()));
addExceptionSpec(T, Record);
@@ -323,7 +322,7 @@ void ASTTypeWriter::VisitFunctionProtoType(const FunctionProtoType *T) {
Record.push_back(T->getExtParameterInfo(I).getOpaqueValue());
}
- if (T->isVariadic() || T->hasTrailingReturn() || T->getTypeQuals() ||
+ if (T->isVariadic() || T->hasTrailingReturn() || T->getMethodQuals() ||
T->getRefQualifier() || T->getExceptionSpecType() != EST_None ||
T->hasExtParameterInfos())
AbbrevToUse = 0;
@@ -370,7 +369,8 @@ void ASTTypeWriter::VisitAutoType(const AutoType *T) {
Record.AddTypeRef(T->getDeducedType());
Record.push_back((unsigned)T->getKeyword());
if (T->getDeducedType().isNull())
- Record.push_back(T->isDependentType());
+ Record.push_back(T->containsUnexpandedParameterPack() ? 2 :
+ T->isDependentType() ? 1 : 0);
Code = TYPE_AUTO;
}
@@ -517,6 +517,12 @@ void ASTTypeWriter::VisitParenType(const ParenType *T) {
Code = TYPE_PAREN;
}
+void ASTTypeWriter::VisitMacroQualifiedType(const MacroQualifiedType *T) {
+ Record.AddTypeRef(T->getUnderlyingType());
+ Record.AddIdentifierRef(T->getMacroIdentifier());
+ Code = TYPE_MACRO_QUALIFIED;
+}
+
void ASTTypeWriter::VisitElaboratedType(const ElaboratedType *T) {
Record.push_back(T->getKeyword());
Record.AddNestedNameSpecifier(T->getQualifier());
@@ -803,6 +809,10 @@ void TypeLocWriter::VisitParenTypeLoc(ParenTypeLoc TL) {
Record.AddSourceLocation(TL.getRParenLoc());
}
+void TypeLocWriter::VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc TL) {
+ Record.AddSourceLocation(TL.getExpansionLoc());
+}
+
void TypeLocWriter::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
Record.AddSourceLocation(TL.getElaboratedKeywordLoc());
Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
@@ -1220,6 +1230,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(TYPE_DEPENDENT_TEMPLATE_SPECIALIZATION);
RECORD(TYPE_DEPENDENT_SIZED_ARRAY);
RECORD(TYPE_PAREN);
+ RECORD(TYPE_MACRO_QUALIFIED);
RECORD(TYPE_PACK_EXPANSION);
RECORD(TYPE_ATTRIBUTED);
RECORD(TYPE_SUBST_TEMPLATE_TYPE_PARM_PACK);
@@ -1267,7 +1278,6 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(DECL_CXX_RECORD);
RECORD(DECL_CXX_METHOD);
RECORD(DECL_CXX_CONSTRUCTOR);
- RECORD(DECL_CXX_INHERITED_CONSTRUCTOR);
RECORD(DECL_CXX_DESTRUCTOR);
RECORD(DECL_CXX_CONVERSION);
RECORD(DECL_ACCESS_SPEC);
@@ -1283,6 +1293,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(DECL_TEMPLATE_TYPE_PARM);
RECORD(DECL_NON_TYPE_TEMPLATE_PARM);
RECORD(DECL_TEMPLATE_TEMPLATE_PARM);
+ RECORD(DECL_CONCEPT);
RECORD(DECL_TYPE_ALIAS_TEMPLATE);
RECORD(DECL_STATIC_ASSERT);
RECORD(DECL_CXX_BASE_SPECIFIERS);
@@ -1299,6 +1310,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(DECL_PRAGMA_COMMENT);
RECORD(DECL_PRAGMA_DETECT_MISMATCH);
RECORD(DECL_OMP_DECLARE_REDUCTION);
+ RECORD(DECL_OMP_ALLOCATE);
// Statements and Exprs can occur in the Decls and Types block.
AddStmtsExprs(Stream, Record);
@@ -1435,7 +1447,7 @@ ASTFileSignature ASTWriter::writeUnhashedControlBlock(Preprocessor &PP,
Stream.EmitRecord(DIAGNOSTIC_OPTIONS, Record);
// Write out the diagnostic/pragma mappings.
- WritePragmaDiagnosticMappings(Diags, /* IsModule = */ WritingModule);
+ WritePragmaDiagnosticMappings(Diags, /* isModule = */ WritingModule);
// Leave the options block.
Stream.ExitBlock();
@@ -4278,14 +4290,32 @@ void ASTWriter::WriteOpenCLExtensionTypes(Sema &SemaRef) {
if (!SemaRef.Context.getLangOpts().OpenCL)
return;
+ // Sort the elements of the map OpenCLTypeExtMap by TypeID so that the
+ // emitted record is deterministic, without copying the extension sets.
+ const llvm::DenseMap<const Type *, std::set<std::string>> &OpenCLTypeExtMap =
+ SemaRef.OpenCLTypeExtMap;
+ using ElementTy = std::pair<TypeID, const std::set<std::string> *>;
+ llvm::SmallVector<ElementTy, 8> StableOpenCLTypeExtMap;
+ StableOpenCLTypeExtMap.reserve(OpenCLTypeExtMap.size());
+
+ for (const auto &I : OpenCLTypeExtMap)
+ StableOpenCLTypeExtMap.emplace_back(
+ getTypeID(I.first->getCanonicalTypeInternal()), &I.second);
+
+ auto CompareByTypeID = [](const ElementTy &E1, const ElementTy &E2) -> bool {
+ return E1.first < E2.first;
+ };
+ llvm::sort(StableOpenCLTypeExtMap, CompareByTypeID);
+
RecordData Record;
- for (const auto &I : SemaRef.OpenCLTypeExtMap) {
- Record.push_back(
- static_cast<unsigned>(getTypeID(I.first->getCanonicalTypeInternal())));
- Record.push_back(I.second.size());
- for (auto Ext : I.second)
+ for (const ElementTy &E : StableOpenCLTypeExtMap) {
+ Record.push_back(E.first); // TypeID
+ const std::set<std::string> *ExtSet = E.second;
+ Record.push_back(static_cast<unsigned>(ExtSet->size()));
+ for (const std::string &Ext : *ExtSet)
AddString(Ext, Record);
}
+
Stream.EmitRecord(OPENCL_EXTENSION_TYPES, Record);
}
@@ -4293,13 +4323,31 @@ void ASTWriter::WriteOpenCLExtensionDecls(Sema &SemaRef) {
if (!SemaRef.Context.getLangOpts().OpenCL)
return;
+ // Sort the elements of the map OpenCLDeclExtMap by DeclID so that the
+ // emitted record is deterministic, without copying the extension sets.
+ const llvm::DenseMap<const Decl *, std::set<std::string>> &OpenCLDeclExtMap =
+ SemaRef.OpenCLDeclExtMap;
+ using ElementTy = std::pair<DeclID, const std::set<std::string> *>;
+ llvm::SmallVector<ElementTy, 8> StableOpenCLDeclExtMap;
+ StableOpenCLDeclExtMap.reserve(OpenCLDeclExtMap.size());
+
+ for (const auto &I : OpenCLDeclExtMap)
+ StableOpenCLDeclExtMap.emplace_back(getDeclID(I.first), &I.second);
+
+ auto CompareByDeclID = [](const ElementTy &E1, const ElementTy &E2) -> bool {
+ return E1.first < E2.first;
+ };
+ llvm::sort(StableOpenCLDeclExtMap, CompareByDeclID);
+
RecordData Record;
- for (const auto &I : SemaRef.OpenCLDeclExtMap) {
- Record.push_back(getDeclID(I.first));
- Record.push_back(static_cast<unsigned>(I.second.size()));
- for (auto Ext : I.second)
+ for (const ElementTy &E : StableOpenCLDeclExtMap) {
+ Record.push_back(E.first); // DeclID
+ const std::set<std::string> *ExtSet = E.second;
+ Record.push_back(static_cast<unsigned>(ExtSet->size()));
+ for (const std::string &Ext : *ExtSet)
AddString(Ext, Record);
}
+
Stream.EmitRecord(OPENCL_EXTENSION_DECLS, Record);
}
@@ -4568,10 +4616,11 @@ void ASTWriter::SetSelectorOffset(Selector Sel, uint32_t Offset) {
}
ASTWriter::ASTWriter(llvm::BitstreamWriter &Stream,
- SmallVectorImpl<char> &Buffer, MemoryBufferCache &PCMCache,
+ SmallVectorImpl<char> &Buffer,
+ InMemoryModuleCache &ModuleCache,
ArrayRef<std::shared_ptr<ModuleFileExtension>> Extensions,
bool IncludeTimestamps)
- : Stream(Stream), Buffer(Buffer), PCMCache(PCMCache),
+ : Stream(Stream), Buffer(Buffer), ModuleCache(ModuleCache),
IncludeTimestamps(IncludeTimestamps) {
for (const auto &Ext : Extensions) {
if (auto Writer = Ext->createExtensionWriter(*this))
@@ -4595,7 +4644,8 @@ time_t ASTWriter::getTimestampForOutput(const FileEntry *E) const {
ASTFileSignature ASTWriter::WriteAST(Sema &SemaRef,
const std::string &OutputFile,
Module *WritingModule, StringRef isysroot,
- bool hasErrors) {
+ bool hasErrors,
+ bool ShouldCacheASTInMemory) {
WritingAST = true;
ASTHasCompilerErrors = hasErrors;
@@ -4619,11 +4669,11 @@ ASTFileSignature ASTWriter::WriteAST(Sema &SemaRef,
this->BaseDirectory.clear();
WritingAST = false;
- if (SemaRef.Context.getLangOpts().ImplicitModules && WritingModule) {
+ if (ShouldCacheASTInMemory) {
// Construct MemoryBuffer and update buffer manager.
- PCMCache.addBuffer(OutputFile,
- llvm::MemoryBuffer::getMemBufferCopy(
- StringRef(Buffer.begin(), Buffer.size())));
+ ModuleCache.addBuiltPCM(OutputFile,
+ llvm::MemoryBuffer::getMemBufferCopy(
+ StringRef(Buffer.begin(), Buffer.size())));
}
return Signature;
}
@@ -5288,6 +5338,14 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
D->getAttr<OMPThreadPrivateDeclAttr>()->getRange());
break;
+ case UPD_DECL_MARKED_OPENMP_ALLOCATE: {
+ auto *A = D->getAttr<OMPAllocateDeclAttr>();
+ Record.push_back(A->getAllocatorType());
+ Record.AddStmt(A->getAllocator());
+ Record.AddSourceRange(A->getRange());
+ break;
+ }
+
case UPD_DECL_MARKED_OPENMP_DECLARETARGET:
Record.push_back(D->getAttr<OMPDeclareTargetDeclAttr>()->getMapType());
Record.AddSourceRange(
@@ -5342,6 +5400,61 @@ void ASTRecordWriter::AddAPFloat(const llvm::APFloat &Value) {
AddAPInt(Value.bitcastToAPInt());
}
+static void WriteFixedPointSemantics(ASTRecordWriter &Record,
+ FixedPointSemantics FPSema) {
+ Record.push_back(FPSema.getWidth());
+ Record.push_back(FPSema.getScale());
+ Record.push_back(FPSema.isSigned() | FPSema.isSaturated() << 1 |
+ FPSema.hasUnsignedPadding() << 2);
+}
+
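+/// Write an APValue. This mirrors ASTReader::ReadAPValue: only the scalar
+/// kinds are serialized for now, and the aggregate kinds are left as TODO.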
+void ASTRecordWriter::AddAPValue(const APValue &Value) {
+ APValue::ValueKind Kind = Value.getKind();
+ push_back(static_cast<uint64_t>(Kind));
+ switch (Kind) {
+ case APValue::None:
+ case APValue::Indeterminate:
+ return;
+ case APValue::Int:
+ AddAPSInt(Value.getInt());
+ return;
+ case APValue::Float:
+ push_back(static_cast<uint64_t>(
+ llvm::APFloatBase::SemanticsToEnum(Value.getFloat().getSemantics())));
+ AddAPFloat(Value.getFloat());
+ return;
+ case APValue::FixedPoint: {
+ WriteFixedPointSemantics(*this, Value.getFixedPoint().getSemantics());
+ AddAPSInt(Value.getFixedPoint().getValue());
+ return;
+ }
+ case APValue::ComplexInt: {
+ AddAPSInt(Value.getComplexIntReal());
+ AddAPSInt(Value.getComplexIntImag());
+ return;
+ }
+ case APValue::ComplexFloat: {
+ push_back(static_cast<uint64_t>(llvm::APFloatBase::SemanticsToEnum(
+ Value.getComplexFloatReal().getSemantics())));
+ AddAPFloat(Value.getComplexFloatReal());
+ push_back(static_cast<uint64_t>(llvm::APFloatBase::SemanticsToEnum(
+ Value.getComplexFloatImag().getSemantics())));
+ AddAPFloat(Value.getComplexFloatImag());
+ return;
+ }
+ case APValue::LValue:
+ case APValue::Vector:
+ case APValue::Array:
+ case APValue::Struct:
+ case APValue::Union:
+ case APValue::MemberPointer:
+ case APValue::AddrLabelDiff:
+ // TODO: Handle all these APValue::ValueKinds.
+ return;
+ }
+ llvm_unreachable("Invalid APValue::ValueKind");
+}
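
A minimal read-side sketch of the fixed-point packing used by WriteFixedPointSemantics above; the reader-side accessors and the FixedPointSemantics constructor argument order here are assumptions, not part of this patch:

// Sketch only: mirrors the write order Width, Scale, packed flags.
unsigned Width = Record.readInt();               // hypothetical reader cursor
unsigned Scale = Record.readInt();
uint64_t Flags = Record.readInt();
bool IsSigned           = Flags & 0x1;           // bit 0
bool IsSaturated        = (Flags >> 1) & 0x1;    // bit 1
bool HasUnsignedPadding = (Flags >> 2) & 0x1;    // bit 2
FixedPointSemantics Sema(Width, Scale, IsSigned, IsSaturated,
                         HasUnsignedPadding);
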
+
void ASTWriter::AddIdentifierRef(const IdentifierInfo *II, RecordDataImpl &Record) {
Record.push_back(getIdentifierRef(II));
}
@@ -5595,7 +5708,7 @@ void ASTWriter::associateDeclWithFile(const Decl *D, DeclID ID) {
}
LocDeclIDsTy::iterator I =
- std::upper_bound(Decls.begin(), Decls.end(), LocDecl, llvm::less_first());
+ llvm::upper_bound(Decls, LocDecl, llvm::less_first());
Decls.insert(I, LocDecl);
}
@@ -5820,6 +5933,12 @@ void ASTRecordWriter::AddTemplateName(TemplateName Name) {
break;
}
+ case TemplateName::AssumedTemplate: {
+ AssumedTemplateStorage *ADLT = Name.getAsAssumedTemplateName();
+ AddDeclarationName(ADLT->getDeclName());
+ break;
+ }
+
case TemplateName::QualifiedTemplate: {
QualifiedTemplateName *QualT = Name.getAsQualifiedTemplateName();
AddNestedNameSpecifier(QualT->getQualifier());
@@ -6405,6 +6524,15 @@ void ASTWriter::DeclarationMarkedOpenMPThreadPrivate(const Decl *D) {
DeclUpdates[D].push_back(DeclUpdate(UPD_DECL_MARKED_OPENMP_THREADPRIVATE));
}
+void ASTWriter::DeclarationMarkedOpenMPAllocate(const Decl *D, const Attr *A) {
+ if (Chain && Chain->isProcessingUpdateRecords()) return;
+ assert(!WritingAST && "Already writing the AST!");
+ if (!D->isFromASTFile())
+ return;
+
+ DeclUpdates[D].push_back(DeclUpdate(UPD_DECL_MARKED_OPENMP_ALLOCATE, A));
+}
+
void ASTWriter::DeclarationMarkedOpenMPDeclareTarget(const Decl *D,
const Attr *Attr) {
if (Chain && Chain->isProcessingUpdateRecords()) return;
@@ -6519,6 +6647,11 @@ void OMPClauseWriter::VisitOMPSimdlenClause(OMPSimdlenClause *C) {
Record.AddSourceLocation(C->getLParenLoc());
}
+void OMPClauseWriter::VisitOMPAllocatorClause(OMPAllocatorClause *C) {
+ Record.AddStmt(C->getAllocator());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
void OMPClauseWriter::VisitOMPCollapseClause(OMPCollapseClause *C) {
Record.AddStmt(C->getNumForLoops());
Record.AddSourceLocation(C->getLParenLoc());
@@ -6786,11 +6919,15 @@ void OMPClauseWriter::VisitOMPMapClause(OMPMapClause *C) {
Record.push_back(C->getMapTypeModifier(I));
Record.AddSourceLocation(C->getMapTypeModifierLoc(I));
}
+ Record.AddNestedNameSpecifierLoc(C->getMapperQualifierLoc());
+ Record.AddDeclarationNameInfo(C->getMapperIdInfo());
Record.push_back(C->getMapType());
Record.AddSourceLocation(C->getMapLoc());
Record.AddSourceLocation(C->getColonLoc());
for (auto *E : C->varlists())
Record.AddStmt(E);
+ for (auto *E : C->mapperlists())
+ Record.AddStmt(E);
for (auto *D : C->all_decls())
Record.AddDeclRef(D);
for (auto N : C->all_num_lists())
@@ -6803,6 +6940,15 @@ void OMPClauseWriter::VisitOMPMapClause(OMPMapClause *C) {
}
}
+void OMPClauseWriter::VisitOMPAllocateClause(OMPAllocateClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getColonLoc());
+ Record.AddStmt(C->getAllocator());
+ for (auto *VE : C->varlists())
+ Record.AddStmt(VE);
+}
+
void OMPClauseWriter::VisitOMPNumTeamsClause(OMPNumTeamsClause *C) {
VisitOMPClauseWithPreInit(C);
Record.AddStmt(C->getNumTeams());
@@ -6858,8 +7004,12 @@ void OMPClauseWriter::VisitOMPToClause(OMPToClause *C) {
Record.push_back(C->getTotalComponentListNum());
Record.push_back(C->getTotalComponentsNum());
Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddNestedNameSpecifierLoc(C->getMapperQualifierLoc());
+ Record.AddDeclarationNameInfo(C->getMapperIdInfo());
for (auto *E : C->varlists())
Record.AddStmt(E);
+ for (auto *E : C->mapperlists())
+ Record.AddStmt(E);
for (auto *D : C->all_decls())
Record.AddDeclRef(D);
for (auto N : C->all_num_lists())
@@ -6878,8 +7028,12 @@ void OMPClauseWriter::VisitOMPFromClause(OMPFromClause *C) {
Record.push_back(C->getTotalComponentListNum());
Record.push_back(C->getTotalComponentsNum());
Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddNestedNameSpecifierLoc(C->getMapperQualifierLoc());
+ Record.AddDeclarationNameInfo(C->getMapperIdInfo());
for (auto *E : C->varlists())
Record.AddStmt(E);
+ for (auto *E : C->mapperlists())
+ Record.AddStmt(E);
for (auto *D : C->all_decls())
Record.AddDeclRef(D);
for (auto N : C->all_num_lists())
diff --git a/lib/Serialization/ASTWriterDecl.cpp b/lib/Serialization/ASTWriterDecl.cpp
index 002b43f81121..b71315505de9 100644
--- a/lib/Serialization/ASTWriterDecl.cpp
+++ b/lib/Serialization/ASTWriterDecl.cpp
@@ -1,9 +1,8 @@
//===--- ASTWriterDecl.cpp - Declaration Serialization --------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -22,7 +21,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/ASTWriter.h"
-#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Bitstream/BitstreamWriter.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
using namespace serialization;
@@ -103,6 +102,7 @@ namespace clang {
void VisitBindingDecl(BindingDecl *D);
void VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
void VisitTemplateDecl(TemplateDecl *D);
+ void VisitConceptDecl(ConceptDecl *D);
void VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D);
void VisitClassTemplateDecl(ClassTemplateDecl *D);
void VisitVarTemplateDecl(VarTemplateDecl *D);
@@ -145,8 +145,10 @@ namespace clang {
void VisitObjCPropertyDecl(ObjCPropertyDecl *D);
void VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
void VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D);
+ void VisitOMPAllocateDecl(OMPAllocateDecl *D);
void VisitOMPRequiresDecl(OMPRequiresDecl *D);
void VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D);
+ void VisitOMPDeclareMapperDecl(OMPDeclareMapperDecl *D);
void VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D);
/// Add an Objective-C type parameter list to the given record.
@@ -474,6 +476,9 @@ void ASTDeclWriter::VisitRecordDecl(RecordDecl *D) {
Record.push_back(D->isNonTrivialToPrimitiveDefaultInitialize());
Record.push_back(D->isNonTrivialToPrimitiveCopy());
Record.push_back(D->isNonTrivialToPrimitiveDestroy());
+ Record.push_back(D->hasNonTrivialToPrimitiveDefaultInitializeCUnion());
+ Record.push_back(D->hasNonTrivialToPrimitiveDestructCUnion());
+ Record.push_back(D->hasNonTrivialToPrimitiveCopyCUnion());
Record.push_back(D->isParamDestroyedInCallee());
Record.push_back(D->getArgPassingRestrictions());
@@ -534,7 +539,6 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
Record.push_back(static_cast<int>(D->getStorageClass())); // FIXME: stable encoding
Record.push_back(D->isInlineSpecified());
Record.push_back(D->isInlined());
- Record.push_back(D->isExplicitSpecified());
Record.push_back(D->isVirtualAsWritten());
Record.push_back(D->isPure());
Record.push_back(D->hasInheritedPrototype());
@@ -545,7 +549,7 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
Record.push_back(D->isDefaulted());
Record.push_back(D->isExplicitlyDefaulted());
Record.push_back(D->hasImplicitReturnZero());
- Record.push_back(D->isConstexpr());
+ Record.push_back(D->getConstexprKind());
Record.push_back(D->usesSEHTry());
Record.push_back(D->hasSkippedBody());
Record.push_back(D->isMultiVersion());
@@ -595,6 +599,16 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
Record.AddSourceLocation(FTSInfo->getPointOfInstantiation());
+ if (MemberSpecializationInfo *MemberInfo =
+ FTSInfo->getMemberSpecializationInfo()) {
+ Record.push_back(1);
+ Record.AddDeclRef(MemberInfo->getInstantiatedFrom());
+ Record.push_back(MemberInfo->getTemplateSpecializationKind());
+ Record.AddSourceLocation(MemberInfo->getPointOfInstantiation());
+ } else {
+ Record.push_back(0);
+ }
+
if (D->isCanonicalDecl()) {
// Write the template that contains the specializations set. We will
// add a FunctionTemplateSpecializationInfo to it when reading.
@@ -627,7 +641,18 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
Code = serialization::DECL_FUNCTION;
}
+static void addExplicitSpecifier(ExplicitSpecifier ES,
+ ASTRecordWriter &Record) {
+ uint64_t Kind = static_cast<uint64_t>(ES.getKind());
+ Kind = Kind << 1 | static_cast<bool>(ES.getExpr());
+ Record.push_back(Kind);
+ if (ES.getExpr()) {
+ Record.AddStmt(ES.getExpr());
+ }
+}
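
The word written by addExplicitSpecifier holds the ExplicitSpecKind shifted left by one, with the low bit recording whether a constraint expression follows. A hedged unpacking sketch; the reader accessors are assumed names, not API shown in this patch:

uint64_t Packed = Record.readInt();                          // hypothetical
bool HasExpr = Packed & 0x1;
auto Kind = static_cast<ExplicitSpecKind>(Packed >> 1);
Expr *ExplicitExpr = HasExpr ? Record.readExpr() : nullptr;  // hypothetical
ExplicitSpecifier ES(ExplicitExpr, Kind);
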
+
void ASTDeclWriter::VisitCXXDeductionGuideDecl(CXXDeductionGuideDecl *D) {
+ addExplicitSpecifier(D->getExplicitSpecifier(), Record);
VisitFunctionDecl(D);
Record.push_back(D->isCopyDeductionCandidate());
Code = serialization::DECL_CXX_DEDUCTION_GUIDE;
@@ -1110,6 +1135,7 @@ void ASTDeclWriter::VisitBlockDecl(BlockDecl *D) {
Record.push_back(D->blockMissingReturnType());
Record.push_back(D->isConversionFromLambda());
Record.push_back(D->doesNotEscape());
+ Record.push_back(D->canAvoidCopyToHeap());
Record.push_back(D->capturesCXXThis());
Record.push_back(D->getNumCaptures());
for (const auto &capture : D->captures()) {
@@ -1319,19 +1345,15 @@ void ASTDeclWriter::VisitCXXMethodDecl(CXXMethodDecl *D) {
}
void ASTDeclWriter::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
+ Record.push_back(D->getTraillingAllocKind());
+ addExplicitSpecifier(D->getExplicitSpecifier(), Record);
if (auto Inherited = D->getInheritedConstructor()) {
Record.AddDeclRef(Inherited.getShadowDecl());
Record.AddDeclRef(Inherited.getConstructor());
- Code = serialization::DECL_CXX_INHERITED_CONSTRUCTOR;
- } else {
- Code = serialization::DECL_CXX_CONSTRUCTOR;
}
VisitCXXMethodDecl(D);
-
- Code = D->isInheritingConstructor()
- ? serialization::DECL_CXX_INHERITED_CONSTRUCTOR
- : serialization::DECL_CXX_CONSTRUCTOR;
+ Code = serialization::DECL_CXX_CONSTRUCTOR;
}
void ASTDeclWriter::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
@@ -1345,6 +1367,7 @@ void ASTDeclWriter::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
}
void ASTDeclWriter::VisitCXXConversionDecl(CXXConversionDecl *D) {
+ addExplicitSpecifier(D->getExplicitSpecifier(), Record);
VisitCXXMethodDecl(D);
Code = serialization::DECL_CXX_CONVERSION;
}
@@ -1413,6 +1436,12 @@ void ASTDeclWriter::VisitTemplateDecl(TemplateDecl *D) {
Record.AddTemplateParameterList(D->getTemplateParameters());
}
+void ASTDeclWriter::VisitConceptDecl(ConceptDecl *D) {
+ VisitTemplateDecl(D);
+ Record.AddStmt(D->getConstraintExpr());
+ Code = serialization::DECL_CONCEPT;
+}
+
void ASTDeclWriter::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
VisitRedeclarable(D);
@@ -1553,6 +1582,9 @@ void ASTDeclWriter::VisitClassScopeFunctionSpecializationDecl(
ClassScopeFunctionSpecializationDecl *D) {
VisitDecl(D);
Record.AddDeclRef(D->getSpecialization());
+ Record.push_back(D->hasExplicitTemplateArgs());
+ if (D->hasExplicitTemplateArgs())
+ Record.AddASTTemplateArgumentListInfo(D->getTemplateArgsAsWritten());
Code = serialization::DECL_CLASS_SCOPE_FUNCTION_SPECIALIZATION;
}
@@ -1743,10 +1775,22 @@ void ASTDeclWriter::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) {
Code = serialization::DECL_OMP_THREADPRIVATE;
}
+void ASTDeclWriter::VisitOMPAllocateDecl(OMPAllocateDecl *D) {
+ Record.push_back(D->varlist_size());
+ Record.push_back(D->clauselist_size());
+ VisitDecl(D);
+ for (auto *I : D->varlists())
+ Record.AddStmt(I);
+ OMPClauseWriter ClauseWriter(Record);
+ for (OMPClause *C : D->clauselists())
+ ClauseWriter.writeClause(C);
+ Code = serialization::DECL_OMP_ALLOCATE;
+}
+
void ASTDeclWriter::VisitOMPRequiresDecl(OMPRequiresDecl *D) {
Record.push_back(D->clauselist_size());
VisitDecl(D);
- OMPClauseWriter ClauseWriter(Record);
+ OMPClauseWriter ClauseWriter(Record);
for (OMPClause *C : D->clauselists())
ClauseWriter.writeClause(C);
Code = serialization::DECL_OMP_REQUIRES;
@@ -1766,6 +1810,19 @@ void ASTDeclWriter::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) {
Code = serialization::DECL_OMP_DECLARE_REDUCTION;
}
+void ASTDeclWriter::VisitOMPDeclareMapperDecl(OMPDeclareMapperDecl *D) {
+ Record.push_back(D->clauselist_size());
+ VisitValueDecl(D);
+ Record.AddSourceLocation(D->getBeginLoc());
+ Record.AddStmt(D->getMapperVarRef());
+ Record.AddDeclarationName(D->getVarName());
+ Record.AddDeclRef(D->getPrevDeclInScope());
+ OMPClauseWriter ClauseWriter(Record);
+ for (OMPClause *C : D->clauselists())
+ ClauseWriter.writeClause(C);
+ Code = serialization::DECL_OMP_DECLARE_MAPPER;
+}
+
void ASTDeclWriter::VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D) {
VisitVarDecl(D);
Code = serialization::DECL_OMP_CAPTUREDEXPR;
@@ -1945,6 +2002,12 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
// isNonTrivialToPrimitiveDestroy
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
+ // hasNonTrivialToPrimitiveDefaultInitializeCUnion
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
+ // hasNonTrivialToPrimitiveDestructCUnion
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
+ // hasNonTrivialToPrimitiveCopyCUnion
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
// isParamDestroyedInCallee
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
// getArgPassingRestrictions
@@ -2116,7 +2179,6 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // StorageClass
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Inline
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InlineSpecified
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ExplicitSpecified
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // VirtualAsWritten
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Pure
Abv->Add(BitCodeAbbrevOp(0)); // HasInheritedProto
@@ -2152,7 +2214,8 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv = std::make_shared<BitCodeAbbrev>();
Abv->Add(BitCodeAbbrevOp(serialization::EXPR_DECL_REF));
//Stmt
- //Expr
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsOMPStructuredBlock
+ // Expr
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
@@ -2165,8 +2228,8 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //GetDeclFound
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ExplicitTemplateArgs
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //HadMultipleCandidates
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
- 1)); // RefersToEnclosingVariableOrCapture
+ Abv->Add(BitCodeAbbrevOp(0)); // RefersToEnclosingVariableOrCapture
+ Abv->Add(BitCodeAbbrevOp(0)); // NonOdrUseReason
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclRef
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
DeclRefExprAbbrev = Stream.EmitAbbrev(std::move(Abv));
@@ -2175,7 +2238,8 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv = std::make_shared<BitCodeAbbrev>();
Abv->Add(BitCodeAbbrevOp(serialization::EXPR_INTEGER_LITERAL));
//Stmt
- //Expr
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsOMPStructuredBlock
+ // Expr
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
@@ -2193,7 +2257,8 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv = std::make_shared<BitCodeAbbrev>();
Abv->Add(BitCodeAbbrevOp(serialization::EXPR_CHARACTER_LITERAL));
//Stmt
- //Expr
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsOMPStructuredBlock
+ // Expr
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
@@ -2211,6 +2276,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv = std::make_shared<BitCodeAbbrev>();
Abv->Add(BitCodeAbbrevOp(serialization::EXPR_IMPLICIT_CAST));
// Stmt
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsOMPStructuredBlock
// Expr
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
@@ -2259,7 +2325,7 @@ static bool isRequiredDecl(const Decl *D, ASTContext &Context,
if (isa<FileScopeAsmDecl>(D) || isa<ObjCImplDecl>(D))
return true;
- if (WritingModule && (isa<VarDecl>(D) || isa<ImportDecl>(D))) {
+ if (WritingModule && isPartOfPerModuleInitializer(D)) {
// These declarations are part of the module initializer, and are emitted
// if and when the module is imported, rather than being emitted eagerly.
return false;
diff --git a/lib/Serialization/ASTWriterStmt.cpp b/lib/Serialization/ASTWriterStmt.cpp
index 6f8b86edcdfc..4fbcbaabe74b 100644
--- a/lib/Serialization/ASTWriterStmt.cpp
+++ b/lib/Serialization/ASTWriterStmt.cpp
@@ -1,9 +1,8 @@
//===--- ASTWriterStmt.cpp - Statement and Expression Serialization -------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -19,7 +18,7 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Lex/Token.h"
-#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Bitstream/BitstreamWriter.h"
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -68,6 +67,7 @@ void ASTStmtWriter::AddTemplateKWAndArgsInfo(
}
void ASTStmtWriter::VisitStmt(Stmt *S) {
+ Record.push_back(S->StmtBits.IsOMPStructuredBlock);
}
void ASTStmtWriter::VisitNullStmt(NullStmt *S) {
@@ -283,6 +283,7 @@ void ASTStmtWriter::VisitAsmStmt(AsmStmt *S) {
void ASTStmtWriter::VisitGCCAsmStmt(GCCAsmStmt *S) {
VisitAsmStmt(S);
+ Record.push_back(S->getNumLabels());
Record.AddSourceLocation(S->getRParenLoc());
Record.AddStmt(S->getAsmString());
@@ -304,6 +305,9 @@ void ASTStmtWriter::VisitGCCAsmStmt(GCCAsmStmt *S) {
for (unsigned I = 0, N = S->getNumClobbers(); I != N; ++I)
Record.AddStmt(S->getClobberStringLiteral(I));
+ // Labels
+ for (auto *E : S->labels()) Record.AddStmt(E);
+
Code = serialization::STMT_GCCASM;
}
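
The new NumLabels and labels() entries above serialize the targets of the "asm goto" form; a small, illustrative example of the construct whose label operands end up in this record:

void jump_if_nonzero(int x) {
  // One input ("r"(x)), no outputs, one label operand (%l1 refers to 'error').
  asm goto("testl %0, %0; jne %l1" : : "r"(x) : "cc" : error);
  return;
error:
  __builtin_trap();
}
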
@@ -428,6 +432,16 @@ void ASTStmtWriter::VisitExpr(Expr *E) {
void ASTStmtWriter::VisitConstantExpr(ConstantExpr *E) {
VisitExpr(E);
+ Record.push_back(static_cast<uint64_t>(E->ConstantExprBits.ResultKind));
+ switch (E->ConstantExprBits.ResultKind) {
+ case ConstantExpr::RSK_Int64:
+ Record.push_back(E->Int64Result());
+ Record.push_back(E->ConstantExprBits.IsUnsigned |
+ E->ConstantExprBits.BitWidth << 1);
+ break;
+ case ConstantExpr::RSK_APValue:
+ Record.AddAPValue(E->APValueResult());
+ }
Record.AddStmt(E->getSubExpr());
Code = serialization::EXPR_CONSTANT;
}
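
For reference, a hedged sketch of the matching decode for the result-kind payload above (RSK_None carries no payload); the reader accessors are assumed names:

auto StorageKind =
    static_cast<ConstantExpr::ResultStorageKind>(Record.readInt());
switch (StorageKind) {
case ConstantExpr::RSK_None:
  break;                                  // nothing was written
case ConstantExpr::RSK_Int64: {
  uint64_t Value = Record.readInt();      // Int64Result()
  uint64_t Meta  = Record.readInt();      // IsUnsigned | BitWidth << 1
  bool IsUnsigned   = Meta & 0x1;
  unsigned BitWidth = Meta >> 1;
  (void)Value; (void)IsUnsigned; (void)BitWidth;
  break;
}
case ConstantExpr::RSK_APValue: {
  APValue Result = Record.readAPValue();  // hypothetical accessor
  (void)Result;
  break;
}
}
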
@@ -452,6 +466,7 @@ void ASTStmtWriter::VisitDeclRefExpr(DeclRefExpr *E) {
Record.push_back(E->hasTemplateKWAndArgsInfo());
Record.push_back(E->hadMultipleCandidates());
Record.push_back(E->refersToEnclosingVariableOrCapture());
+ Record.push_back(E->isNonOdrUse());
if (E->hasTemplateKWAndArgsInfo()) {
unsigned NumTemplateArgs = E->getNumTemplateArgs();
@@ -462,7 +477,8 @@ void ASTStmtWriter::VisitDeclRefExpr(DeclRefExpr *E) {
if ((!E->hasTemplateKWAndArgsInfo()) && (!E->hasQualifier()) &&
(E->getDecl() == E->getFoundDecl()) &&
- nk == DeclarationName::Identifier) {
+ nk == DeclarationName::Identifier &&
+ !E->refersToEnclosingVariableOrCapture() && !E->isNonOdrUse()) {
AbbrevToUse = Writer.getDeclRefExprAbbrev();
}
@@ -656,39 +672,46 @@ void ASTStmtWriter::VisitCallExpr(CallExpr *E) {
}
void ASTStmtWriter::VisitMemberExpr(MemberExpr *E) {
- // Don't call VisitExpr, we'll write everything here.
-
- Record.push_back(E->hasQualifier());
- if (E->hasQualifier())
- Record.AddNestedNameSpecifierLoc(E->getQualifierLoc());
-
- Record.push_back(E->hasTemplateKWAndArgsInfo());
- if (E->hasTemplateKWAndArgsInfo()) {
- Record.AddSourceLocation(E->getTemplateKeywordLoc());
- unsigned NumTemplateArgs = E->getNumTemplateArgs();
- Record.push_back(NumTemplateArgs);
- Record.AddSourceLocation(E->getLAngleLoc());
- Record.AddSourceLocation(E->getRAngleLoc());
- for (unsigned i=0; i != NumTemplateArgs; ++i)
- Record.AddTemplateArgumentLoc(E->getTemplateArgs()[i]);
- }
+ VisitExpr(E);
- Record.push_back(E->hadMultipleCandidates());
+ bool HasQualifier = E->hasQualifier();
+ bool HasFoundDecl =
+ E->hasQualifierOrFoundDecl() &&
+ (E->getFoundDecl().getDecl() != E->getMemberDecl() ||
+ E->getFoundDecl().getAccess() != E->getMemberDecl()->getAccess());
+ bool HasTemplateInfo = E->hasTemplateKWAndArgsInfo();
+ unsigned NumTemplateArgs = E->getNumTemplateArgs();
- DeclAccessPair FoundDecl = E->getFoundDecl();
- Record.AddDeclRef(FoundDecl.getDecl());
- Record.push_back(FoundDecl.getAccess());
+ // Write these first for easy access when deserializing, as they affect the
+ // size of the MemberExpr.
+ Record.push_back(HasQualifier);
+ Record.push_back(HasFoundDecl);
+ Record.push_back(HasTemplateInfo);
+ Record.push_back(NumTemplateArgs);
- Record.AddTypeRef(E->getType());
- Record.push_back(E->getValueKind());
- Record.push_back(E->getObjectKind());
Record.AddStmt(E->getBase());
Record.AddDeclRef(E->getMemberDecl());
+ Record.AddDeclarationNameLoc(E->MemberDNLoc,
+ E->getMemberDecl()->getDeclName());
Record.AddSourceLocation(E->getMemberLoc());
Record.push_back(E->isArrow());
+ Record.push_back(E->hadMultipleCandidates());
+ Record.push_back(E->isNonOdrUse());
Record.AddSourceLocation(E->getOperatorLoc());
- Record.AddDeclarationNameLoc(E->MemberDNLoc,
- E->getMemberDecl()->getDeclName());
+
+ if (HasFoundDecl) {
+ DeclAccessPair FoundDecl = E->getFoundDecl();
+ Record.AddDeclRef(FoundDecl.getDecl());
+ Record.push_back(FoundDecl.getAccess());
+ }
+
+ if (HasQualifier)
+ Record.AddNestedNameSpecifierLoc(E->getQualifierLoc());
+
+ if (HasTemplateInfo)
+ AddTemplateKWAndArgsInfo(*E->getTrailingObjects<ASTTemplateKWAndArgsInfo>(),
+ E->getTrailingObjects<TemplateArgumentLoc>());
+
Code = serialization::EXPR_MEMBER;
}
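
The HasFoundDecl test above avoids storing a separate found declaration when it adds no information; a one-line restatement of the fallback a reader can use in that case, built from the same DeclAccessPair the deleted code serialized unconditionally:

// When HasFoundDecl is false, the found decl is fully implied by the member:
DeclAccessPair Found = DeclAccessPair::make(E->getMemberDecl(),
                                            E->getMemberDecl()->getAccess());
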
@@ -909,6 +932,15 @@ void ASTStmtWriter::VisitVAArgExpr(VAArgExpr *E) {
Code = serialization::EXPR_VA_ARG;
}
+void ASTStmtWriter::VisitSourceLocExpr(SourceLocExpr *E) {
+ VisitExpr(E);
+ Record.AddDeclRef(cast_or_null<Decl>(E->getParentContext()));
+ Record.AddSourceLocation(E->getBeginLoc());
+ Record.AddSourceLocation(E->getEndLoc());
+ Record.push_back(E->getIdentKind());
+ Code = serialization::EXPR_SOURCE_LOC;
+}
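
SourceLocExpr is the AST node behind the __builtin_LINE family; a small example of code whose default arguments produce the nodes serialized above (the IdentKind field records which builtin, the parent context records where the default argument is used):

void log_here(const char *msg,
              unsigned line = __builtin_LINE(),
              const char *file = __builtin_FILE()) {
  // line/file are filled in at each call site, not at this declaration.
}
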
+
void ASTStmtWriter::VisitAddrLabelExpr(AddrLabelExpr *E) {
VisitExpr(E);
Record.AddSourceLocation(E->getAmpAmpLoc());
@@ -969,18 +1001,24 @@ void ASTStmtWriter::VisitBlockExpr(BlockExpr *E) {
void ASTStmtWriter::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
VisitExpr(E);
- Record.push_back(E->getNumAssocs());
-
- Record.AddStmt(E->getControllingExpr());
- for (unsigned I = 0, N = E->getNumAssocs(); I != N; ++I) {
- Record.AddTypeSourceInfo(E->getAssocTypeSourceInfo(I));
- Record.AddStmt(E->getAssocExpr(I));
- }
- Record.push_back(E->isResultDependent() ? -1U : E->getResultIndex());
+ Record.push_back(E->getNumAssocs());
+ Record.push_back(E->ResultIndex);
Record.AddSourceLocation(E->getGenericLoc());
Record.AddSourceLocation(E->getDefaultLoc());
Record.AddSourceLocation(E->getRParenLoc());
+
+ Stmt **Stmts = E->getTrailingObjects<Stmt *>();
+ // Add 1 to account for the controlling expression which is the first
+ // expression in the trailing array of Stmt *. This is not needed for
+ // the trailing array of TypeSourceInfo *.
+ for (unsigned I = 0, N = E->getNumAssocs() + 1; I < N; ++I)
+ Record.AddStmt(Stmts[I]);
+
+ TypeSourceInfo **TSIs = E->getTrailingObjects<TypeSourceInfo *>();
+ for (unsigned I = 0, N = E->getNumAssocs(); I < N; ++I)
+ Record.AddTypeSourceInfo(TSIs[I]);
+
Code = serialization::EXPR_GENERIC_SELECTION;
}
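
A hedged sketch of the trailing-storage layout the two loops above rely on; a reader must walk it in the same order, and the accessor names below are assumptions:

// Stmt * trailing objects:     [0] controlling expression,
//                              [1 .. NumAssocs] association expressions.
// TypeSourceInfo * trailing:   [0 .. NumAssocs-1] association types.
unsigned NumAssocs = Record.readInt();            // hypothetical
unsigned ResultIndex = Record.readInt();
// (source locations elided)
SmallVector<Stmt *, 8> Stmts(NumAssocs + 1);
for (unsigned I = 0; I != NumAssocs + 1; ++I)
  Stmts[I] = Record.readStmt();                   // hypothetical
SmallVector<TypeSourceInfo *, 8> Types(NumAssocs);
for (unsigned I = 0; I != NumAssocs; ++I)
  Types[I] = Record.readTypeSourceInfo();         // hypothetical
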
@@ -1193,6 +1231,7 @@ void ASTStmtWriter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
}
void ASTStmtWriter::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
+ VisitStmt(S);
Record.AddStmt(S->getCatchBody());
Record.AddDeclRef(S->getCatchParamDecl());
Record.AddSourceLocation(S->getAtCatchLoc());
@@ -1201,18 +1240,21 @@ void ASTStmtWriter::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
}
void ASTStmtWriter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
+ VisitStmt(S);
Record.AddStmt(S->getFinallyBody());
Record.AddSourceLocation(S->getAtFinallyLoc());
Code = serialization::STMT_OBJC_FINALLY;
}
void ASTStmtWriter::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
+ VisitStmt(S); // FIXME: no test coverage.
Record.AddStmt(S->getSubStmt());
Record.AddSourceLocation(S->getAtLoc());
Code = serialization::STMT_OBJC_AUTORELEASE_POOL;
}
void ASTStmtWriter::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
+ VisitStmt(S);
Record.push_back(S->getNumCatchStmts());
Record.push_back(S->getFinallyStmt() != nullptr);
Record.AddStmt(S->getTryBody());
@@ -1225,6 +1267,7 @@ void ASTStmtWriter::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
}
void ASTStmtWriter::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
+ VisitStmt(S); // FIXME: no test coverage.
Record.AddStmt(S->getSynchExpr());
Record.AddStmt(S->getSynchBody());
Record.AddSourceLocation(S->getAtSynchronizedLoc());
@@ -1232,6 +1275,7 @@ void ASTStmtWriter::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
}
void ASTStmtWriter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
+ VisitStmt(S); // FIXME: no test coverage.
Record.AddStmt(S->getThrowExpr());
Record.AddSourceLocation(S->getThrowLoc());
Code = serialization::STMT_OBJC_AT_THROW;
@@ -1407,6 +1451,12 @@ void ASTStmtWriter::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E) {
Code = serialization::EXPR_CXX_FUNCTIONAL_CAST;
}
+void ASTStmtWriter::VisitBuiltinBitCastExpr(BuiltinBitCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ Record.AddSourceLocation(E->getBeginLoc());
+ Record.AddSourceLocation(E->getEndLoc());
+}
+
void ASTStmtWriter::VisitUserDefinedLiteral(UserDefinedLiteral *E) {
VisitCallExpr(E);
Record.AddSourceLocation(E->UDSuffixLoc);
@@ -1456,6 +1506,7 @@ void ASTStmtWriter::VisitCXXThrowExpr(CXXThrowExpr *E) {
void ASTStmtWriter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
VisitExpr(E);
Record.AddDeclRef(E->getParam());
+ Record.AddDeclRef(cast_or_null<Decl>(E->getUsedContext()));
Record.AddSourceLocation(E->getUsedLocation());
Code = serialization::EXPR_CXX_DEFAULT_ARG;
}
@@ -1463,6 +1514,7 @@ void ASTStmtWriter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
void ASTStmtWriter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) {
VisitExpr(E);
Record.AddDeclRef(E->getField());
+ Record.AddDeclRef(cast_or_null<Decl>(E->getUsedContext()));
Record.AddSourceLocation(E->getExprLoc());
Code = serialization::EXPR_CXX_DEFAULT_INIT;
}
@@ -1768,6 +1820,7 @@ void ASTStmtWriter::VisitCXXFoldExpr(CXXFoldExpr *E) {
Record.AddSourceLocation(E->LParenLoc);
Record.AddSourceLocation(E->EllipsisLoc);
Record.AddSourceLocation(E->RParenLoc);
+ Record.push_back(E->NumExpansions);
Record.AddStmt(E->SubExprs[0]);
Record.AddStmt(E->SubExprs[1]);
Record.push_back(E->Opcode);
diff --git a/lib/Serialization/GeneratePCH.cpp b/lib/Serialization/GeneratePCH.cpp
index 2e0076521f9c..002233e49bb0 100644
--- a/lib/Serialization/GeneratePCH.cpp
+++ b/lib/Serialization/GeneratePCH.cpp
@@ -1,9 +1,8 @@
//===--- GeneratePCH.cpp - Sema Consumer for PCH Generation -----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -17,20 +16,22 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/SemaConsumer.h"
#include "clang/Serialization/ASTWriter.h"
-#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Bitstream/BitstreamWriter.h"
using namespace clang;
PCHGenerator::PCHGenerator(
- const Preprocessor &PP, StringRef OutputFile, StringRef isysroot,
- std::shared_ptr<PCHBuffer> Buffer,
+ const Preprocessor &PP, InMemoryModuleCache &ModuleCache,
+ StringRef OutputFile, StringRef isysroot, std::shared_ptr<PCHBuffer> Buffer,
ArrayRef<std::shared_ptr<ModuleFileExtension>> Extensions,
- bool AllowASTWithErrors, bool IncludeTimestamps)
+ bool AllowASTWithErrors, bool IncludeTimestamps,
+ bool ShouldCacheASTInMemory)
: PP(PP), OutputFile(OutputFile), isysroot(isysroot.str()),
SemaPtr(nullptr), Buffer(std::move(Buffer)), Stream(this->Buffer->Data),
- Writer(Stream, this->Buffer->Data, PP.getPCMCache(), Extensions,
+ Writer(Stream, this->Buffer->Data, ModuleCache, Extensions,
IncludeTimestamps),
- AllowASTWithErrors(AllowASTWithErrors) {
+ AllowASTWithErrors(AllowASTWithErrors),
+ ShouldCacheASTInMemory(ShouldCacheASTInMemory) {
this->Buffer->IsComplete = false;
}
@@ -62,7 +63,8 @@ void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) {
Writer.WriteAST(*SemaPtr, OutputFile, Module, isysroot,
// For serialization we are lenient if the errors were
// only warn-as-error kind.
- PP.getDiagnostics().hasUncompilableErrorOccurred());
+ PP.getDiagnostics().hasUncompilableErrorOccurred(),
+ ShouldCacheASTInMemory);
Buffer->IsComplete = true;
}
diff --git a/lib/Serialization/GlobalModuleIndex.cpp b/lib/Serialization/GlobalModuleIndex.cpp
index e7642a38924d..2db8f830c46d 100644
--- a/lib/Serialization/GlobalModuleIndex.cpp
+++ b/lib/Serialization/GlobalModuleIndex.cpp
@@ -1,9 +1,8 @@
//===--- GlobalModuleIndex.cpp - Global Module Index ------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -11,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+
#include "ASTReaderInternals.h"
#include "clang/Basic/FileManager.h"
#include "clang/Lex/HeaderSearch.h"
@@ -21,14 +21,15 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/Bitcode/BitstreamReader.h"
-#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Bitstream/BitstreamReader.h"
+#include "llvm/Bitstream/BitstreamWriter.h"
#include "llvm/Support/DJB.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/LockFileManager.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/OnDiskHashTable.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/TimeProfiler.h"
#include <cstdio>
using namespace clang;
using namespace serialization;
@@ -127,11 +128,21 @@ GlobalModuleIndex::GlobalModuleIndex(std::unique_ptr<llvm::MemoryBuffer> Buffer,
llvm::BitstreamCursor Cursor)
: Buffer(std::move(Buffer)), IdentifierIndex(), NumIdentifierLookups(),
NumIdentifierLookupHits() {
+ auto Fail = [&Buffer](llvm::Error &&Err) {
+ report_fatal_error("Module index '" + Buffer->getBufferIdentifier() +
+ "' failed: " + toString(std::move(Err)));
+ };
+
+ llvm::TimeTraceScope TimeScope("Module LoadIndex", StringRef(""));
// Read the global index.
bool InGlobalIndexBlock = false;
bool Done = false;
while (!Done) {
- llvm::BitstreamEntry Entry = Cursor.advance();
+ llvm::BitstreamEntry Entry;
+ if (Expected<llvm::BitstreamEntry> Res = Cursor.advance())
+ Entry = Res.get();
+ else
+ Fail(Res.takeError());
switch (Entry.Kind) {
case llvm::BitstreamEntry::Error:
@@ -155,19 +166,23 @@ GlobalModuleIndex::GlobalModuleIndex(std::unique_ptr<llvm::MemoryBuffer> Buffer,
case llvm::BitstreamEntry::SubBlock:
if (!InGlobalIndexBlock && Entry.ID == GLOBAL_INDEX_BLOCK_ID) {
- if (Cursor.EnterSubBlock(GLOBAL_INDEX_BLOCK_ID))
- return;
-
+ if (llvm::Error Err = Cursor.EnterSubBlock(GLOBAL_INDEX_BLOCK_ID))
+ Fail(std::move(Err));
InGlobalIndexBlock = true;
- } else if (Cursor.SkipBlock()) {
- return;
- }
+ } else if (llvm::Error Err = Cursor.SkipBlock())
+ Fail(std::move(Err));
continue;
}
SmallVector<uint64_t, 64> Record;
StringRef Blob;
- switch ((IndexRecordTypes)Cursor.readRecord(Entry.ID, Record, &Blob)) {
+ Expected<unsigned> MaybeIndexRecord =
+ Cursor.readRecord(Entry.ID, Record, &Blob);
+ if (!MaybeIndexRecord)
+ Fail(MaybeIndexRecord.takeError());
+ IndexRecordTypes IndexRecord =
+ static_cast<IndexRecordTypes>(MaybeIndexRecord.get());
+ switch (IndexRecord) {
case INDEX_METADATA:
// Make sure that the version matches.
if (Record.size() < 1 || Record[0] != CurrentVersion)
@@ -232,7 +247,7 @@ GlobalModuleIndex::~GlobalModuleIndex() {
delete static_cast<IdentifierIndexTable *>(IdentifierIndex);
}
-std::pair<GlobalModuleIndex *, GlobalModuleIndex::ErrorCode>
+std::pair<GlobalModuleIndex *, llvm::Error>
GlobalModuleIndex::readIndex(StringRef Path) {
// Load the index file, if it's there.
llvm::SmallString<128> IndexPath;
@@ -242,22 +257,26 @@ GlobalModuleIndex::readIndex(StringRef Path) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> BufferOrErr =
llvm::MemoryBuffer::getFile(IndexPath.c_str());
if (!BufferOrErr)
- return std::make_pair(nullptr, EC_NotFound);
+ return std::make_pair(nullptr,
+ llvm::errorCodeToError(BufferOrErr.getError()));
std::unique_ptr<llvm::MemoryBuffer> Buffer = std::move(BufferOrErr.get());
/// The main bitstream cursor for the main block.
llvm::BitstreamCursor Cursor(*Buffer);
// Sniff for the signature.
- if (Cursor.Read(8) != 'B' ||
- Cursor.Read(8) != 'C' ||
- Cursor.Read(8) != 'G' ||
- Cursor.Read(8) != 'I') {
- return std::make_pair(nullptr, EC_IOError);
+ for (unsigned char C : {'B', 'C', 'G', 'I'}) {
+ if (Expected<llvm::SimpleBitstreamCursor::word_t> Res = Cursor.Read(8)) {
+ if (Res.get() != C)
+ return std::make_pair(
+ nullptr, llvm::createStringError(std::errc::illegal_byte_sequence,
+ "expected signature BCGI"));
+ } else
+ return std::make_pair(nullptr, Res.takeError());
}
return std::make_pair(new GlobalModuleIndex(std::move(Buffer), Cursor),
- EC_None);
+ llvm::Error::success());
}
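
readIndex now reports failures as llvm::Error rather than an ErrorCode enum, so callers must test or consume the error before it is destroyed; a minimal caller sketch (CachePath and the surrounding handling are assumed):

auto IndexAndErr = GlobalModuleIndex::readIndex(CachePath);
GlobalModuleIndex *Index = IndexAndErr.first;
if (llvm::Error Err = std::move(IndexAndErr.second)) {
  // No usable index (e.g. not built yet); drop or log the reason.
  llvm::consumeError(std::move(Err));
  Index = nullptr;
}
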
void
@@ -436,9 +455,7 @@ namespace {
: FileMgr(FileMgr), PCHContainerRdr(PCHContainerRdr) {}
/// Load the contents of the given module file into the builder.
- ///
- /// \returns true if an error occurred, false otherwise.
- bool loadModuleFile(const FileEntry *File);
+ llvm::Error loadModuleFile(const FileEntry *File);
/// Write the index to the given bitstream.
/// \returns true if an error occurred, false otherwise.
@@ -509,24 +526,25 @@ namespace {
};
}
-bool GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
+llvm::Error GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
// Open the module file.
auto Buffer = FileMgr.getBufferForFile(File, /*isVolatile=*/true);
- if (!Buffer) {
- return true;
- }
+ if (!Buffer)
+ return llvm::createStringError(Buffer.getError(),
+ "failed getting buffer for module file");
// Initialize the input stream
llvm::BitstreamCursor InStream(PCHContainerRdr.ExtractPCH(**Buffer));
// Sniff for the signature.
- if (InStream.Read(8) != 'C' ||
- InStream.Read(8) != 'P' ||
- InStream.Read(8) != 'C' ||
- InStream.Read(8) != 'H') {
- return true;
- }
+ for (unsigned char C : {'C', 'P', 'C', 'H'})
+ if (Expected<llvm::SimpleBitstreamCursor::word_t> Res = InStream.Read(8)) {
+ if (Res.get() != C)
+ return llvm::createStringError(std::errc::illegal_byte_sequence,
+ "expected signature CPCH");
+ } else
+ return Res.takeError();
// Record this module file and assign it a unique ID (if it doesn't have
// one already).
@@ -536,7 +554,11 @@ bool GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
enum { Other, ControlBlock, ASTBlock, DiagnosticOptionsBlock } State = Other;
bool Done = false;
while (!Done) {
- llvm::BitstreamEntry Entry = InStream.advance();
+ Expected<llvm::BitstreamEntry> MaybeEntry = InStream.advance();
+ if (!MaybeEntry)
+ return MaybeEntry.takeError();
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
+
switch (Entry.Kind) {
case llvm::BitstreamEntry::Error:
Done = true;
@@ -545,8 +567,10 @@ bool GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
case llvm::BitstreamEntry::Record:
// In the 'other' state, just skip the record. We don't care.
if (State == Other) {
- InStream.skipRecord(Entry.ID);
- continue;
+ if (llvm::Expected<unsigned> Skipped = InStream.skipRecord(Entry.ID))
+ continue;
+ else
+ return Skipped.takeError();
}
// Handle potentially-interesting records below.
@@ -554,8 +578,8 @@ bool GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
case llvm::BitstreamEntry::SubBlock:
if (Entry.ID == CONTROL_BLOCK_ID) {
- if (InStream.EnterSubBlock(CONTROL_BLOCK_ID))
- return true;
+ if (llvm::Error Err = InStream.EnterSubBlock(CONTROL_BLOCK_ID))
+ return Err;
// Found the control block.
State = ControlBlock;
@@ -563,8 +587,8 @@ bool GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
}
if (Entry.ID == AST_BLOCK_ID) {
- if (InStream.EnterSubBlock(AST_BLOCK_ID))
- return true;
+ if (llvm::Error Err = InStream.EnterSubBlock(AST_BLOCK_ID))
+ return Err;
// Found the AST block.
State = ASTBlock;
@@ -572,16 +596,16 @@ bool GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
}
if (Entry.ID == UNHASHED_CONTROL_BLOCK_ID) {
- if (InStream.EnterSubBlock(UNHASHED_CONTROL_BLOCK_ID))
- return true;
+ if (llvm::Error Err = InStream.EnterSubBlock(UNHASHED_CONTROL_BLOCK_ID))
+ return Err;
// Found the Diagnostic Options block.
State = DiagnosticOptionsBlock;
continue;
}
- if (InStream.SkipBlock())
- return true;
+ if (llvm::Error Err = InStream.SkipBlock())
+ return Err;
continue;
@@ -593,7 +617,10 @@ bool GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
// Read the given record.
SmallVector<uint64_t, 64> Record;
StringRef Blob;
- unsigned Code = InStream.readRecord(Entry.ID, Record, &Blob);
+ Expected<unsigned> MaybeCode = InStream.readRecord(Entry.ID, Record, &Blob);
+ if (!MaybeCode)
+ return MaybeCode.takeError();
+ unsigned Code = MaybeCode.get();
// Handle module dependencies.
if (State == ControlBlock && Code == IMPORTS) {
@@ -631,11 +658,13 @@ bool GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
// Find the imported module file.
const FileEntry *DependsOnFile
- = FileMgr.getFile(ImportedFile, /*openFile=*/false,
- /*cacheFailure=*/false);
+ = FileMgr.getFile(ImportedFile, /*OpenFile=*/false,
+ /*CacheFailure=*/false);
if (!DependsOnFile)
- return true;
+ return llvm::createStringError(std::errc::bad_file_descriptor,
+ "imported file \"%s\" not found",
+ ImportedFile.c_str());
// Save the information in ImportedModuleFileInfo so we can verify after
// loading all pcms.
@@ -680,7 +709,7 @@ bool GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
// We don't care about this record.
}
- return false;
+ return llvm::Error::success();
}
namespace {
@@ -740,6 +769,7 @@ bool GlobalModuleIndexBuilder::writeIndex(llvm::BitstreamWriter &Stream) {
}
using namespace llvm;
+ llvm::TimeTraceScope TimeScope("Module WriteIndex", StringRef(""));
// Emit the file header.
Stream.Emit((unsigned)'B', 8);
@@ -817,7 +847,7 @@ bool GlobalModuleIndexBuilder::writeIndex(llvm::BitstreamWriter &Stream) {
return false;
}
-GlobalModuleIndex::ErrorCode
+llvm::Error
GlobalModuleIndex::writeIndex(FileManager &FileMgr,
const PCHContainerReader &PCHContainerRdr,
StringRef Path) {
@@ -830,7 +860,7 @@ GlobalModuleIndex::writeIndex(FileManager &FileMgr,
llvm::LockFileManager Locked(IndexPath);
switch (Locked) {
case llvm::LockFileManager::LFS_Error:
- return EC_IOError;
+ return llvm::createStringError(std::errc::io_error, "LFS error");
case llvm::LockFileManager::LFS_Owned:
// We're responsible for building the index ourselves. Do so below.
@@ -839,7 +869,8 @@ GlobalModuleIndex::writeIndex(FileManager &FileMgr,
case llvm::LockFileManager::LFS_Shared:
// Someone else is responsible for building the index. We don't care
// when they finish, so we're done.
- return EC_Building;
+ return llvm::createStringError(std::errc::device_or_resource_busy,
+ "someone else is building the index");
}
// The module index builder.
@@ -856,7 +887,8 @@ GlobalModuleIndex::writeIndex(FileManager &FileMgr,
// in the process of rebuilding a module. They'll rebuild the index
// at the end of that translation unit, so we don't have to.
if (llvm::sys::path::extension(D->path()) == ".pcm.lock")
- return EC_Building;
+ return llvm::createStringError(std::errc::device_or_resource_busy,
+ "someone else is building the index");
continue;
}
@@ -867,8 +899,8 @@ GlobalModuleIndex::writeIndex(FileManager &FileMgr,
continue;
// Load this module file.
- if (Builder.loadModuleFile(ModuleFile))
- return EC_IOError;
+ if (llvm::Error Err = Builder.loadModuleFile(ModuleFile))
+ return Err;
}
// The output buffer, into which the global index will be written.
@@ -876,7 +908,8 @@ GlobalModuleIndex::writeIndex(FileManager &FileMgr,
{
llvm::BitstreamWriter OutputStream(OutputBuffer);
if (Builder.writeIndex(OutputStream))
- return EC_IOError;
+ return llvm::createStringError(std::errc::io_error,
+ "failed writing index");
}
// Write the global index file to a temporary file.
@@ -884,31 +917,32 @@ GlobalModuleIndex::writeIndex(FileManager &FileMgr,
int TmpFD;
if (llvm::sys::fs::createUniqueFile(IndexPath + "-%%%%%%%%", TmpFD,
IndexTmpPath))
- return EC_IOError;
+ return llvm::createStringError(std::errc::io_error,
+ "failed creating unique file");
// Open the temporary global index file for output.
llvm::raw_fd_ostream Out(TmpFD, true);
if (Out.has_error())
- return EC_IOError;
+ return llvm::createStringError(Out.error(), "failed outputting to stream");
// Write the index.
Out.write(OutputBuffer.data(), OutputBuffer.size());
Out.close();
if (Out.has_error())
- return EC_IOError;
+ return llvm::createStringError(Out.error(), "failed writing to stream");
// Remove the old index file. It isn't relevant any more.
llvm::sys::fs::remove(IndexPath);
// Rename the newly-written index file to the proper name.
- if (llvm::sys::fs::rename(IndexTmpPath, IndexPath)) {
- // Rename failed; just remove the
+ if (std::error_code Err = llvm::sys::fs::rename(IndexTmpPath, IndexPath)) {
+ // Remove the file on failure; don't check whether removal succeeded.
llvm::sys::fs::remove(IndexTmpPath);
- return EC_IOError;
+ return llvm::createStringError(Err, "failed renaming file \"%s\" to \"%s\"",
+ IndexTmpPath.c_str(), IndexPath.c_str());
}
- // We're done.
- return EC_None;
+ return llvm::Error::success();
}
namespace {
diff --git a/lib/Serialization/InMemoryModuleCache.cpp b/lib/Serialization/InMemoryModuleCache.cpp
new file mode 100644
index 000000000000..d35fa2a807f4
--- /dev/null
+++ b/lib/Serialization/InMemoryModuleCache.cpp
@@ -0,0 +1,80 @@
+//===- InMemoryModuleCache.cpp - Cache for loaded memory buffers ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Serialization/InMemoryModuleCache.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+using namespace clang;
+
+InMemoryModuleCache::State
+InMemoryModuleCache::getPCMState(llvm::StringRef Filename) const {
+ auto I = PCMs.find(Filename);
+ if (I == PCMs.end())
+ return Unknown;
+ if (I->second.IsFinal)
+ return Final;
+ return I->second.Buffer ? Tentative : ToBuild;
+}
+
+llvm::MemoryBuffer &
+InMemoryModuleCache::addPCM(llvm::StringRef Filename,
+ std::unique_ptr<llvm::MemoryBuffer> Buffer) {
+ auto Insertion = PCMs.insert(std::make_pair(Filename, std::move(Buffer)));
+ assert(Insertion.second && "Already has a PCM");
+ return *Insertion.first->second.Buffer;
+}
+
+llvm::MemoryBuffer &
+InMemoryModuleCache::addBuiltPCM(llvm::StringRef Filename,
+ std::unique_ptr<llvm::MemoryBuffer> Buffer) {
+ auto &PCM = PCMs[Filename];
+ assert(!PCM.IsFinal && "Trying to override finalized PCM?");
+ assert(!PCM.Buffer && "Trying to override tentative PCM?");
+ PCM.Buffer = std::move(Buffer);
+ PCM.IsFinal = true;
+ return *PCM.Buffer;
+}
+
+llvm::MemoryBuffer *
+InMemoryModuleCache::lookupPCM(llvm::StringRef Filename) const {
+ auto I = PCMs.find(Filename);
+ if (I == PCMs.end())
+ return nullptr;
+ return I->second.Buffer.get();
+}
+
+bool InMemoryModuleCache::isPCMFinal(llvm::StringRef Filename) const {
+ return getPCMState(Filename) == Final;
+}
+
+bool InMemoryModuleCache::shouldBuildPCM(llvm::StringRef Filename) const {
+ return getPCMState(Filename) == ToBuild;
+}
+
+bool InMemoryModuleCache::tryToDropPCM(llvm::StringRef Filename) {
+ auto I = PCMs.find(Filename);
+ assert(I != PCMs.end() && "PCM to remove is unknown...");
+
+ auto &PCM = I->second;
+ assert(PCM.Buffer && "PCM to remove is scheduled to be built...");
+
+ if (PCM.IsFinal)
+ return true;
+
+ PCM.Buffer.reset();
+ return false;
+}
+
+void InMemoryModuleCache::finalizePCM(llvm::StringRef Filename) {
+ auto I = PCMs.find(Filename);
+ assert(I != PCMs.end() && "PCM to finalize is unknown...");
+
+ auto &PCM = I->second;
+ assert(PCM.Buffer && "Trying to finalize a dropped PCM...");
+ PCM.IsFinal = true;
+}
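
A hedged usage sketch of the lifecycle these helpers imply, with illustrative filenames and buffer contents:

#include "clang/Serialization/InMemoryModuleCache.h"
#include "llvm/Support/MemoryBuffer.h"
#include <cassert>

static void exerciseCache() {
  clang::InMemoryModuleCache Cache;
  // First import: remember the on-disk PCM tentatively.
  Cache.addPCM("Foo.pcm", llvm::MemoryBuffer::getMemBuffer("tentative pcm"));
  // Validation failed: tryToDropPCM returns true only if the PCM was already
  // finalized and therefore cannot be dropped.
  if (!Cache.tryToDropPCM("Foo.pcm"))
    assert(Cache.shouldBuildPCM("Foo.pcm") && "expected rebuild state");
  // After rebuilding, the freshly written PCM becomes final.
  Cache.addBuiltPCM("Foo.pcm",
                    llvm::MemoryBuffer::getMemBufferCopy("rebuilt pcm"));
  assert(Cache.isPCMFinal("Foo.pcm"));
}
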
diff --git a/lib/Serialization/Module.cpp b/lib/Serialization/Module.cpp
index 580e46e4f240..2b6c9211beaf 100644
--- a/lib/Serialization/Module.cpp
+++ b/lib/Serialization/Module.cpp
@@ -1,9 +1,8 @@
//===- Module.cpp - Module description ------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Serialization/ModuleFileExtension.cpp b/lib/Serialization/ModuleFileExtension.cpp
index 5bd0a1ce660b..e1ae8a494ab1 100644
--- a/lib/Serialization/ModuleFileExtension.cpp
+++ b/lib/Serialization/ModuleFileExtension.cpp
@@ -1,9 +1,8 @@
//===-- ModuleFileExtension.cpp - Module File Extensions ------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "clang/Serialization/ModuleFileExtension.h"
diff --git a/lib/Serialization/ModuleManager.cpp b/lib/Serialization/ModuleManager.cpp
index 54e0c08c5bc9..6ae0c4f57551 100644
--- a/lib/Serialization/ModuleManager.cpp
+++ b/lib/Serialization/ModuleManager.cpp
@@ -1,9 +1,8 @@
//===- ModuleManager.cpp - Module Manager ---------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -15,10 +14,10 @@
#include "clang/Serialization/ModuleManager.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LLVM.h"
-#include "clang/Basic/MemoryBufferCache.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/ModuleMap.h"
#include "clang/Serialization/GlobalModuleIndex.h"
+#include "clang/Serialization/InMemoryModuleCache.h"
#include "clang/Serialization/Module.h"
#include "clang/Serialization/PCHContainerOperations.h"
#include "llvm/ADT/STLExtras.h"
@@ -43,8 +42,8 @@ using namespace clang;
using namespace serialization;
ModuleFile *ModuleManager::lookupByFileName(StringRef Name) const {
- const FileEntry *Entry = FileMgr.getFile(Name, /*openFile=*/false,
- /*cacheFailure=*/false);
+ const FileEntry *Entry = FileMgr.getFile(Name, /*OpenFile=*/false,
+ /*CacheFailure=*/false);
if (Entry)
return lookup(Entry);
@@ -69,8 +68,8 @@ ModuleFile *ModuleManager::lookup(const FileEntry *File) const {
std::unique_ptr<llvm::MemoryBuffer>
ModuleManager::lookupBuffer(StringRef Name) {
- const FileEntry *Entry = FileMgr.getFile(Name, /*openFile=*/false,
- /*cacheFailure=*/false);
+ const FileEntry *Entry = FileMgr.getFile(Name, /*OpenFile=*/false,
+ /*CacheFailure=*/false);
return std::move(InMemoryBuffers[Entry]);
}
@@ -119,6 +118,8 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
// contents, but we can't check that.)
ExpectedModTime = 0;
}
+ // Note: ExpectedSize and ExpectedModTime will be 0 for MK_ImplicitModule
+ // when using an ASTFileSignature.
if (lookupModuleFile(FileName, ExpectedSize, ExpectedModTime, Entry)) {
ErrorStr = "module file out of date";
return OutOfDate;
@@ -160,15 +161,21 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
// Load the contents of the module
if (std::unique_ptr<llvm::MemoryBuffer> Buffer = lookupBuffer(FileName)) {
// The buffer was already provided for us.
- NewModule->Buffer = &PCMCache->addBuffer(FileName, std::move(Buffer));
+ NewModule->Buffer = &ModuleCache->addBuiltPCM(FileName, std::move(Buffer));
// Since the cached buffer is reused, it is safe to close the file
// descriptor that was opened while stat()ing the PCM in
// lookupModuleFile() above, it won't be needed any longer.
Entry->closeFile();
- } else if (llvm::MemoryBuffer *Buffer = PCMCache->lookupBuffer(FileName)) {
+ } else if (llvm::MemoryBuffer *Buffer =
+ getModuleCache().lookupPCM(FileName)) {
NewModule->Buffer = Buffer;
// As above, the file descriptor is no longer needed.
Entry->closeFile();
+ } else if (getModuleCache().shouldBuildPCM(FileName)) {
+ // Report that the module is out of date, since we tried (and failed) to
+ // import it earlier.
+ Entry->closeFile();
+ return OutOfDate;
} else {
// Open the AST file.
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Buf((std::error_code()));
@@ -177,7 +184,7 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
} else {
// Get a buffer of the file and close the file descriptor when done.
Buf = FileMgr.getBufferForFile(NewModule->File,
- /*IsVolatile=*/false,
+ /*isVolatile=*/false,
/*ShouldClose=*/true);
}
@@ -186,7 +193,7 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
return Missing;
}
- NewModule->Buffer = &PCMCache->addBuffer(FileName, std::move(*Buf));
+ NewModule->Buffer = &getModuleCache().addPCM(FileName, std::move(*Buf));
}
// Initialize the stream.
@@ -198,7 +205,7 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
ExpectedSignature, ErrorStr)) {
// Try to remove the buffer. If it can't be removed, then it was already
// validated by this process.
- if (!PCMCache->tryToRemoveBuffer(NewModule->FileName))
+ if (!getModuleCache().tryToDropPCM(NewModule->FileName))
FileMgr.invalidateCache(NewModule->File);
return OutOfDate;
}
@@ -247,8 +254,7 @@ void ModuleManager::removeModules(
// Remove the modules from the PCH chain.
for (auto I = First; I != Last; ++I) {
if (!I->isModule()) {
- PCHChain.erase(std::find(PCHChain.begin(), PCHChain.end(), &*I),
- PCHChain.end());
+ PCHChain.erase(llvm::find(PCHChain, &*I), PCHChain.end());
break;
}
}
@@ -263,17 +269,6 @@ void ModuleManager::removeModules(
mod->setASTFile(nullptr);
}
}
-
- // Files that didn't make it through ReadASTCore successfully will be
- // rebuilt (or there was an error). Invalidate them so that we can load the
- // new files that will be renamed over the old ones.
- //
- // The PCMCache tracks whether the module was successfully loaded in another
- // thread/context; in that case, it won't need to be rebuilt (and we can't
- // safely invalidate it anyway).
- if (LoadedSuccessfully.count(&*victim) == 0 &&
- !PCMCache->tryToRemoveBuffer(victim->FileName))
- FileMgr.invalidateCache(victim->File);
}
// Delete the modules.
@@ -328,11 +323,12 @@ void ModuleManager::moduleFileAccepted(ModuleFile *MF) {
ModulesInCommonWithGlobalIndex.push_back(MF);
}
-ModuleManager::ModuleManager(FileManager &FileMgr, MemoryBufferCache &PCMCache,
+ModuleManager::ModuleManager(FileManager &FileMgr,
+ InMemoryModuleCache &ModuleCache,
const PCHContainerReader &PCHContainerRdr,
- const HeaderSearch& HeaderSearchInfo)
- : FileMgr(FileMgr), PCMCache(&PCMCache), PCHContainerRdr(PCHContainerRdr),
- HeaderSearchInfo(HeaderSearchInfo) {}
+ const HeaderSearch &HeaderSearchInfo)
+ : FileMgr(FileMgr), ModuleCache(&ModuleCache),
+ PCHContainerRdr(PCHContainerRdr), HeaderSearchInfo(HeaderSearchInfo) {}
ModuleManager::~ModuleManager() { delete FirstVisitState; }
@@ -451,7 +447,7 @@ bool ModuleManager::lookupModuleFile(StringRef FileName,
// Open the file immediately to ensure there is no race between stat'ing and
// opening the file.
- File = FileMgr.getFile(FileName, /*openFile=*/true, /*cacheFailure=*/false);
+ File = FileMgr.getFile(FileName, /*OpenFile=*/true, /*CacheFailure=*/false);
if (!File)
return false;
diff --git a/lib/Serialization/MultiOnDiskHashTable.h b/lib/Serialization/MultiOnDiskHashTable.h
index ded7cd146449..adc97d57e0ac 100644
--- a/lib/Serialization/MultiOnDiskHashTable.h
+++ b/lib/Serialization/MultiOnDiskHashTable.h
@@ -1,9 +1,8 @@
//===- MultiOnDiskHashTable.h - Merged set of hash tables -------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Serialization/PCHContainerOperations.cpp b/lib/Serialization/PCHContainerOperations.cpp
index fbc613efeb63..00063d64f3f2 100644
--- a/lib/Serialization/PCHContainerOperations.cpp
+++ b/lib/Serialization/PCHContainerOperations.cpp
@@ -1,9 +1,8 @@
//=== Serialization/PCHContainerOperations.cpp - PCH Containers -*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -14,7 +13,7 @@
#include "clang/Serialization/PCHContainerOperations.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/Lex/ModuleLoader.h"
-#include "llvm/Bitcode/BitstreamReader.h"
+#include "llvm/Bitstream/BitstreamReader.h"
#include "llvm/Support/raw_ostream.h"
#include <utility>
diff --git a/lib/StaticAnalyzer/Checkers/AllocationState.h b/lib/StaticAnalyzer/Checkers/AllocationState.h
index c8193f77f928..25de37003319 100644
--- a/lib/StaticAnalyzer/Checkers/AllocationState.h
+++ b/lib/StaticAnalyzer/Checkers/AllocationState.h
@@ -1,9 +1,8 @@
//===--- AllocationState.h ------------------------------------- *- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp b/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
index b5d0f6620a1d..d0def6918932 100644
--- a/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
@@ -1,9 +1,8 @@
//===- AnalysisOrderChecker - Print callbacks called ------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -45,8 +44,8 @@ class AnalysisOrderChecker
check::LiveSymbols> {
bool isCallbackEnabled(AnalyzerOptions &Opts, StringRef CallbackName) const {
- return Opts.getCheckerBooleanOption("*", false, this) ||
- Opts.getCheckerBooleanOption(CallbackName, false, this);
+ return Opts.getCheckerBooleanOption(this, "*") ||
+ Opts.getCheckerBooleanOption(this, CallbackName);
}
bool isCallbackEnabled(CheckerContext &C, StringRef CallbackName) const {
@@ -176,3 +175,7 @@ public:
void ento::registerAnalysisOrderChecker(CheckerManager &mgr) {
mgr.registerChecker<AnalysisOrderChecker>();
}
+
+bool ento::shouldRegisterAnalysisOrderChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp b/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
index 5e01012401b2..20f3008b4a4b 100644
--- a/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
@@ -1,9 +1,8 @@
//==--AnalyzerStatsChecker.cpp - Analyzer visitation statistics --*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file reports various statistics about analyzer visitation.
@@ -140,3 +139,7 @@ void AnalyzerStatsChecker::checkEndAnalysis(ExplodedGraph &G,
void ento::registerAnalyzerStatsChecker(CheckerManager &mgr) {
mgr.registerChecker<AnalyzerStatsChecker>();
}
+
+bool ento::shouldRegisterAnalyzerStatsChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp b/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
index 20f3092fdba4..58017acb4a24 100644
--- a/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
@@ -1,9 +1,8 @@
//== ArrayBoundChecker.cpp ------------------------------*- C++ -*--==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -91,3 +90,7 @@ void ArrayBoundChecker::checkLocation(SVal l, bool isLoad, const Stmt* LoadS,
void ento::registerArrayBoundChecker(CheckerManager &mgr) {
mgr.registerChecker<ArrayBoundChecker>();
}
+
+bool ento::shouldRegisterArrayBoundChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
index 26887be9f258..3bf8a1836b19 100644
--- a/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
+++ b/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
@@ -1,9 +1,8 @@
//== ArrayBoundCheckerV2.cpp ------------------------------------*- C++ -*--==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -12,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "Taint.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/CharUnits.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
@@ -25,6 +25,7 @@
using namespace clang;
using namespace ento;
+using namespace taint;
namespace {
class ArrayBoundCheckerV2 :
@@ -205,7 +206,7 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
// If we are under constrained and the index variables are tainted, report.
if (state_exceedsUpperBound && state_withinUpperBound) {
SVal ByteOffset = rawOffset.getByteOffset();
- if (state->isTainted(ByteOffset)) {
+ if (isTainted(state, ByteOffset)) {
reportOOB(checkerContext, state_exceedsUpperBound, OOB_Tainted,
llvm::make_unique<TaintBugVisitor>(ByteOffset));
return;
@@ -354,3 +355,7 @@ RegionRawOffsetV2 RegionRawOffsetV2::computeOffset(ProgramStateRef state,
void ento::registerArrayBoundCheckerV2(CheckerManager &mgr) {
mgr.registerChecker<ArrayBoundCheckerV2>();
}
+
+bool ento::shouldRegisterArrayBoundCheckerV2(const LangOptions &LO) {
+ return true;
+}
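
For illustration (not taken from the patch), this is the kind of code the tainted-offset path above is meant to flag, assuming taint propagation (alpha.security.taint.TaintPropagation) and alpha.security.ArrayBoundV2 are both enabled: the index comes from an untrusted source and is only bounded from below, so the byte offset is still tainted and under-constrained at the access.

    #include <stdio.h>

    static int buf[16];

    int readAt(void) {
      int i = 0;
      scanf("%d", &i);   // 'i' becomes tainted
      if (i < 0)         // only a lower bound; the upper bound is never checked
        return -1;
      return buf[i];     // tainted, under-constrained index: reported via OOB_Tainted
    }
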
diff --git a/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
index 577b5349f62e..e3fb4c3eb523 100644
--- a/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
+++ b/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -1,9 +1,8 @@
//== BasicObjCFoundationChecks.cpp - Simple Apple-Foundation checks -*- C++ -*--
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -1243,27 +1242,54 @@ void ento::registerNilArgChecker(CheckerManager &mgr) {
mgr.registerChecker<NilArgChecker>();
}
+bool ento::shouldRegisterNilArgChecker(const LangOptions &LO) {
+ return true;
+}
+
void ento::registerCFNumberChecker(CheckerManager &mgr) {
mgr.registerChecker<CFNumberChecker>();
}
+bool ento::shouldRegisterCFNumberChecker(const LangOptions &LO) {
+ return true;
+}
+
void ento::registerCFRetainReleaseChecker(CheckerManager &mgr) {
mgr.registerChecker<CFRetainReleaseChecker>();
}
+bool ento::shouldRegisterCFRetainReleaseChecker(const LangOptions &LO) {
+ return true;
+}
+
void ento::registerClassReleaseChecker(CheckerManager &mgr) {
mgr.registerChecker<ClassReleaseChecker>();
}
+bool ento::shouldRegisterClassReleaseChecker(const LangOptions &LO) {
+ return true;
+}
+
void ento::registerVariadicMethodTypeChecker(CheckerManager &mgr) {
mgr.registerChecker<VariadicMethodTypeChecker>();
}
+bool ento::shouldRegisterVariadicMethodTypeChecker(const LangOptions &LO) {
+ return true;
+}
+
void ento::registerObjCLoopChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCLoopChecker>();
}
-void
-ento::registerObjCNonNilReturnValueChecker(CheckerManager &mgr) {
+bool ento::shouldRegisterObjCLoopChecker(const LangOptions &LO) {
+ return true;
+}
+
+void ento::registerObjCNonNilReturnValueChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCNonNilReturnValueChecker>();
}
+
+bool ento::shouldRegisterObjCNonNilReturnValueChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp b/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
index 00d08b371f37..009160fc9815 100644
--- a/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
@@ -1,9 +1,8 @@
//===-- BlockInCriticalSectionChecker.cpp -----------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -183,3 +182,7 @@ void BlockInCriticalSectionChecker::reportBlockInCritSection(
void ento::registerBlockInCriticalSectionChecker(CheckerManager &mgr) {
mgr.registerChecker<BlockInCriticalSectionChecker>();
}
+
+bool ento::shouldRegisterBlockInCriticalSectionChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp b/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
index 3008eddd397e..de8763c1b7b5 100644
--- a/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
@@ -1,9 +1,8 @@
//== BoolAssignmentChecker.cpp - Boolean assignment checker -----*- C++ -*--==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -155,3 +154,7 @@ void BoolAssignmentChecker::checkBind(SVal loc, SVal val, const Stmt *S,
void ento::registerBoolAssignmentChecker(CheckerManager &mgr) {
mgr.registerChecker<BoolAssignmentChecker>();
}
+
+bool ento::shouldRegisterBoolAssignmentChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp b/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
index f98027942e18..10594e331cbe 100644
--- a/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
@@ -1,9 +1,8 @@
//=== BuiltinFunctionChecker.cpp --------------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -15,6 +14,7 @@
#include "clang/Basic/Builtins.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
using namespace clang;
@@ -24,30 +24,32 @@ namespace {
class BuiltinFunctionChecker : public Checker<eval::Call> {
public:
- bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+ bool evalCall(const CallEvent &Call, CheckerContext &C) const;
};
}
-bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
+bool BuiltinFunctionChecker::evalCall(const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef state = C.getState();
- const FunctionDecl *FD = C.getCalleeDecl(CE);
- const LocationContext *LCtx = C.getLocationContext();
+ const auto *FD = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
if (!FD)
return false;
+ const LocationContext *LCtx = C.getLocationContext();
+ const Expr *CE = Call.getOriginExpr();
+
switch (FD->getBuiltinID()) {
default:
return false;
case Builtin::BI__builtin_assume: {
- assert (CE->arg_begin() != CE->arg_end());
- SVal ArgSVal = C.getSVal(CE->getArg(0));
- if (ArgSVal.isUndef())
+ assert (Call.getNumArgs() > 0);
+ SVal Arg = Call.getArgSVal(0);
+ if (Arg.isUndef())
return true; // Return true to model purity.
- state = state->assume(ArgSVal.castAs<DefinedOrUnknownSVal>(), true);
+ state = state->assume(Arg.castAs<DefinedOrUnknownSVal>(), true);
// FIXME: do we want to warn here? Not right now. The most reports might
// come from infeasible paths, thus being false positives.
if (!state) {
@@ -67,9 +69,9 @@ bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
// __builtin_assume_aligned, just return the value of the subexpression.
// __builtin_addressof is going from a reference to a pointer, but those
// are represented the same way in the analyzer.
- assert (CE->arg_begin() != CE->arg_end());
- SVal X = C.getSVal(*(CE->arg_begin()));
- C.addTransition(state->BindExpr(CE, LCtx, X));
+ assert (Call.getNumArgs() > 0);
+ SVal Arg = Call.getArgSVal(0);
+ C.addTransition(state->BindExpr(CE, LCtx, Arg));
return true;
}
@@ -83,12 +85,14 @@ bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
// Set the extent of the region in bytes. This enables us to use the
// SVal of the argument directly. If we save the extent in bits, we
// cannot represent values like symbol*8.
- auto Size = C.getSVal(*(CE->arg_begin())).castAs<DefinedOrUnknownSVal>();
+ auto Size = Call.getArgSVal(0);
+ if (Size.isUndef())
+ return true; // Return true to model purity.
SValBuilder& svalBuilder = C.getSValBuilder();
DefinedOrUnknownSVal Extent = R->getExtent(svalBuilder);
DefinedOrUnknownSVal extentMatchesSizeArg =
- svalBuilder.evalEQ(state, Extent, Size);
+ svalBuilder.evalEQ(state, Extent, Size.castAs<DefinedOrUnknownSVal>());
state = state->assume(extentMatchesSizeArg, true);
assert(state && "The region should not have any previous constraints");
@@ -96,21 +100,30 @@ bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
return true;
}
+ case Builtin::BI__builtin_dynamic_object_size:
case Builtin::BI__builtin_object_size:
case Builtin::BI__builtin_constant_p: {
// This must be resolvable at compile time, so we defer to the constant
// evaluator for a value.
+ SValBuilder &SVB = C.getSValBuilder();
SVal V = UnknownVal();
Expr::EvalResult EVResult;
if (CE->EvaluateAsInt(EVResult, C.getASTContext(), Expr::SE_NoSideEffects)) {
// Make sure the result has the correct type.
llvm::APSInt Result = EVResult.Val.getInt();
- SValBuilder &SVB = C.getSValBuilder();
BasicValueFactory &BVF = SVB.getBasicValueFactory();
BVF.getAPSIntType(CE->getType()).apply(Result);
V = SVB.makeIntVal(Result);
}
+ if (FD->getBuiltinID() == Builtin::BI__builtin_constant_p) {
+ // If we didn't manage to figure out if the value is constant or not,
+ // it is safe to assume that it's not constant and unsafe to assume
+ // that it's constant.
+ if (V.isUnknown())
+ V = SVB.makeIntVal(0, CE->getType());
+ }
+
C.addTransition(state->BindExpr(CE, LCtx, V));
return true;
}
@@ -120,3 +133,7 @@ bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
void ento::registerBuiltinFunctionChecker(CheckerManager &mgr) {
mgr.registerChecker<BuiltinFunctionChecker>();
}
+
+bool ento::shouldRegisterBuiltinFunctionChecker(const LangOptions &LO) {
+ return true;
+}
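
For illustration (not taken from the patch), the new __builtin_constant_p fallback above changes what analyzed code observes: when the constant evaluator cannot fold the operand, the call is now modeled as returning 0 rather than an unknown value, so the "is a constant" branch is no longer explored for genuinely runtime values.

    int describe(int x) {
      if (__builtin_constant_p(x))  // modeled as false for a symbolic 'x'
        return 1;
      return 0;
    }
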
diff --git a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index 8bffada69b9b..44f4530781a8 100644
--- a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -1,9 +1,8 @@
//= CStringChecker.cpp - Checks calls to C string functions --------*- C++ -*-//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -18,6 +17,7 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "llvm/ADT/STLExtras.h"
@@ -58,7 +58,7 @@ public:
static void *getTag() { static int tag; return &tag; }
- bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+ bool evalCall(const CallEvent &Call, CheckerContext &C) const;
void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const;
void checkLiveSymbols(ProgramStateRef state, SymbolReaper &SR) const;
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
@@ -73,7 +73,38 @@ public:
typedef void (CStringChecker::*FnCheck)(CheckerContext &,
const CallExpr *) const;
+ CallDescriptionMap<FnCheck> Callbacks = {
+ {{CDF_MaybeBuiltin, "memcpy", 3}, &CStringChecker::evalMemcpy},
+ {{CDF_MaybeBuiltin, "mempcpy", 3}, &CStringChecker::evalMempcpy},
+ {{CDF_MaybeBuiltin, "memcmp", 3}, &CStringChecker::evalMemcmp},
+ {{CDF_MaybeBuiltin, "memmove", 3}, &CStringChecker::evalMemmove},
+ {{CDF_MaybeBuiltin, "memset", 3}, &CStringChecker::evalMemset},
+ {{CDF_MaybeBuiltin, "explicit_memset", 3}, &CStringChecker::evalMemset},
+ {{CDF_MaybeBuiltin, "strcpy", 2}, &CStringChecker::evalStrcpy},
+ {{CDF_MaybeBuiltin, "strncpy", 3}, &CStringChecker::evalStrncpy},
+ {{CDF_MaybeBuiltin, "stpcpy", 2}, &CStringChecker::evalStpcpy},
+ {{CDF_MaybeBuiltin, "strlcpy", 3}, &CStringChecker::evalStrlcpy},
+ {{CDF_MaybeBuiltin, "strcat", 2}, &CStringChecker::evalStrcat},
+ {{CDF_MaybeBuiltin, "strncat", 3}, &CStringChecker::evalStrncat},
+ {{CDF_MaybeBuiltin, "strlcat", 3}, &CStringChecker::evalStrlcat},
+ {{CDF_MaybeBuiltin, "strlen", 1}, &CStringChecker::evalstrLength},
+ {{CDF_MaybeBuiltin, "strnlen", 2}, &CStringChecker::evalstrnLength},
+ {{CDF_MaybeBuiltin, "strcmp", 2}, &CStringChecker::evalStrcmp},
+ {{CDF_MaybeBuiltin, "strncmp", 3}, &CStringChecker::evalStrncmp},
+ {{CDF_MaybeBuiltin, "strcasecmp", 2}, &CStringChecker::evalStrcasecmp},
+ {{CDF_MaybeBuiltin, "strncasecmp", 3}, &CStringChecker::evalStrncasecmp},
+ {{CDF_MaybeBuiltin, "strsep", 2}, &CStringChecker::evalStrsep},
+ {{CDF_MaybeBuiltin, "bcopy", 3}, &CStringChecker::evalBcopy},
+ {{CDF_MaybeBuiltin, "bcmp", 3}, &CStringChecker::evalMemcmp},
+ {{CDF_MaybeBuiltin, "bzero", 2}, &CStringChecker::evalBzero},
+ {{CDF_MaybeBuiltin, "explicit_bzero", 2}, &CStringChecker::evalBzero},
+ };
+
+ // These require a bit of special handling.
+ CallDescription StdCopy{{"std", "copy"}, 3},
+ StdCopyBackward{{"std", "copy_backward"}, 3};
+ FnCheck identifyCall(const CallEvent &Call, CheckerContext &C) const;
void evalMemcpy(CheckerContext &C, const CallExpr *CE) const;
void evalMempcpy(CheckerContext &C, const CallExpr *CE) const;
void evalMemmove(CheckerContext &C, const CallExpr *CE) const;
@@ -1201,9 +1232,6 @@ void CStringChecker::evalCopyCommon(CheckerContext &C,
void CStringChecker::evalMemcpy(CheckerContext &C, const CallExpr *CE) const {
- if (CE->getNumArgs() < 3)
- return;
-
// void *memcpy(void *restrict dst, const void *restrict src, size_t n);
// The return value is the address of the destination buffer.
const Expr *Dest = CE->getArg(0);
@@ -1213,9 +1241,6 @@ void CStringChecker::evalMemcpy(CheckerContext &C, const CallExpr *CE) const {
}
void CStringChecker::evalMempcpy(CheckerContext &C, const CallExpr *CE) const {
- if (CE->getNumArgs() < 3)
- return;
-
// void *mempcpy(void *restrict dst, const void *restrict src, size_t n);
// The return value is a pointer to the byte following the last written byte.
const Expr *Dest = CE->getArg(0);
@@ -1225,9 +1250,6 @@ void CStringChecker::evalMempcpy(CheckerContext &C, const CallExpr *CE) const {
}
void CStringChecker::evalMemmove(CheckerContext &C, const CallExpr *CE) const {
- if (CE->getNumArgs() < 3)
- return;
-
// void *memmove(void *dst, const void *src, size_t n);
// The return value is the address of the destination buffer.
const Expr *Dest = CE->getArg(0);
@@ -1237,18 +1259,12 @@ void CStringChecker::evalMemmove(CheckerContext &C, const CallExpr *CE) const {
}
void CStringChecker::evalBcopy(CheckerContext &C, const CallExpr *CE) const {
- if (CE->getNumArgs() < 3)
- return;
-
// void bcopy(const void *src, void *dst, size_t n);
evalCopyCommon(C, CE, C.getState(),
CE->getArg(2), CE->getArg(1), CE->getArg(0));
}
void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
- if (CE->getNumArgs() < 3)
- return;
-
// int memcmp(const void *s1, const void *s2, size_t n);
CurrentFunctionDescription = "memory comparison function";
@@ -1323,18 +1339,12 @@ void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
void CStringChecker::evalstrLength(CheckerContext &C,
const CallExpr *CE) const {
- if (CE->getNumArgs() < 1)
- return;
-
// size_t strlen(const char *s);
evalstrLengthCommon(C, CE, /* IsStrnlen = */ false);
}
void CStringChecker::evalstrnLength(CheckerContext &C,
const CallExpr *CE) const {
- if (CE->getNumArgs() < 2)
- return;
-
// size_t strnlen(const char *s, size_t maxlen);
evalstrLengthCommon(C, CE, /* IsStrnlen = */ true);
}
@@ -1459,9 +1469,6 @@ void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
}
void CStringChecker::evalStrcpy(CheckerContext &C, const CallExpr *CE) const {
- if (CE->getNumArgs() < 2)
- return;
-
// char *strcpy(char *restrict dst, const char *restrict src);
evalStrcpyCommon(C, CE,
/* returnEnd = */ false,
@@ -1470,9 +1477,6 @@ void CStringChecker::evalStrcpy(CheckerContext &C, const CallExpr *CE) const {
}
void CStringChecker::evalStrncpy(CheckerContext &C, const CallExpr *CE) const {
- if (CE->getNumArgs() < 3)
- return;
-
// char *strncpy(char *restrict dst, const char *restrict src, size_t n);
evalStrcpyCommon(C, CE,
/* returnEnd = */ false,
@@ -1481,9 +1485,6 @@ void CStringChecker::evalStrncpy(CheckerContext &C, const CallExpr *CE) const {
}
void CStringChecker::evalStpcpy(CheckerContext &C, const CallExpr *CE) const {
- if (CE->getNumArgs() < 2)
- return;
-
// char *stpcpy(char *restrict dst, const char *restrict src);
evalStrcpyCommon(C, CE,
/* returnEnd = */ true,
@@ -1492,9 +1493,6 @@ void CStringChecker::evalStpcpy(CheckerContext &C, const CallExpr *CE) const {
}
void CStringChecker::evalStrlcpy(CheckerContext &C, const CallExpr *CE) const {
- if (CE->getNumArgs() < 3)
- return;
-
// char *strlcpy(char *dst, const char *src, size_t n);
evalStrcpyCommon(C, CE,
/* returnEnd = */ true,
@@ -1504,9 +1502,6 @@ void CStringChecker::evalStrlcpy(CheckerContext &C, const CallExpr *CE) const {
}
void CStringChecker::evalStrcat(CheckerContext &C, const CallExpr *CE) const {
- if (CE->getNumArgs() < 2)
- return;
-
//char *strcat(char *restrict s1, const char *restrict s2);
evalStrcpyCommon(C, CE,
/* returnEnd = */ false,
@@ -1515,9 +1510,6 @@ void CStringChecker::evalStrcat(CheckerContext &C, const CallExpr *CE) const {
}
void CStringChecker::evalStrncat(CheckerContext &C, const CallExpr *CE) const {
- if (CE->getNumArgs() < 3)
- return;
-
//char *strncat(char *restrict s1, const char *restrict s2, size_t n);
evalStrcpyCommon(C, CE,
/* returnEnd = */ false,
@@ -1526,8 +1518,9 @@ void CStringChecker::evalStrncat(CheckerContext &C, const CallExpr *CE) const {
}
void CStringChecker::evalStrlcat(CheckerContext &C, const CallExpr *CE) const {
- if (CE->getNumArgs() < 3)
- return;
+ // FIXME: strlcat() uses a different rule for bound checking, i.e. 'n' means
+ // something different than it does for strncat(). This currently causes

+ // false positives in the alpha string bound checker.
//char *strlcat(char *s1, const char *s2, size_t n);
evalStrcpyCommon(C, CE,
@@ -1881,35 +1874,23 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
}
void CStringChecker::evalStrcmp(CheckerContext &C, const CallExpr *CE) const {
- if (CE->getNumArgs() < 2)
- return;
-
//int strcmp(const char *s1, const char *s2);
evalStrcmpCommon(C, CE, /* isBounded = */ false, /* ignoreCase = */ false);
}
void CStringChecker::evalStrncmp(CheckerContext &C, const CallExpr *CE) const {
- if (CE->getNumArgs() < 3)
- return;
-
//int strncmp(const char *s1, const char *s2, size_t n);
evalStrcmpCommon(C, CE, /* isBounded = */ true, /* ignoreCase = */ false);
}
void CStringChecker::evalStrcasecmp(CheckerContext &C,
const CallExpr *CE) const {
- if (CE->getNumArgs() < 2)
- return;
-
//int strcasecmp(const char *s1, const char *s2);
evalStrcmpCommon(C, CE, /* isBounded = */ false, /* ignoreCase = */ true);
}
void CStringChecker::evalStrncasecmp(CheckerContext &C,
const CallExpr *CE) const {
- if (CE->getNumArgs() < 3)
- return;
-
//int strncasecmp(const char *s1, const char *s2, size_t n);
evalStrcmpCommon(C, CE, /* isBounded = */ true, /* ignoreCase = */ true);
}
@@ -2043,9 +2024,6 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
void CStringChecker::evalStrsep(CheckerContext &C, const CallExpr *CE) const {
//char *strsep(char **stringp, const char *delim);
- if (CE->getNumArgs() < 2)
- return;
-
// Sanity: does the search string parameter match the return type?
const Expr *SearchStrPtr = CE->getArg(0);
QualType CharPtrTy = SearchStrPtr->getType()->getPointeeType();
@@ -2114,7 +2092,7 @@ void CStringChecker::evalStdCopyBackward(CheckerContext &C,
void CStringChecker::evalStdCopyCommon(CheckerContext &C,
const CallExpr *CE) const {
- if (CE->getNumArgs() < 3)
+ if (!CE->getArg(2)->getType()->isPointerType())
return;
ProgramStateRef State = C.getState();
@@ -2141,9 +2119,6 @@ void CStringChecker::evalStdCopyCommon(CheckerContext &C,
}
void CStringChecker::evalMemset(CheckerContext &C, const CallExpr *CE) const {
- if (CE->getNumArgs() != 3)
- return;
-
CurrentFunctionDescription = "memory set function";
const Expr *Mem = CE->getArg(0);
@@ -2192,9 +2167,6 @@ void CStringChecker::evalMemset(CheckerContext &C, const CallExpr *CE) const {
}
void CStringChecker::evalBzero(CheckerContext &C, const CallExpr *CE) const {
- if (CE->getNumArgs() != 2)
- return;
-
CurrentFunctionDescription = "memory clearance function";
const Expr *Mem = CE->getArg(0);
@@ -2237,110 +2209,53 @@ void CStringChecker::evalBzero(CheckerContext &C, const CallExpr *CE) const {
C.addTransition(State);
}
-static bool isCPPStdLibraryFunction(const FunctionDecl *FD, StringRef Name) {
- IdentifierInfo *II = FD->getIdentifier();
- if (!II)
- return false;
-
- if (!AnalysisDeclContext::isInStdNamespace(FD))
- return false;
-
- if (II->getName().equals(Name))
- return true;
-
- return false;
-}
//===----------------------------------------------------------------------===//
// The driver method, and other Checker callbacks.
//===----------------------------------------------------------------------===//
-static CStringChecker::FnCheck identifyCall(const CallExpr *CE,
- CheckerContext &C) {
- const FunctionDecl *FDecl = C.getCalleeDecl(CE);
- if (!FDecl)
+CStringChecker::FnCheck CStringChecker::identifyCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return nullptr;
+
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
+ if (!FD)
return nullptr;
+ if (Call.isCalled(StdCopy)) {
+ return &CStringChecker::evalStdCopy;
+ } else if (Call.isCalled(StdCopyBackward)) {
+ return &CStringChecker::evalStdCopyBackward;
+ }
+
// Pro-actively check that argument types are safe to do arithmetic upon.
// We do not want to crash if someone accidentally passes a structure
- // into, say, a C++ overload of any of these functions.
- if (isCPPStdLibraryFunction(FDecl, "copy")) {
- if (CE->getNumArgs() < 3 || !CE->getArg(2)->getType()->isPointerType())
+ // into, say, a C++ overload of any of these functions. We do not perform
+ // this check for std::copy, whose arguments may legitimately have other types.
+ for (auto I : CE->arguments()) {
+ QualType T = I->getType();
+ if (!T->isIntegralOrEnumerationType() && !T->isPointerType())
return nullptr;
- return &CStringChecker::evalStdCopy;
- } else if (isCPPStdLibraryFunction(FDecl, "copy_backward")) {
- if (CE->getNumArgs() < 3 || !CE->getArg(2)->getType()->isPointerType())
- return nullptr;
- return &CStringChecker::evalStdCopyBackward;
- } else {
- // An umbrella check for all C library functions.
- for (auto I: CE->arguments()) {
- QualType T = I->getType();
- if (!T->isIntegralOrEnumerationType() && !T->isPointerType())
- return nullptr;
- }
}
- // FIXME: Poorly-factored string switches are slow.
- if (C.isCLibraryFunction(FDecl, "memcpy"))
- return &CStringChecker::evalMemcpy;
- else if (C.isCLibraryFunction(FDecl, "mempcpy"))
- return &CStringChecker::evalMempcpy;
- else if (C.isCLibraryFunction(FDecl, "memcmp"))
- return &CStringChecker::evalMemcmp;
- else if (C.isCLibraryFunction(FDecl, "memmove"))
- return &CStringChecker::evalMemmove;
- else if (C.isCLibraryFunction(FDecl, "memset") ||
- C.isCLibraryFunction(FDecl, "explicit_memset"))
- return &CStringChecker::evalMemset;
- else if (C.isCLibraryFunction(FDecl, "strcpy"))
- return &CStringChecker::evalStrcpy;
- else if (C.isCLibraryFunction(FDecl, "strncpy"))
- return &CStringChecker::evalStrncpy;
- else if (C.isCLibraryFunction(FDecl, "stpcpy"))
- return &CStringChecker::evalStpcpy;
- else if (C.isCLibraryFunction(FDecl, "strlcpy"))
- return &CStringChecker::evalStrlcpy;
- else if (C.isCLibraryFunction(FDecl, "strcat"))
- return &CStringChecker::evalStrcat;
- else if (C.isCLibraryFunction(FDecl, "strncat"))
- return &CStringChecker::evalStrncat;
- else if (C.isCLibraryFunction(FDecl, "strlcat"))
- return &CStringChecker::evalStrlcat;
- else if (C.isCLibraryFunction(FDecl, "strlen"))
- return &CStringChecker::evalstrLength;
- else if (C.isCLibraryFunction(FDecl, "strnlen"))
- return &CStringChecker::evalstrnLength;
- else if (C.isCLibraryFunction(FDecl, "strcmp"))
- return &CStringChecker::evalStrcmp;
- else if (C.isCLibraryFunction(FDecl, "strncmp"))
- return &CStringChecker::evalStrncmp;
- else if (C.isCLibraryFunction(FDecl, "strcasecmp"))
- return &CStringChecker::evalStrcasecmp;
- else if (C.isCLibraryFunction(FDecl, "strncasecmp"))
- return &CStringChecker::evalStrncasecmp;
- else if (C.isCLibraryFunction(FDecl, "strsep"))
- return &CStringChecker::evalStrsep;
- else if (C.isCLibraryFunction(FDecl, "bcopy"))
- return &CStringChecker::evalBcopy;
- else if (C.isCLibraryFunction(FDecl, "bcmp"))
- return &CStringChecker::evalMemcmp;
- else if (C.isCLibraryFunction(FDecl, "bzero") ||
- C.isCLibraryFunction(FDecl, "explicit_bzero"))
- return &CStringChecker::evalBzero;
+ const FnCheck *Callback = Callbacks.lookup(Call);
+ if (Callback)
+ return *Callback;
return nullptr;
}
-bool CStringChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
-
- FnCheck evalFunction = identifyCall(CE, C);
+bool CStringChecker::evalCall(const CallEvent &Call, CheckerContext &C) const {
+ FnCheck Callback = identifyCall(Call, C);
// If the callee isn't a string function, let another checker handle it.
- if (!evalFunction)
+ if (!Callback)
return false;
// Check and evaluate the call.
- (this->*evalFunction)(C, CE);
+ const auto *CE = cast<CallExpr>(Call.getOriginExpr());
+ (this->*Callback)(C, CE);
// If the evaluate call resulted in no change, chain to the next eval call
// handler.
@@ -2476,18 +2391,26 @@ void CStringChecker::checkDeadSymbols(SymbolReaper &SR,
C.addTransition(state);
}
+void ento::registerCStringModeling(CheckerManager &Mgr) {
+ Mgr.registerChecker<CStringChecker>();
+}
+
+bool ento::shouldRegisterCStringModeling(const LangOptions &LO) {
+ return true;
+}
+
#define REGISTER_CHECKER(name) \
void ento::register##name(CheckerManager &mgr) { \
- CStringChecker *checker = mgr.registerChecker<CStringChecker>(); \
+ CStringChecker *checker = mgr.getChecker<CStringChecker>(); \
checker->Filter.Check##name = true; \
checker->Filter.CheckName##name = mgr.getCurrentCheckName(); \
+ } \
+ \
+ bool ento::shouldRegister##name(const LangOptions &LO) { \
+ return true; \
}
REGISTER_CHECKER(CStringNullArg)
REGISTER_CHECKER(CStringOutOfBounds)
REGISTER_CHECKER(CStringBufferOverlap)
REGISTER_CHECKER(CStringNotNullTerm)
-
- void ento::registerCStringCheckerBasic(CheckerManager &Mgr) {
- Mgr.registerChecker<CStringChecker>();
- }
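
A minimal sketch (hypothetical checker, not taken from the patch) of the CallDescriptionMap dispatch that replaces the old isCLibraryFunction() chain above: each key carries a function name and argument count, CDF_MaybeBuiltin additionally matches the __builtin_-prefixed variant, and lookup() resolves a CallEvent to the stored handler.

    #include "clang/StaticAnalyzer/Core/Checker.h"
    #include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
    #include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"

    using namespace clang;
    using namespace ento;

    namespace {
    class CopyModelingChecker : public Checker<eval::Call> {
      using Handler = void (CopyModelingChecker::*)(const CallEvent &,
                                                    CheckerContext &) const;

      // CDF_MaybeBuiltin lets "__builtin_memcpy" match the "memcpy" entry too.
      const CallDescriptionMap<Handler> Handlers = {
          {{CDF_MaybeBuiltin, "memcpy", 3}, &CopyModelingChecker::handleCopy},
          {{CDF_MaybeBuiltin, "memmove", 3}, &CopyModelingChecker::handleCopy},
      };

      void handleCopy(const CallEvent &Call, CheckerContext &C) const {
        // A real checker would model the copy; here we just keep the state.
        C.addTransition(C.getState());
      }

    public:
      bool evalCall(const CallEvent &Call, CheckerContext &C) const {
        if (const Handler *H = Handlers.lookup(Call)) {
          (this->*H)(Call, C);
          return true;
        }
        return false; // not ours; let other checkers evaluate the call
      }
    };
    } // namespace
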
diff --git a/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp b/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
index bbeb41c5f3cf..b828ac059236 100644
--- a/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
@@ -1,9 +1,8 @@
//== CStringSyntaxChecker.cpp - CoreFoundation containers API *- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -154,8 +153,6 @@ bool WalkAST::containsBadStrncatPattern(const CallExpr *CE) {
bool WalkAST::containsBadStrlcpyStrlcatPattern(const CallExpr *CE) {
if (CE->getNumArgs() != 3)
return false;
- const FunctionDecl *FD = CE->getDirectCallee();
- bool Append = CheckerContext::isCLibraryFunction(FD, "strlcat");
const Expr *DstArg = CE->getArg(0);
const Expr *LenArg = CE->getArg(2);
@@ -195,13 +192,8 @@ bool WalkAST::containsBadStrlcpyStrlcatPattern(const CallExpr *CE) {
ASTContext &C = BR.getContext();
uint64_t BufferLen = C.getTypeSize(Buffer) / 8;
auto RemainingBufferLen = BufferLen - DstOff;
- if (Append) {
- if (RemainingBufferLen <= ILRawVal)
- return true;
- } else {
- if (RemainingBufferLen < ILRawVal)
- return true;
- }
+ if (RemainingBufferLen < ILRawVal)
+ return true;
}
}
}
@@ -290,3 +282,6 @@ void ento::registerCStringSyntaxChecker(CheckerManager &mgr) {
mgr.registerChecker<CStringSyntaxChecker>();
}
+bool ento::shouldRegisterCStringSyntaxChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp b/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp
index 0b539e1188eb..01f5b9c889e3 100644
--- a/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp
@@ -1,9 +1,8 @@
//=== CXXSelfAssignmentChecker.cpp -----------------------------*- C++ -*--===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -51,12 +50,32 @@ void CXXSelfAssignmentChecker::checkBeginFunction(CheckerContext &C) const {
State->getSVal(SVB.getCXXThis(MD, LCtx->getStackFrame()));
auto Param = SVB.makeLoc(State->getRegion(MD->getParamDecl(0), LCtx));
auto ParamVal = State->getSVal(Param);
+
ProgramStateRef SelfAssignState = State->bindLoc(Param, ThisVal, LCtx);
- C.addTransition(SelfAssignState);
+ const NoteTag *SelfAssignTag =
+ C.getNoteTag([MD](BugReport &BR) -> std::string {
+ SmallString<256> Msg;
+ llvm::raw_svector_ostream Out(Msg);
+ Out << "Assuming " << MD->getParamDecl(0)->getName() << " == *this";
+ return Out.str();
+ });
+ C.addTransition(SelfAssignState, SelfAssignTag);
+
ProgramStateRef NonSelfAssignState = State->bindLoc(Param, ParamVal, LCtx);
- C.addTransition(NonSelfAssignState);
+ const NoteTag *NonSelfAssignTag =
+ C.getNoteTag([MD](BugReport &BR) -> std::string {
+ SmallString<256> Msg;
+ llvm::raw_svector_ostream Out(Msg);
+ Out << "Assuming " << MD->getParamDecl(0)->getName() << " != *this";
+ return Out.str();
+ });
+ C.addTransition(NonSelfAssignState, NonSelfAssignTag);
}
void ento::registerCXXSelfAssignmentChecker(CheckerManager &Mgr) {
Mgr.registerChecker<CXXSelfAssignmentChecker>();
}
+
+bool ento::shouldRegisterCXXSelfAssignmentChecker(const LangOptions &LO) {
+ return true;
+}
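
For illustration (not taken from the patch), the note tags above annotate the state split the checker already performed; a report that depends on one of the branches, such as a use-after-free found on the self-assignment path of a copy assignment operator, now carries the corresponding "Assuming ... == *this" note.

    struct Buffer {
      char *Data;

      Buffer &operator=(const Buffer &Other) {
        // Two paths are explored from here on, now tagged with
        //   "Assuming Other == *this"  and  "Assuming Other != *this".
        char *NewData = new char[8];
        delete[] Data;                 // on the '== *this' path this frees Other.Data too
        for (int I = 0; I < 8; ++I)
          NewData[I] = Other.Data[I];  // read of freed memory on the self-assignment path
        Data = NewData;
        return *this;
      }
    };
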
diff --git a/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
index ef30dc74c39d..5a7eba0760fe 100644
--- a/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
@@ -1,9 +1,8 @@
//===--- CallAndMessageChecker.cpp ------------------------------*- C++ -*--==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -29,14 +28,6 @@ using namespace ento;
namespace {
-struct ChecksFilter {
- DefaultBool Check_CallAndMessageUnInitRefArg;
- DefaultBool Check_CallAndMessageChecker;
-
- CheckName CheckName_CallAndMessageUnInitRefArg;
- CheckName CheckName_CallAndMessageChecker;
-};
-
class CallAndMessageChecker
: public Checker< check::PreStmt<CallExpr>,
check::PreStmt<CXXDeleteExpr>,
@@ -57,7 +48,8 @@ class CallAndMessageChecker
mutable std::unique_ptr<BugType> BT_call_few_args;
public:
- ChecksFilter Filter;
+ DefaultBool Check_CallAndMessageUnInitRefArg;
+ CheckName CheckName_CallAndMessageUnInitRefArg;
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
void checkPreStmt(const CXXDeleteExpr *DE, CheckerContext &C) const;
@@ -152,7 +144,7 @@ bool CallAndMessageChecker::uninitRefOrPointer(
CheckerContext &C, const SVal &V, SourceRange ArgRange, const Expr *ArgEx,
std::unique_ptr<BugType> &BT, const ParmVarDecl *ParamDecl, const char *BD,
int ArgumentNumber) const {
- if (!Filter.Check_CallAndMessageUnInitRefArg)
+ if (!Check_CallAndMessageUnInitRefArg)
return false;
// No parameter declaration available, i.e. variadic function argument.
@@ -608,13 +600,20 @@ void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
C.addTransition(state);
}
-#define REGISTER_CHECKER(name) \
- void ento::register##name(CheckerManager &mgr) { \
- CallAndMessageChecker *Checker = \
- mgr.registerChecker<CallAndMessageChecker>(); \
- Checker->Filter.Check_##name = true; \
- Checker->Filter.CheckName_##name = mgr.getCurrentCheckName(); \
- }
+void ento::registerCallAndMessageChecker(CheckerManager &mgr) {
+ mgr.registerChecker<CallAndMessageChecker>();
+}
+
+bool ento::shouldRegisterCallAndMessageChecker(const LangOptions &LO) {
+ return true;
+}
-REGISTER_CHECKER(CallAndMessageUnInitRefArg)
-REGISTER_CHECKER(CallAndMessageChecker)
+void ento::registerCallAndMessageUnInitRefArg(CheckerManager &mgr) {
+ CallAndMessageChecker *Checker = mgr.getChecker<CallAndMessageChecker>();
+ Checker->Check_CallAndMessageUnInitRefArg = true;
+ Checker->CheckName_CallAndMessageUnInitRefArg = mgr.getCurrentCheckName();
+}
+
+bool ento::shouldRegisterCallAndMessageUnInitRefArg(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp b/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
index 5deb62d32311..05ece961467f 100644
--- a/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
@@ -1,9 +1,8 @@
//=== CastSizeChecker.cpp ---------------------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -140,10 +139,13 @@ void CastSizeChecker::checkPreStmt(const CastExpr *CE,CheckerContext &C) const {
}
void ento::registerCastSizeChecker(CheckerManager &mgr) {
+ mgr.registerChecker<CastSizeChecker>();
+}
+
+bool ento::shouldRegisterCastSizeChecker(const LangOptions &LO) {
// PR31226: C++ is more complicated than what this checker currently supports.
// There are derived-to-base casts, there are different rules for 0-size
// structures, no flexible arrays, etc.
// FIXME: Disabled on C++ for now.
- if (!mgr.getLangOpts().CPlusPlus)
- mgr.registerChecker<CastSizeChecker>();
+ return !LO.CPlusPlus;
}
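
Throughout this import, checker registration is split in two: the register... entry point only constructs the checker, while the new shouldRegister...(const LangOptions &) predicate decides up front whether registration makes sense for the current language mode, as the CastSizeChecker hunk above shows for its C++ gate. A minimal sketch of the pattern for a hypothetical checker (all names here are assumptions, not taken from the patch):

    #include "clang/Basic/LangOptions.h"
    #include "clang/StaticAnalyzer/Core/Checker.h"
    #include "clang/StaticAnalyzer/Core/CheckerManager.h"
    #include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"

    using namespace clang;
    using namespace ento;

    namespace {
    class ExampleObjCChecker : public Checker<check::BeginFunction> {
    public:
      void checkBeginFunction(CheckerContext &C) const {}
    };
    } // namespace

    namespace clang {
    namespace ento {

    // Construction only; no language checks left in here.
    void registerExampleObjCChecker(CheckerManager &Mgr) {
      Mgr.registerChecker<ExampleObjCChecker>();
    }

    // Consulted by the checker registry before register... is ever called.
    bool shouldRegisterExampleObjCChecker(const LangOptions &LO) {
      return LO.ObjC; // hypothetical gate: only meaningful for Objective-C code
    }

    } // namespace ento
    } // namespace clang
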
diff --git a/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp b/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
index 2bd3879627cb..93665596be29 100644
--- a/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
@@ -1,9 +1,8 @@
//=== CastToStructChecker.cpp ----------------------------------*- C++ -*--===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -120,3 +119,7 @@ public:
void ento::registerCastToStructChecker(CheckerManager &mgr) {
mgr.registerChecker<CastToStructChecker>();
}
+
+bool ento::shouldRegisterCastToStructChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp b/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp
new file mode 100644
index 000000000000..ff5d12c27c69
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp
@@ -0,0 +1,190 @@
+//===- CastValueChecker - Model implementation of custom RTTIs --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines CastValueChecker which models casts of custom RTTIs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/Optional.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class CastValueChecker : public Checker<eval::Call> {
+ using CastCheck =
+ std::function<void(const CastValueChecker *, const CallExpr *,
+ DefinedOrUnknownSVal, CheckerContext &)>;
+
+public:
+ // We have three cases to evaluate a cast:
+ // 1) The parameter is non-null, the return value is non-null
+ // 2) The parameter is non-null, the return value is null
+ // 3) The parameter is null, the return value is null
+ //
+ // cast: 1; dyn_cast: 1, 2; cast_or_null: 1, 3; dyn_cast_or_null: 1, 2, 3.
+ bool evalCall(const CallEvent &Call, CheckerContext &C) const;
+
+private:
+ // These are known in the LLVM project.
+ const CallDescriptionMap<CastCheck> CDM = {
+ {{{"llvm", "cast"}, 1}, &CastValueChecker::evalCast},
+ {{{"llvm", "dyn_cast"}, 1}, &CastValueChecker::evalDynCast},
+ {{{"llvm", "cast_or_null"}, 1}, &CastValueChecker::evalCastOrNull},
+ {{{"llvm", "dyn_cast_or_null"}, 1},
+ &CastValueChecker::evalDynCastOrNull}};
+
+ void evalCast(const CallExpr *CE, DefinedOrUnknownSVal ParamDV,
+ CheckerContext &C) const;
+ void evalDynCast(const CallExpr *CE, DefinedOrUnknownSVal ParamDV,
+ CheckerContext &C) const;
+ void evalCastOrNull(const CallExpr *CE, DefinedOrUnknownSVal ParamDV,
+ CheckerContext &C) const;
+ void evalDynCastOrNull(const CallExpr *CE, DefinedOrUnknownSVal ParamDV,
+ CheckerContext &C) const;
+};
+} // namespace
+
+static std::string getCastName(const Expr *Cast) {
+ return Cast->getType()->getPointeeCXXRecordDecl()->getNameAsString();
+}
+
+static void evalNonNullParamNonNullReturn(const CallExpr *CE,
+ DefinedOrUnknownSVal ParamDV,
+ CheckerContext &C) {
+ ProgramStateRef State = C.getState()->assume(ParamDV, true);
+ if (!State)
+ return;
+
+ State = State->BindExpr(CE, C.getLocationContext(), ParamDV, false);
+
+ std::string CastFromName = getCastName(CE->getArg(0));
+ std::string CastToName = getCastName(CE);
+
+ const NoteTag *CastTag = C.getNoteTag(
+ [CastFromName, CastToName](BugReport &) -> std::string {
+ SmallString<128> Msg;
+ llvm::raw_svector_ostream Out(Msg);
+
+ Out << "Assuming dynamic cast from '" << CastFromName << "' to '"
+ << CastToName << "' succeeds";
+ return Out.str();
+ },
+ /*IsPrunable=*/true);
+
+ C.addTransition(State, CastTag);
+}
+
+static void evalNonNullParamNullReturn(const CallExpr *CE,
+ DefinedOrUnknownSVal ParamDV,
+ CheckerContext &C) {
+ ProgramStateRef State = C.getState()->assume(ParamDV, true);
+ if (!State)
+ return;
+
+ State = State->BindExpr(CE, C.getLocationContext(),
+ C.getSValBuilder().makeNull(), false);
+
+ std::string CastFromName = getCastName(CE->getArg(0));
+ std::string CastToName = getCastName(CE);
+
+ const NoteTag *CastTag = C.getNoteTag(
+ [CastFromName, CastToName](BugReport &) -> std::string {
+ SmallString<128> Msg;
+ llvm::raw_svector_ostream Out(Msg);
+
+ Out << "Assuming dynamic cast from '" << CastFromName << "' to '"
+ << CastToName << "' fails";
+ return Out.str();
+ },
+ /*IsPrunable=*/true);
+
+ C.addTransition(State, CastTag);
+}
+
+static void evalNullParamNullReturn(const CallExpr *CE,
+ DefinedOrUnknownSVal ParamDV,
+ CheckerContext &C) {
+ ProgramStateRef State = C.getState()->assume(ParamDV, false);
+ if (!State)
+ return;
+
+ State = State->BindExpr(CE, C.getLocationContext(),
+ C.getSValBuilder().makeNull(), false);
+
+ const NoteTag *CastTag =
+ C.getNoteTag("Assuming null pointer is passed into cast",
+ /*IsPrunable=*/true);
+
+ C.addTransition(State, CastTag);
+}
+
+void CastValueChecker::evalCast(const CallExpr *CE,
+ DefinedOrUnknownSVal ParamDV,
+ CheckerContext &C) const {
+ evalNonNullParamNonNullReturn(CE, ParamDV, C);
+}
+
+void CastValueChecker::evalDynCast(const CallExpr *CE,
+ DefinedOrUnknownSVal ParamDV,
+ CheckerContext &C) const {
+ evalNonNullParamNonNullReturn(CE, ParamDV, C);
+ evalNonNullParamNullReturn(CE, ParamDV, C);
+}
+
+void CastValueChecker::evalCastOrNull(const CallExpr *CE,
+ DefinedOrUnknownSVal ParamDV,
+ CheckerContext &C) const {
+ evalNonNullParamNonNullReturn(CE, ParamDV, C);
+ evalNullParamNullReturn(CE, ParamDV, C);
+}
+
+void CastValueChecker::evalDynCastOrNull(const CallExpr *CE,
+ DefinedOrUnknownSVal ParamDV,
+ CheckerContext &C) const {
+ evalNonNullParamNonNullReturn(CE, ParamDV, C);
+ evalNonNullParamNullReturn(CE, ParamDV, C);
+ evalNullParamNullReturn(CE, ParamDV, C);
+}
+
+bool CastValueChecker::evalCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ const CastCheck *Check = CDM.lookup(Call);
+ if (!Check)
+ return false;
+
+ const auto *CE = cast<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return false;
+
+ // If we cannot obtain both classes, we cannot be sure how to model the cast.
+ if (!CE->getType()->getPointeeCXXRecordDecl() ||
+ !CE->getArg(0)->getType()->getPointeeCXXRecordDecl())
+ return false;
+
+ SVal ParamV = Call.getArgSVal(0);
+ auto ParamDV = ParamV.getAs<DefinedOrUnknownSVal>();
+ if (!ParamDV)
+ return false;
+
+ (*Check)(this, CE, *ParamDV, C);
+ return true;
+}
+
+void ento::registerCastValueChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<CastValueChecker>();
+}
+
+bool ento::shouldRegisterCastValueChecker(const LangOptions &LO) {
+ return true;
+}
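
For illustration (not taken from the patch), this is the kind of LLVM-style code the new checker models: for llvm::dyn_cast both outcomes are explored and tagged with the notes built above, llvm::cast is modeled as always succeeding, and the *_or_null variants additionally get a null-argument, null-result path.

    #include "llvm/Support/Casting.h"

    struct Shape {
      enum Kind { SK_Circle, SK_Square } K;
      Shape(Kind K) : K(K) {}
    };

    struct Circle : Shape {
      Circle() : Shape(SK_Circle) {}
      static bool classof(const Shape *S) { return S->K == SK_Circle; }
    };

    int area(const Shape *S) {
      // The analyzer splits the state here:
      //   "Assuming dynamic cast from 'Shape' to 'Circle' succeeds"
      //   "Assuming dynamic cast from 'Shape' to 'Circle' fails"
      if (const auto *C = llvm::dyn_cast<Circle>(S))
        return 1;
      return 0;
    }
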
diff --git a/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index 00a912f27a8d..a7ca814c8f96 100644
--- a/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -1,9 +1,8 @@
//==- CheckObjCDealloc.cpp - Check ObjC -dealloc implementation --*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -1087,10 +1086,10 @@ bool ObjCDeallocChecker::isNibLoadedIvarWithoutRetain(
}
void ento::registerObjCDeallocChecker(CheckerManager &Mgr) {
- const LangOptions &LangOpts = Mgr.getLangOpts();
- // These checker only makes sense under MRR.
- if (LangOpts.getGC() == LangOptions::GCOnly || LangOpts.ObjCAutoRefCount)
- return;
-
Mgr.registerChecker<ObjCDeallocChecker>();
}
+
+bool ento::shouldRegisterObjCDeallocChecker(const LangOptions &LO) {
+ // This checker only makes sense under MRR.
+ return LO.getGC() != LangOptions::GCOnly && !LO.ObjCAutoRefCount;
+}
diff --git a/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp b/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
index fe6715595e6f..a020d33bfd95 100644
--- a/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
@@ -1,9 +1,8 @@
-//=- CheckObjCInstMethodRetTy.cpp - Check ObjC method signatures -*- C++ -*-==//
+//===-- CheckObjCInstMethSignature.cpp - Check ObjC method signatures -----===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -138,3 +137,7 @@ public:
void ento::registerObjCMethSigsChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCMethSigsChecker>();
}
+
+bool ento::shouldRegisterObjCMethSigsChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp b/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
index 163ca9d8556f..3f1c213a5647 100644
--- a/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
@@ -1,9 +1,8 @@
//==- CheckSecuritySyntaxOnly.cpp - Basic security checks --------*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -45,6 +44,7 @@ struct ChecksFilter {
DefaultBool check_mktemp;
DefaultBool check_mkstemp;
DefaultBool check_strcpy;
+ DefaultBool check_DeprecatedOrUnsafeBufferHandling;
DefaultBool check_rand;
DefaultBool check_vfork;
DefaultBool check_FloatLoopCounter;
@@ -58,6 +58,7 @@ struct ChecksFilter {
CheckName checkName_mktemp;
CheckName checkName_mkstemp;
CheckName checkName_strcpy;
+ CheckName checkName_DeprecatedOrUnsafeBufferHandling;
CheckName checkName_rand;
CheckName checkName_vfork;
CheckName checkName_FloatLoopCounter;
@@ -104,6 +105,8 @@ public:
void checkCall_mkstemp(const CallExpr *CE, const FunctionDecl *FD);
void checkCall_strcpy(const CallExpr *CE, const FunctionDecl *FD);
void checkCall_strcat(const CallExpr *CE, const FunctionDecl *FD);
+ void checkDeprecatedOrUnsafeBufferHandling(const CallExpr *CE,
+ const FunctionDecl *FD);
void checkCall_rand(const CallExpr *CE, const FunctionDecl *FD);
void checkCall_random(const CallExpr *CE, const FunctionDecl *FD);
void checkCall_vfork(const CallExpr *CE, const FunctionDecl *FD);
@@ -149,6 +152,14 @@ void WalkAST::VisitCallExpr(CallExpr *CE) {
.Case("mkstemps", &WalkAST::checkCall_mkstemp)
.Cases("strcpy", "__strcpy_chk", &WalkAST::checkCall_strcpy)
.Cases("strcat", "__strcat_chk", &WalkAST::checkCall_strcat)
+ .Cases("sprintf", "vsprintf", "scanf", "wscanf", "fscanf", "fwscanf",
+ "vscanf", "vwscanf", "vfscanf", "vfwscanf",
+ &WalkAST::checkDeprecatedOrUnsafeBufferHandling)
+ .Cases("sscanf", "swscanf", "vsscanf", "vswscanf", "swprintf",
+ "snprintf", "vswprintf", "vsnprintf", "memcpy", "memmove",
+ &WalkAST::checkDeprecatedOrUnsafeBufferHandling)
+ .Cases("strncpy", "strncat", "memset",
+ &WalkAST::checkDeprecatedOrUnsafeBufferHandling)
.Case("drand48", &WalkAST::checkCall_rand)
.Case("erand48", &WalkAST::checkCall_rand)
.Case("jrand48", &WalkAST::checkCall_rand)
@@ -553,7 +564,6 @@ void WalkAST::checkCall_mktemp(const CallExpr *CE, const FunctionDecl *FD) {
CELoc, CE->getCallee()->getSourceRange());
}
-
//===----------------------------------------------------------------------===//
// Check: Use of 'mkstemp', 'mktemp', 'mkdtemp' should contain at least 6 X's.
//===----------------------------------------------------------------------===//
@@ -642,6 +652,7 @@ void WalkAST::checkCall_mkstemp(const CallExpr *CE, const FunctionDecl *FD) {
// CWE-119: Improper Restriction of Operations within
// the Bounds of a Memory Buffer
//===----------------------------------------------------------------------===//
+
void WalkAST::checkCall_strcpy(const CallExpr *CE, const FunctionDecl *FD) {
if (!filter.check_strcpy)
return;
@@ -680,6 +691,7 @@ void WalkAST::checkCall_strcpy(const CallExpr *CE, const FunctionDecl *FD) {
// CWE-119: Improper Restriction of Operations within
// the Bounds of a Memory Buffer
//===----------------------------------------------------------------------===//
+
void WalkAST::checkCall_strcat(const CallExpr *CE, const FunctionDecl *FD) {
if (!filter.check_strcpy)
return;
@@ -702,8 +714,92 @@ void WalkAST::checkCall_strcat(const CallExpr *CE, const FunctionDecl *FD) {
}
//===----------------------------------------------------------------------===//
+// Check: Any use of 'sprintf', 'vsprintf', 'scanf', 'wscanf', 'fscanf',
+// 'fwscanf', 'vscanf', 'vwscanf', 'vfscanf', 'vfwscanf', 'sscanf',
+// 'swscanf', 'vsscanf', 'vswscanf', 'swprintf', 'snprintf', 'vswprintf',
+// 'vsnprintf', 'memcpy', 'memmove', 'strncpy', 'strncat', 'memset'
+// is deprecated since C11.
+//
+// Use of 'sprintf', 'vsprintf', 'scanf', 'wscanf', 'fscanf',
+// 'fwscanf', 'vscanf', 'vwscanf', 'vfscanf', 'vfwscanf', 'sscanf',
+// 'swscanf', 'vsscanf', 'vswscanf' without buffer limitations
+// is insecure.
+//
+// CWE-119: Improper Restriction of Operations within
+// the Bounds of a Memory Buffer
+//===----------------------------------------------------------------------===//
+
+void WalkAST::checkDeprecatedOrUnsafeBufferHandling(const CallExpr *CE,
+ const FunctionDecl *FD) {
+ if (!filter.check_DeprecatedOrUnsafeBufferHandling)
+ return;
+
+ if (!BR.getContext().getLangOpts().C11)
+ return;
+
+ // Issue a warning. ArgIndex == -1: Deprecated but not unsafe (has size
+ // restrictions).
+ enum { DEPR_ONLY = -1, UNKNOWN_CALL = -2 };
+
+ StringRef Name = FD->getIdentifier()->getName();
+ if (Name.startswith("__builtin_"))
+ Name = Name.substr(10);
+
+ int ArgIndex =
+ llvm::StringSwitch<int>(Name)
+ .Cases("scanf", "wscanf", "vscanf", "vwscanf", 0)
+ .Cases("sprintf", "vsprintf", "fscanf", "fwscanf", "vfscanf",
+ "vfwscanf", "sscanf", "swscanf", "vsscanf", "vswscanf", 1)
+ .Cases("swprintf", "snprintf", "vswprintf", "vsnprintf", "memcpy",
+ "memmove", "memset", "strncpy", "strncat", DEPR_ONLY)
+ .Default(UNKNOWN_CALL);
+
+ assert(ArgIndex != UNKNOWN_CALL && "Unsupported function");
+ bool BoundsProvided = ArgIndex == DEPR_ONLY;
+
+ if (!BoundsProvided) {
+ // Currently we only handle non-wide string literals. It is possible to do
+ // better, either by looking at references to const variables, or by doing
+ // real flow analysis.
+ auto FormatString =
+ dyn_cast<StringLiteral>(CE->getArg(ArgIndex)->IgnoreParenImpCasts());
+ if (FormatString &&
+ FormatString->getString().find("%s") == StringRef::npos &&
+ FormatString->getString().find("%[") == StringRef::npos)
+ BoundsProvided = true;
+ }
+
+ SmallString<128> Buf1;
+ SmallString<512> Buf2;
+ llvm::raw_svector_ostream Out1(Buf1);
+ llvm::raw_svector_ostream Out2(Buf2);
+
+ Out1 << "Potential insecure memory buffer bounds restriction in call '"
+ << Name << "'";
+ Out2 << "Call to function '" << Name
+ << "' is insecure as it does not provide ";
+
+ if (!BoundsProvided) {
+ Out2 << "bounding of the memory buffer or ";
+ }
+
+ Out2 << "security checks introduced "
+ "in the C11 standard. Replace with analogous functions that "
+ "support length arguments or provides boundary checks such as '"
+ << Name << "_s' in case of C11";
+
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(),
+ filter.checkName_DeprecatedOrUnsafeBufferHandling,
+ Out1.str(), "Security", Out2.str(), CELoc,
+ CE->getCallee()->getSourceRange());
+}
+
+//===----------------------------------------------------------------------===//
// Common check for str* functions with no bounds parameters.
//===----------------------------------------------------------------------===//
+
bool WalkAST::checkCall_strCommon(const CallExpr *CE, const FunctionDecl *FD) {
const FunctionProtoType *FPT = FD->getType()->getAs<FunctionProtoType>();
if (!FPT)
@@ -906,12 +1002,23 @@ public:
};
}
+void ento::registerSecuritySyntaxChecker(CheckerManager &mgr) {
+ mgr.registerChecker<SecuritySyntaxChecker>();
+}
+
+bool ento::shouldRegisterSecuritySyntaxChecker(const LangOptions &LO) {
+ return true;
+}
+
#define REGISTER_CHECKER(name) \
void ento::register##name(CheckerManager &mgr) { \
- SecuritySyntaxChecker *checker = \
- mgr.registerChecker<SecuritySyntaxChecker>(); \
+ SecuritySyntaxChecker *checker = mgr.getChecker<SecuritySyntaxChecker>(); \
checker->filter.check_##name = true; \
checker->filter.checkName_##name = mgr.getCurrentCheckName(); \
+ } \
+ \
+ bool ento::shouldRegister##name(const LangOptions &LO) { \
+ return true; \
}
REGISTER_CHECKER(bcmp)
@@ -926,5 +1033,4 @@ REGISTER_CHECKER(rand)
REGISTER_CHECKER(vfork)
REGISTER_CHECKER(FloatLoopCounter)
REGISTER_CHECKER(UncheckedReturn)
-
-
+REGISTER_CHECKER(DeprecatedOrUnsafeBufferHandling)
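// [Editor's illustrative sketch, not part of the imported diff] A hypothetical
// C11 translation unit showing what the DeprecatedOrUnsafeBufferHandling check
// added above reports when the checker is enabled and -std=c11 (or later) is in
// effect. All names below are ordinary C library calls; the file itself is made
// up for illustration.
#include <stdio.h>
#include <string.h>

void demo(char *dst, const char *src, size_t n) {
  char buf[16];
  sprintf(buf, "%s", src); // flagged: format uses '%s', no buffer bound given
  scanf("%s", buf);        // flagged: unbounded scanf into buf
  memcpy(dst, src, n);     // flagged as deprecated only; memcpy_s is suggested
}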
diff --git a/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp b/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
index 7688b713b06b..ec401cfa8985 100644
--- a/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
@@ -1,9 +1,8 @@
//==- CheckSizeofPointer.cpp - Check for sizeof on pointers ------*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -91,3 +90,7 @@ public:
void ento::registerSizeofPointerChecker(CheckerManager &mgr) {
mgr.registerChecker<SizeofPointerChecker>();
}
+
+bool ento::shouldRegisterSizeofPointerChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp b/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
index 44fac0278bdd..3e5e2b913914 100644
--- a/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
@@ -1,9 +1,8 @@
//===- CheckerDocumentation.cpp - Documentation checker ---------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -216,7 +215,7 @@ public:
/// Evaluates function call.
///
- /// The analysis core threats all function calls in the same way. However, some
+ /// The analysis core treats all function calls in the same way. However, some
/// functions have special meaning, which should be reflected in the program
/// state. This callback allows a checker to provide domain specific knowledge
/// about the particular functions it knows about.
diff --git a/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp b/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
index 673608db1a1d..9fffedfccd87 100644
--- a/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
@@ -1,9 +1,8 @@
-//===- Chrootchecker.cpp -------- Basic security checks ---------*- C++ -*-===//
+//===-- ChrootChecker.cpp - chroot usage checks ---------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -15,6 +14,7 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
@@ -38,53 +38,44 @@ bool isRootChanged(intptr_t k) { return k == ROOT_CHANGED; }
// ROOT_CHANGED<--chdir(..)-- JAIL_ENTERED<--chdir(..)--
// | |
// bug<--foo()-- JAIL_ENTERED<--foo()--
-class ChrootChecker : public Checker<eval::Call, check::PreStmt<CallExpr> > {
- mutable IdentifierInfo *II_chroot, *II_chdir;
+class ChrootChecker : public Checker<eval::Call, check::PreCall> {
// This bug refers to possibly breaking out of a chroot() jail.
mutable std::unique_ptr<BuiltinBug> BT_BreakJail;
+ const CallDescription Chroot{"chroot", 1}, Chdir{"chdir", 1};
+
public:
- ChrootChecker() : II_chroot(nullptr), II_chdir(nullptr) {}
+ ChrootChecker() {}
static void *getTag() {
static int x;
return &x;
}
- bool evalCall(const CallExpr *CE, CheckerContext &C) const;
- void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+ bool evalCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
private:
- void Chroot(CheckerContext &C, const CallExpr *CE) const;
- void Chdir(CheckerContext &C, const CallExpr *CE) const;
+ void evalChroot(const CallEvent &Call, CheckerContext &C) const;
+ void evalChdir(const CallEvent &Call, CheckerContext &C) const;
};
} // end anonymous namespace
-bool ChrootChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
- const FunctionDecl *FD = C.getCalleeDecl(CE);
- if (!FD)
- return false;
-
- ASTContext &Ctx = C.getASTContext();
- if (!II_chroot)
- II_chroot = &Ctx.Idents.get("chroot");
- if (!II_chdir)
- II_chdir = &Ctx.Idents.get("chdir");
-
- if (FD->getIdentifier() == II_chroot) {
- Chroot(C, CE);
+bool ChrootChecker::evalCall(const CallEvent &Call, CheckerContext &C) const {
+ if (Call.isCalled(Chroot)) {
+ evalChroot(Call, C);
return true;
}
- if (FD->getIdentifier() == II_chdir) {
- Chdir(C, CE);
+ if (Call.isCalled(Chdir)) {
+ evalChdir(Call, C);
return true;
}
return false;
}
-void ChrootChecker::Chroot(CheckerContext &C, const CallExpr *CE) const {
+void ChrootChecker::evalChroot(const CallEvent &Call, CheckerContext &C) const {
ProgramStateRef state = C.getState();
ProgramStateManager &Mgr = state->getStateManager();
@@ -94,7 +85,7 @@ void ChrootChecker::Chroot(CheckerContext &C, const CallExpr *CE) const {
C.addTransition(state);
}
-void ChrootChecker::Chdir(CheckerContext &C, const CallExpr *CE) const {
+void ChrootChecker::evalChdir(const CallEvent &Call, CheckerContext &C) const {
ProgramStateRef state = C.getState();
ProgramStateManager &Mgr = state->getStateManager();
@@ -104,7 +95,7 @@ void ChrootChecker::Chdir(CheckerContext &C, const CallExpr *CE) const {
return;
// After chdir("/"), enter the jail, set the enum value JAIL_ENTERED.
- const Expr *ArgExpr = CE->getArg(0);
+ const Expr *ArgExpr = Call.getArgExpr(0);
SVal ArgVal = C.getSVal(ArgExpr);
if (const MemRegion *R = ArgVal.getAsRegion()) {
@@ -121,19 +112,10 @@ void ChrootChecker::Chdir(CheckerContext &C, const CallExpr *CE) const {
}
// Check the jail state before any function call except chroot and chdir().
-void ChrootChecker::checkPreStmt(const CallExpr *CE, CheckerContext &C) const {
- const FunctionDecl *FD = C.getCalleeDecl(CE);
- if (!FD)
- return;
-
- ASTContext &Ctx = C.getASTContext();
- if (!II_chroot)
- II_chroot = &Ctx.Idents.get("chroot");
- if (!II_chdir)
- II_chdir = &Ctx.Idents.get("chdir");
-
+void ChrootChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
// Ignore chroot and chdir.
- if (FD->getIdentifier() == II_chroot || FD->getIdentifier() == II_chdir)
+ if (Call.isCalled(Chroot) || Call.isCalled(Chdir))
return;
// If jail state is ROOT_CHANGED, generate BugReport.
@@ -153,3 +135,7 @@ void ChrootChecker::checkPreStmt(const CallExpr *CE, CheckerContext &C) const {
void ento::registerChrootChecker(CheckerManager &mgr) {
mgr.registerChecker<ChrootChecker>();
}
+
+bool ento::shouldRegisterChrootChecker(const LangOptions &LO) {
+ return true;
+}
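// [Editor's illustrative sketch, not part of the imported diff] A hypothetical
// C program exercising the jail state machine that the rewritten ChrootChecker
// models; it assumes a platform where chroot(2) is declared in <unistd.h>.
#include <stdio.h>
#include <unistd.h>

void enter_jail(const char *dir) {
  chroot(dir); // state becomes ROOT_CHANGED
  chdir("/");  // state becomes JAIL_ENTERED; later calls are not reported
  puts("running inside the jail");
}

void break_out(const char *dir) {
  chroot(dir);   // state becomes ROOT_CHANGED
  puts("hello"); // reported: call before chdir("/") may break out of the jail
}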
diff --git a/lib/StaticAnalyzer/Checkers/CloneChecker.cpp b/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
index 89354b866004..4fc225056d4c 100644
--- a/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
@@ -1,9 +1,8 @@
//===--- CloneChecker.cpp - Clone detection checker -------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -28,6 +27,13 @@ using namespace ento;
namespace {
class CloneChecker
: public Checker<check::ASTCodeBody, check::EndOfTranslationUnit> {
+public:
+ // Checker options.
+ int MinComplexity;
+ bool ReportNormalClones;
+ StringRef IgnoredFilesPattern;
+
+private:
mutable CloneDetector Detector;
mutable std::unique_ptr<BugType> BT_Exact, BT_Suspicious;
@@ -63,19 +69,6 @@ void CloneChecker::checkEndOfTranslationUnit(const TranslationUnitDecl *TU,
// At this point, every statement in the translation unit has been analyzed by
// the CloneDetector. The only thing left to do is to report the found clones.
- int MinComplexity = Mgr.getAnalyzerOptions().getCheckerIntegerOption(
- "MinimumCloneComplexity", 50, this);
- assert(MinComplexity >= 0);
-
- bool ReportSuspiciousClones = Mgr.getAnalyzerOptions()
- .getCheckerBooleanOption("ReportSuspiciousClones", true, this);
-
- bool ReportNormalClones = Mgr.getAnalyzerOptions().getCheckerBooleanOption(
- "ReportNormalClones", true, this);
-
- StringRef IgnoredFilesPattern = Mgr.getAnalyzerOptions()
- .getCheckerStringOption("IgnoredFilesPattern", "", this);
-
// Let the CloneDetector create a list of clones from all the analyzed
// statements. We don't filter for matching variable patterns at this point
// because reportSuspiciousClones() wants to search them for errors.
@@ -87,8 +80,7 @@ void CloneChecker::checkEndOfTranslationUnit(const TranslationUnitDecl *TU,
MinComplexityConstraint(MinComplexity),
RecursiveCloneTypeIIVerifyConstraint(), OnlyLargestCloneConstraint());
- if (ReportSuspiciousClones)
- reportSuspiciousClones(BR, Mgr, AllCloneGroups);
+ reportSuspiciousClones(BR, Mgr, AllCloneGroups);
// We are done for this translation unit unless we also need to report normal
// clones.
@@ -200,5 +192,22 @@ void CloneChecker::reportSuspiciousClones(
//===----------------------------------------------------------------------===//
void ento::registerCloneChecker(CheckerManager &Mgr) {
- Mgr.registerChecker<CloneChecker>();
+ auto *Checker = Mgr.registerChecker<CloneChecker>();
+
+ Checker->MinComplexity = Mgr.getAnalyzerOptions().getCheckerIntegerOption(
+ Checker, "MinimumCloneComplexity");
+
+ if (Checker->MinComplexity < 0)
+ Mgr.reportInvalidCheckerOptionValue(
+ Checker, "MinimumCloneComplexity", "a non-negative value");
+
+ Checker->ReportNormalClones = Mgr.getAnalyzerOptions().getCheckerBooleanOption(
+ Checker, "ReportNormalClones");
+
+ Checker->IgnoredFilesPattern = Mgr.getAnalyzerOptions()
+ .getCheckerStringOption(Checker, "IgnoredFilesPattern");
+}
+
+bool ento::shouldRegisterCloneChecker(const LangOptions &LO) {
+ return true;
}
diff --git a/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp b/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
index a5c67c2a5b45..5058d101b8e5 100644
--- a/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
@@ -1,9 +1,8 @@
//=== ConversionChecker.cpp -------------------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -196,3 +195,7 @@ bool ConversionChecker::isLossOfSign(const ImplicitCastExpr *Cast,
void ento::registerConversionChecker(CheckerManager &mgr) {
mgr.registerChecker<ConversionChecker>();
}
+
+bool ento::shouldRegisterConversionChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp b/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
index 4e0f6d3bedfd..d5baa2bcba6f 100644
--- a/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
@@ -1,9 +1,8 @@
//==- DeadStoresChecker.cpp - Check for stores to dead variables -*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -161,6 +160,26 @@ public:
return InEH->count(D);
}
+ bool isSuppressed(SourceRange R) {
+ SourceManager &SMgr = Ctx.getSourceManager();
+ SourceLocation Loc = R.getBegin();
+ if (!Loc.isValid())
+ return false;
+
+ FileID FID = SMgr.getFileID(Loc);
+ bool Invalid = false;
+ StringRef Data = SMgr.getBufferData(FID, &Invalid);
+ if (Invalid)
+ return false;
+
+ // Files autogenerated by DriverKit IIG contain some dead stores that
+ // we don't want to report.
+ if (Data.startswith("/* iig"))
+ return true;
+
+ return false;
+ }
+
void Report(const VarDecl *V, DeadStoreKind dsk,
PathDiagnosticLocation L, SourceRange R) {
if (Escaped.count(V))
@@ -176,6 +195,9 @@ public:
if (!reachableCode->isReachable(currentBlock))
return;
+ if (isSuppressed(R))
+ return;
+
SmallString<64> buf;
llvm::raw_svector_ostream os(buf);
const char *BugType = nullptr;
@@ -479,3 +501,7 @@ public:
void ento::registerDeadStoresChecker(CheckerManager &mgr) {
mgr.registerChecker<DeadStoresChecker>();
}
+
+bool ento::shouldRegisterDeadStoresChecker(const LangOptions &LO) {
+ return true;
+}
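// [Editor's illustrative sketch, not part of the imported diff] A hypothetical
// DriverKit IIG-generated source that the new isSuppressed() heuristic skips.
// In the real file the "/* iig" marker is the very first bytes of the buffer,
// which is exactly what the Data.startswith("/* iig") test above looks for.
/* iig generated from HypotheticalService.iig */
static int generated_stub(void) {
  int x = 0; // a dead store ("never read"), but suppressed by the heuristic
  x = 42;
  return x;
}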
diff --git a/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp b/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
index 90b1111aff0f..bb9e8110b647 100644
--- a/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
+++ b/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
@@ -1,9 +1,8 @@
//==- DebugCheckers.cpp - Debugging Checkers ---------------------*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -36,9 +35,9 @@ public:
void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
BugReporter &BR) const {
if (AnalysisDeclContext *AC = mgr.getAnalysisDeclContext(D)) {
- DominatorTree dom;
- dom.buildDominatorTree(*AC);
- dom.dump();
+ CFGDomTree Dom;
+ Dom.buildDominatorTree(AC->getCFG());
+ Dom.dump();
}
}
};
@@ -48,6 +47,61 @@ void ento::registerDominatorsTreeDumper(CheckerManager &mgr) {
mgr.registerChecker<DominatorsTreeDumper>();
}
+bool ento::shouldRegisterDominatorsTreeDumper(const LangOptions &LO) {
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// PostDominatorsTreeDumper
+//===----------------------------------------------------------------------===//
+
+namespace {
+class PostDominatorsTreeDumper : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ if (AnalysisDeclContext *AC = mgr.getAnalysisDeclContext(D)) {
+ CFGPostDomTree Dom;
+ Dom.buildDominatorTree(AC->getCFG());
+ Dom.dump();
+ }
+ }
+};
+}
+
+void ento::registerPostDominatorsTreeDumper(CheckerManager &mgr) {
+ mgr.registerChecker<PostDominatorsTreeDumper>();
+}
+
+bool ento::shouldRegisterPostDominatorsTreeDumper(const LangOptions &LO) {
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// ControlDependencyTreeDumper
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ControlDependencyTreeDumper : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ if (AnalysisDeclContext *AC = mgr.getAnalysisDeclContext(D)) {
+ ControlDependencyCalculator Dom(AC->getCFG());
+ Dom.dump();
+ }
+ }
+};
+}
+
+void ento::registerControlDependencyTreeDumper(CheckerManager &mgr) {
+ mgr.registerChecker<ControlDependencyTreeDumper>();
+}
+
+bool ento::shouldRegisterControlDependencyTreeDumper(const LangOptions &LO) {
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// LiveVariablesDumper
//===----------------------------------------------------------------------===//
@@ -68,6 +122,10 @@ void ento::registerLiveVariablesDumper(CheckerManager &mgr) {
mgr.registerChecker<LiveVariablesDumper>();
}
+bool ento::shouldRegisterLiveVariablesDumper(const LangOptions &LO) {
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// LiveStatementsDumper
//===----------------------------------------------------------------------===//
@@ -87,6 +145,10 @@ void ento::registerLiveStatementsDumper(CheckerManager &mgr) {
mgr.registerChecker<LiveStatementsDumper>();
}
+bool ento::shouldRegisterLiveStatementsDumper(const LangOptions &LO) {
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// CFGViewer
//===----------------------------------------------------------------------===//
@@ -107,6 +169,10 @@ void ento::registerCFGViewer(CheckerManager &mgr) {
mgr.registerChecker<CFGViewer>();
}
+bool ento::shouldRegisterCFGViewer(const LangOptions &LO) {
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// CFGDumper
//===----------------------------------------------------------------------===//
@@ -133,6 +199,10 @@ void ento::registerCFGDumper(CheckerManager &mgr) {
mgr.registerChecker<CFGDumper>();
}
+bool ento::shouldRegisterCFGDumper(const LangOptions &LO) {
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// CallGraphViewer
//===----------------------------------------------------------------------===//
@@ -153,6 +223,10 @@ void ento::registerCallGraphViewer(CheckerManager &mgr) {
mgr.registerChecker<CallGraphViewer>();
}
+bool ento::shouldRegisterCallGraphViewer(const LangOptions &LO) {
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// CallGraphDumper
//===----------------------------------------------------------------------===//
@@ -173,6 +247,9 @@ void ento::registerCallGraphDumper(CheckerManager &mgr) {
mgr.registerChecker<CallGraphDumper>();
}
+bool ento::shouldRegisterCallGraphDumper(const LangOptions &LO) {
+ return true;
+}
//===----------------------------------------------------------------------===//
// ConfigDumper
@@ -214,6 +291,10 @@ void ento::registerConfigDumper(CheckerManager &mgr) {
mgr.registerChecker<ConfigDumper>();
}
+bool ento::shouldRegisterConfigDumper(const LangOptions &LO) {
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// ExplodedGraph Viewer
//===----------------------------------------------------------------------===//
@@ -233,3 +314,37 @@ void ento::registerExplodedGraphViewer(CheckerManager &mgr) {
mgr.registerChecker<ExplodedGraphViewer>();
}
+bool ento::shouldRegisterExplodedGraphViewer(const LangOptions &LO) {
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// Emits a report for every Stmt that the analyzer visits.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class ReportStmts : public Checker<check::PreStmt<Stmt>> {
+ BuiltinBug BT_stmtLoc{this, "Statement"};
+
+public:
+ void checkPreStmt(const Stmt *S, CheckerContext &C) const {
+ ExplodedNode *Node = C.generateNonFatalErrorNode();
+ if (!Node)
+ return;
+
+ auto Report = llvm::make_unique<BugReport>(BT_stmtLoc, "Statement", Node);
+
+ C.emitReport(std::move(Report));
+ }
+};
+
+} // end of anonymous namespace
+
+void ento::registerReportStmts(CheckerManager &mgr) {
+ mgr.registerChecker<ReportStmts>();
+}
+
+bool ento::shouldRegisterReportStmts(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp b/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp
index adf5a8e77a74..8bf77c109f8a 100644
--- a/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp
@@ -1,9 +1,8 @@
//===-- DeleteWithNonVirtualDtorChecker.cpp -----------------------*- C++ -*--//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -148,3 +147,8 @@ DeleteWithNonVirtualDtorChecker::DeleteBugVisitor::VisitNode(
void ento::registerDeleteWithNonVirtualDtorChecker(CheckerManager &mgr) {
mgr.registerChecker<DeleteWithNonVirtualDtorChecker>();
}
+
+bool ento::shouldRegisterDeleteWithNonVirtualDtorChecker(
+ const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
index d01a889d256a..2c264833f2a9 100644
--- a/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -1,9 +1,8 @@
-//== NullDerefChecker.cpp - Null dereference checker ------------*- C++ -*--==//
+//===-- DereferenceChecker.cpp - Null dereference checker -----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -304,3 +303,7 @@ void DereferenceChecker::checkBind(SVal L, SVal V, const Stmt *S,
void ento::registerDereferenceChecker(CheckerManager &mgr) {
mgr.registerChecker<DereferenceChecker>();
}
+
+bool ento::shouldRegisterDereferenceChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp b/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
index 2a559422df34..0058f3d3881f 100644
--- a/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
+++ b/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
@@ -1,9 +1,8 @@
//=- DirectIvarAssignment.cpp - Check rules on ObjC properties -*- C++ ----*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -206,12 +205,6 @@ void DirectIvarAssignment::MethodCrawler::VisitBinaryOperator(
}
}
-// Register the checker that checks for direct accesses in all functions,
-// except for the initialization and copy routines.
-void ento::registerDirectIvarAssignment(CheckerManager &mgr) {
- mgr.registerChecker<DirectIvarAssignment>();
-}
-
// Register the checker that checks for direct accesses in functions annotated
// with __attribute__((annotate("objc_no_direct_instance_variable_assignment"))).
static bool AttrFilter(const ObjCMethodDecl *M) {
@@ -221,7 +214,22 @@ static bool AttrFilter(const ObjCMethodDecl *M) {
return true;
}
+// Register the checker that checks for direct accesses in all functions,
+// except for the initialization and copy routines.
+void ento::registerDirectIvarAssignment(CheckerManager &mgr) {
+ mgr.registerChecker<DirectIvarAssignment>();
+}
+
+bool ento::shouldRegisterDirectIvarAssignment(const LangOptions &LO) {
+ return true;
+}
+
void ento::registerDirectIvarAssignmentForAnnotatedFunctions(
CheckerManager &mgr) {
- mgr.registerChecker<DirectIvarAssignment>()->ShouldSkipMethod = &AttrFilter;
+ mgr.getChecker<DirectIvarAssignment>()->ShouldSkipMethod = &AttrFilter;
+}
+
+bool ento::shouldRegisterDirectIvarAssignmentForAnnotatedFunctions(
+ const LangOptions &LO) {
+ return true;
}
diff --git a/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp b/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
index a220a0513e28..33e8fcd8af7b 100644
--- a/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
@@ -1,9 +1,8 @@
//== DivZeroChecker.cpp - Division by zero checker --------------*- C++ -*--==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -12,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "Taint.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -20,6 +20,7 @@
using namespace clang;
using namespace ento;
+using namespace taint;
namespace {
class DivZeroChecker : public Checker< check::PreStmt<BinaryOperator> > {
@@ -84,10 +85,10 @@ void DivZeroChecker::checkPreStmt(const BinaryOperator *B,
return;
}
- bool TaintedD = C.getState()->isTainted(*DV);
+ bool TaintedD = isTainted(C.getState(), *DV);
if ((stateNotZero && stateZero && TaintedD)) {
reportBug("Division by a tainted value, possibly zero", stateZero, C,
- llvm::make_unique<TaintBugVisitor>(*DV));
+ llvm::make_unique<taint::TaintBugVisitor>(*DV));
return;
}
@@ -99,3 +100,7 @@ void DivZeroChecker::checkPreStmt(const BinaryOperator *B,
void ento::registerDivZeroChecker(CheckerManager &mgr) {
mgr.registerChecker<DivZeroChecker>();
}
+
+bool ento::shouldRegisterDivZeroChecker(const LangOptions &LO) {
+ return true;
+}
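// [Editor's illustrative sketch, not part of the imported diff] A hypothetical
// C snippet for the taint-aware path of the updated DivZeroChecker; it assumes
// the GenericTaintChecker is also enabled so that scanf marks 'd' as tainted.
#include <stdio.h>

int scaled(void) {
  int d = 1;
  scanf("%d", &d); // 'd' becomes attacker-controlled (tainted)
  return 100 / d;  // reported: "Division by a tainted value, possibly zero"
}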
diff --git a/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp b/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
index 803d7ae22a71..4d979dc9f240 100644
--- a/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
@@ -1,9 +1,8 @@
//== DynamicTypeChecker.cpp ------------------------------------ -*- C++ -*--=//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -206,3 +205,7 @@ void DynamicTypeChecker::checkPostStmt(const ImplicitCastExpr *CE,
void ento::registerDynamicTypeChecker(CheckerManager &mgr) {
mgr.registerChecker<DynamicTypeChecker>();
}
+
+bool ento::shouldRegisterDynamicTypeChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp b/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
index 31d4eebe8968..3cfe4dc82a10 100644
--- a/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
+++ b/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
@@ -1,9 +1,8 @@
//===- DynamicTypePropagation.cpp ------------------------------*- C++ -*--===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -115,8 +114,8 @@ public:
void DynamicTypePropagation::checkDeadSymbols(SymbolReaper &SR,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
- DynamicTypeMapImpl TypeMap = State->get<DynamicTypeMap>();
- for (DynamicTypeMapImpl::iterator I = TypeMap.begin(), E = TypeMap.end();
+ DynamicTypeMapTy TypeMap = State->get<DynamicTypeMap>();
+ for (DynamicTypeMapTy::iterator I = TypeMap.begin(), E = TypeMap.end();
I != E; ++I) {
if (!SR.isLiveRegion(I->first)) {
State = State->remove<DynamicTypeMap>(I->first);
@@ -145,7 +144,7 @@ static void recordFixedType(const MemRegion *Region, const CXXMethodDecl *MD,
QualType Ty = Ctx.getPointerType(Ctx.getRecordType(MD->getParent()));
ProgramStateRef State = C.getState();
- State = setDynamicTypeInfo(State, Region, Ty, /*CanBeSubclass=*/false);
+ State = setDynamicTypeInfo(State, Region, Ty, /*CanBeSubClassed=*/false);
C.addTransition(State);
}
@@ -308,7 +307,7 @@ void DynamicTypePropagation::checkPostStmt(const CXXNewExpr *NewE,
return;
C.addTransition(setDynamicTypeInfo(C.getState(), MR, NewE->getType(),
- /*CanBeSubclass=*/false));
+ /*CanBeSubClassed=*/false));
}
const ObjCObjectType *
@@ -888,7 +887,7 @@ void DynamicTypePropagation::checkPostObjCMessage(const ObjCMethodCall &M,
// MostSpecializedTypeArgsMap. We should only store anything in the later if
// the stored data differs from the one stored in the former.
State = setDynamicTypeInfo(State, RetRegion, ResultType,
- /*CanBeSubclass=*/true);
+ /*CanBeSubClassed=*/true);
Pred = C.addTransition(State);
}
@@ -988,11 +987,18 @@ DynamicTypePropagation::GenericsBugVisitor::VisitNode(const ExplodedNode *N,
/// Register checkers.
void ento::registerObjCGenericsChecker(CheckerManager &mgr) {
- DynamicTypePropagation *checker =
- mgr.registerChecker<DynamicTypePropagation>();
+ DynamicTypePropagation *checker = mgr.getChecker<DynamicTypePropagation>();
checker->CheckGenerics = true;
}
+bool ento::shouldRegisterObjCGenericsChecker(const LangOptions &LO) {
+ return true;
+}
+
void ento::registerDynamicTypePropagation(CheckerManager &mgr) {
mgr.registerChecker<DynamicTypePropagation>();
}
+
+bool ento::shouldRegisterDynamicTypePropagation(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp b/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
index 4e51cffaa744..736d80ef9ec7 100644
--- a/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
@@ -1,9 +1,8 @@
//===- EnumCastOutOfRangeChecker.cpp ---------------------------*- C++ -*--===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -126,3 +125,7 @@ void EnumCastOutOfRangeChecker::checkPreStmt(const CastExpr *CE,
void ento::registerEnumCastOutOfRangeChecker(CheckerManager &mgr) {
mgr.registerChecker<EnumCastOutOfRangeChecker>();
}
+
+bool ento::shouldRegisterEnumCastOutOfRangeChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp b/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
index 2553f54bbcac..f23f784016d8 100644
--- a/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
@@ -1,9 +1,8 @@
//==- ExprInspectionChecker.cpp - Used for regression tests ------*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -12,6 +11,7 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/IssueHash.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ScopedPrinter.h"
@@ -54,7 +54,7 @@ class ExprInspectionChecker : public Checker<eval::Call, check::DeadSymbols,
ExplodedNode *N) const;
public:
- bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+ bool evalCall(const CallEvent &Call, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
void checkEndAnalysis(ExplodedGraph &G, BugReporter &BR,
ExprEngine &Eng) const;
@@ -64,8 +64,12 @@ public:
REGISTER_SET_WITH_PROGRAMSTATE(MarkedSymbols, SymbolRef)
REGISTER_MAP_WITH_PROGRAMSTATE(DenotedSymbols, SymbolRef, const StringLiteral *)
-bool ExprInspectionChecker::evalCall(const CallExpr *CE,
+bool ExprInspectionChecker::evalCall(const CallEvent &Call,
CheckerContext &C) const {
+ const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return false;
+
// These checks should have no effect on the surrounding environment
// (globals should not be invalidated, etc), hence the use of evalCall.
FnCheck Handler = llvm::StringSwitch<FnCheck>(C.getCalleeName(CE))
@@ -409,3 +413,7 @@ void ExprInspectionChecker::analyzerExpress(const CallExpr *CE,
void ento::registerExprInspectionChecker(CheckerManager &Mgr) {
Mgr.registerChecker<ExprInspectionChecker>();
}
+
+bool ento::shouldRegisterExprInspectionChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp b/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
index 165a4e4490eb..94542be7dd7c 100644
--- a/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
@@ -1,9 +1,8 @@
//=== FixedAddressChecker.cpp - Fixed address usage checker ----*- C++ -*--===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -65,3 +64,7 @@ void FixedAddressChecker::checkPreStmt(const BinaryOperator *B,
void ento::registerFixedAddressChecker(CheckerManager &mgr) {
mgr.registerChecker<FixedAddressChecker>();
}
+
+bool ento::shouldRegisterFixedAddressChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp b/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp
index 248b9c3f7693..d471c23b83bf 100644
--- a/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp
@@ -1,9 +1,8 @@
//===- GCDAntipatternChecker.cpp ---------------------------------*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -197,7 +196,7 @@ static void emitDiagnostics(const BoundNodes &Nodes,
ADC->getDecl(),
Checker,
/*Name=*/"GCD performance anti-pattern",
- /*Category=*/"Performance",
+ /*BugCategory=*/"Performance",
OS.str(),
PathDiagnosticLocation::createBegin(SW, BR.getSourceManager(), ADC),
SW->getSourceRange());
@@ -222,8 +221,12 @@ void GCDAntipatternChecker::checkASTCodeBody(const Decl *D,
emitDiagnostics(Match, "group", BR, ADC, this);
}
-}
+} // end of anonymous namespace
void ento::registerGCDAntipattern(CheckerManager &Mgr) {
Mgr.registerChecker<GCDAntipatternChecker>();
}
+
+bool ento::shouldRegisterGCDAntipattern(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/GTestChecker.cpp b/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
index 818716dd6070..f4308f510f0b 100644
--- a/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
@@ -1,9 +1,8 @@
//==- GTestChecker.cpp - Model gtest API --*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -289,11 +288,11 @@ ProgramStateRef GTestChecker::assumeValuesEqual(SVal Val1, SVal Val2,
}
void ento::registerGTestChecker(CheckerManager &Mgr) {
- const LangOptions &LangOpts = Mgr.getLangOpts();
+ Mgr.registerChecker<GTestChecker>();
+}
+
+bool ento::shouldRegisterGTestChecker(const LangOptions &LO) {
// gtest is a C++ API so there is no sense running the checker
// if not compiling for C++.
- if (!LangOpts.CPlusPlus)
- return;
-
- Mgr.registerChecker<GTestChecker>();
+ return LO.CPlusPlus;
}
diff --git a/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp b/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
index 32fed202d3ab..d3ab98033236 100644
--- a/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
@@ -1,9 +1,8 @@
//== GenericTaintChecker.cpp ----------------------------------- -*- C++ -*--=//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -14,6 +13,8 @@
// aggressively, even if the involved symbols are under constrained.
//
//===----------------------------------------------------------------------===//
+
+#include "Taint.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/Attr.h"
#include "clang/Basic/Builtins.h"
@@ -23,9 +24,12 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include <climits>
+#include <initializer_list>
+#include <utility>
using namespace clang;
using namespace ento;
+using namespace taint;
namespace {
class GenericTaintChecker
@@ -40,13 +44,16 @@ public:
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+ void printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const override;
+
private:
static const unsigned InvalidArgIndex = UINT_MAX;
/// Denotes the return value.
static const unsigned ReturnValueIndex = UINT_MAX - 1;
mutable std::unique_ptr<BugType> BT;
- inline void initBugType() const {
+ void initBugType() const {
if (!BT)
BT.reset(new BugType(this, "Use of Untrusted Data", "Untrusted Data"));
}
@@ -61,9 +68,6 @@ private:
/// Propagate taint generated at pre-visit.
bool propagateFromPre(const CallExpr *CE, CheckerContext &C) const;
- /// Add taint sources on a post visit.
- void addSourcesPost(const CallExpr *CE, CheckerContext &C) const;
-
/// Check if the region the expression evaluates to is the standard input,
/// and thus, is tainted.
static bool isStdin(const Expr *E, CheckerContext &C);
@@ -71,16 +75,6 @@ private:
/// Given a pointer argument, return the value it points to.
static Optional<SVal> getPointedToSVal(CheckerContext &C, const Expr *Arg);
- /// Functions defining the attack surface.
- typedef ProgramStateRef (GenericTaintChecker::*FnCheck)(
- const CallExpr *, CheckerContext &C) const;
- ProgramStateRef postScanf(const CallExpr *CE, CheckerContext &C) const;
- ProgramStateRef postSocket(const CallExpr *CE, CheckerContext &C) const;
- ProgramStateRef postRetTaint(const CallExpr *CE, CheckerContext &C) const;
-
- /// Taint the scanned input if the file is tainted.
- ProgramStateRef preFscanf(const CallExpr *CE, CheckerContext &C) const;
-
/// Check for CWE-134: Uncontrolled Format String.
static const char MsgUncontrolledFormatString[];
bool checkUncontrolledFormatString(const CallExpr *CE,
@@ -103,7 +97,7 @@ private:
bool generateReportIfTainted(const Expr *E, const char Msg[],
CheckerContext &C) const;
- typedef SmallVector<unsigned, 2> ArgVector;
+ using ArgVector = SmallVector<unsigned, 2>;
/// A struct used to specify taint propagation rules for a function.
///
@@ -115,61 +109,72 @@ private:
/// ReturnValueIndex is added to the dst list, the return value will be
/// tainted.
struct TaintPropagationRule {
+ enum class VariadicType { None, Src, Dst };
+
+ using PropagationFuncType = bool (*)(bool IsTainted, const CallExpr *,
+ CheckerContext &C);
+
/// List of arguments which can be taint sources and should be checked.
ArgVector SrcArgs;
/// List of arguments which should be tainted on function return.
ArgVector DstArgs;
- // TODO: Check if using other data structures would be more optimal.
-
- TaintPropagationRule() {}
-
- TaintPropagationRule(unsigned SArg, unsigned DArg, bool TaintRet = false) {
- SrcArgs.push_back(SArg);
- DstArgs.push_back(DArg);
- if (TaintRet)
- DstArgs.push_back(ReturnValueIndex);
- }
-
- TaintPropagationRule(unsigned SArg1, unsigned SArg2, unsigned DArg,
- bool TaintRet = false) {
- SrcArgs.push_back(SArg1);
- SrcArgs.push_back(SArg2);
- DstArgs.push_back(DArg);
- if (TaintRet)
- DstArgs.push_back(ReturnValueIndex);
- }
+ /// Index for the first variadic parameter, if one exists.
+ unsigned VariadicIndex;
+ /// Shows whether the function has variadic parameters. If it does, all of
+ /// them are marked as source or destination.
+ VariadicType VarType;
+ /// Special function for tainted source determination. If defined, it can
+ /// override the default behavior.
+ PropagationFuncType PropagationFunc;
+
+ TaintPropagationRule()
+ : VariadicIndex(InvalidArgIndex), VarType(VariadicType::None),
+ PropagationFunc(nullptr) {}
+
+ TaintPropagationRule(std::initializer_list<unsigned> &&Src,
+ std::initializer_list<unsigned> &&Dst,
+ VariadicType Var = VariadicType::None,
+ unsigned VarIndex = InvalidArgIndex,
+ PropagationFuncType Func = nullptr)
+ : SrcArgs(std::move(Src)), DstArgs(std::move(Dst)),
+ VariadicIndex(VarIndex), VarType(Var), PropagationFunc(Func) {}
/// Get the propagation rule for a given function.
static TaintPropagationRule
getTaintPropagationRule(const FunctionDecl *FDecl, StringRef Name,
CheckerContext &C);
- inline void addSrcArg(unsigned A) { SrcArgs.push_back(A); }
- inline void addDstArg(unsigned A) { DstArgs.push_back(A); }
+ void addSrcArg(unsigned A) { SrcArgs.push_back(A); }
+ void addDstArg(unsigned A) { DstArgs.push_back(A); }
- inline bool isNull() const { return SrcArgs.empty(); }
+ bool isNull() const {
+ return SrcArgs.empty() && DstArgs.empty() &&
+ VariadicType::None == VarType;
+ }
- inline bool isDestinationArgument(unsigned ArgNum) const {
- return (std::find(DstArgs.begin(), DstArgs.end(), ArgNum) !=
- DstArgs.end());
+ bool isDestinationArgument(unsigned ArgNum) const {
+ return (llvm::find(DstArgs, ArgNum) != DstArgs.end());
}
- static inline bool isTaintedOrPointsToTainted(const Expr *E,
- ProgramStateRef State,
- CheckerContext &C) {
- if (State->isTainted(E, C.getLocationContext()) || isStdin(E, C))
+ static bool isTaintedOrPointsToTainted(const Expr *E, ProgramStateRef State,
+ CheckerContext &C) {
+ if (isTainted(State, E, C.getLocationContext()) || isStdin(E, C))
return true;
if (!E->getType().getTypePtr()->isPointerType())
return false;
Optional<SVal> V = getPointedToSVal(C, E);
- return (V && State->isTainted(*V));
+ return (V && isTainted(State, *V));
}
/// Pre-process a function which propagates taint according to the
/// taint rule.
ProgramStateRef process(const CallExpr *CE, CheckerContext &C) const;
+
+ // Functions for custom taintedness propagation.
+ static bool postSocket(bool IsTainted, const CallExpr *CE,
+ CheckerContext &C);
};
};
@@ -187,8 +192,7 @@ const char GenericTaintChecker::MsgSanitizeSystemArgs[] =
const char GenericTaintChecker::MsgTaintedBufferSize[] =
"Untrusted data is used to specify the buffer size "
"(CERT/STR31-C. Guarantee that storage for strings has sufficient space "
- "for "
- "character data and the null terminator)";
+ "for character data and the null terminator)";
} // end of anonymous namespace
@@ -207,24 +211,42 @@ GenericTaintChecker::TaintPropagationRule::getTaintPropagationRule(
// Check for exact name match for functions without builtin substitutes.
TaintPropagationRule Rule =
llvm::StringSwitch<TaintPropagationRule>(Name)
- .Case("atoi", TaintPropagationRule(0, ReturnValueIndex))
- .Case("atol", TaintPropagationRule(0, ReturnValueIndex))
- .Case("atoll", TaintPropagationRule(0, ReturnValueIndex))
- .Case("getc", TaintPropagationRule(0, ReturnValueIndex))
- .Case("fgetc", TaintPropagationRule(0, ReturnValueIndex))
- .Case("getc_unlocked", TaintPropagationRule(0, ReturnValueIndex))
- .Case("getw", TaintPropagationRule(0, ReturnValueIndex))
- .Case("toupper", TaintPropagationRule(0, ReturnValueIndex))
- .Case("tolower", TaintPropagationRule(0, ReturnValueIndex))
- .Case("strchr", TaintPropagationRule(0, ReturnValueIndex))
- .Case("strrchr", TaintPropagationRule(0, ReturnValueIndex))
- .Case("read", TaintPropagationRule(0, 2, 1, true))
- .Case("pread", TaintPropagationRule(InvalidArgIndex, 1, true))
- .Case("gets", TaintPropagationRule(InvalidArgIndex, 0, true))
- .Case("fgets", TaintPropagationRule(2, 0, true))
- .Case("getline", TaintPropagationRule(2, 0))
- .Case("getdelim", TaintPropagationRule(3, 0))
- .Case("fgetln", TaintPropagationRule(0, ReturnValueIndex))
+ // Source functions
+ // TODO: Add support for vfscanf & family.
+ .Case("fdopen", TaintPropagationRule({}, {ReturnValueIndex}))
+ .Case("fopen", TaintPropagationRule({}, {ReturnValueIndex}))
+ .Case("freopen", TaintPropagationRule({}, {ReturnValueIndex}))
+ .Case("getch", TaintPropagationRule({}, {ReturnValueIndex}))
+ .Case("getchar", TaintPropagationRule({}, {ReturnValueIndex}))
+ .Case("getchar_unlocked", TaintPropagationRule({}, {ReturnValueIndex}))
+ .Case("getenv", TaintPropagationRule({}, {ReturnValueIndex}))
+ .Case("gets", TaintPropagationRule({}, {0, ReturnValueIndex}))
+ .Case("scanf", TaintPropagationRule({}, {}, VariadicType::Dst, 1))
+ .Case("socket",
+ TaintPropagationRule({}, {ReturnValueIndex}, VariadicType::None,
+ InvalidArgIndex,
+ &TaintPropagationRule::postSocket))
+ .Case("wgetch", TaintPropagationRule({}, {ReturnValueIndex}))
+ // Propagating functions
+ .Case("atoi", TaintPropagationRule({0}, {ReturnValueIndex}))
+ .Case("atol", TaintPropagationRule({0}, {ReturnValueIndex}))
+ .Case("atoll", TaintPropagationRule({0}, {ReturnValueIndex}))
+ .Case("fgetc", TaintPropagationRule({0}, {ReturnValueIndex}))
+ .Case("fgetln", TaintPropagationRule({0}, {ReturnValueIndex}))
+ .Case("fgets", TaintPropagationRule({2}, {0, ReturnValueIndex}))
+ .Case("fscanf", TaintPropagationRule({0}, {}, VariadicType::Dst, 2))
+ .Case("getc", TaintPropagationRule({0}, {ReturnValueIndex}))
+ .Case("getc_unlocked", TaintPropagationRule({0}, {ReturnValueIndex}))
+ .Case("getdelim", TaintPropagationRule({3}, {0}))
+ .Case("getline", TaintPropagationRule({2}, {0}))
+ .Case("getw", TaintPropagationRule({0}, {ReturnValueIndex}))
+ .Case("pread",
+ TaintPropagationRule({0, 1, 2, 3}, {1, ReturnValueIndex}))
+ .Case("read", TaintPropagationRule({0, 2}, {1, ReturnValueIndex}))
+ .Case("strchr", TaintPropagationRule({0}, {ReturnValueIndex}))
+ .Case("strrchr", TaintPropagationRule({0}, {ReturnValueIndex}))
+ .Case("tolower", TaintPropagationRule({0}, {ReturnValueIndex}))
+ .Case("toupper", TaintPropagationRule({0}, {ReturnValueIndex}))
.Default(TaintPropagationRule());
if (!Rule.isNull())
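// [Editor's illustrative sketch, not part of the imported diff] A hypothetical
// C snippet tracing two rules from the table above: getenv() is a pure source
// ({} -> {ReturnValueIndex}) and atoi() propagates taint from argument 0 to its
// return value. Passing the result on as an allocation size can then be flagged
// with the tainted-buffer-size diagnostic quoted earlier, assuming malloc is
// among the size-checked calls (error handling omitted for brevity).
#include <stdlib.h>

char *make_buffer(void) {
  const char *s = getenv("BUF_SIZE"); // source rule: return value is tainted
  int n = atoi(s);                    // propagation rule: {0} -> ReturnValueIndex
  return malloc(n);                   // tainted/untrusted buffer size
}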
@@ -239,12 +261,12 @@ GenericTaintChecker::TaintPropagationRule::getTaintPropagationRule(
case Builtin::BImemmove:
case Builtin::BIstrncpy:
case Builtin::BIstrncat:
- return TaintPropagationRule(1, 2, 0, true);
+ return TaintPropagationRule({1, 2}, {0, ReturnValueIndex});
case Builtin::BIstrlcpy:
case Builtin::BIstrlcat:
- return TaintPropagationRule(1, 2, 0, false);
+ return TaintPropagationRule({1, 2}, {0});
case Builtin::BIstrndup:
- return TaintPropagationRule(0, 1, ReturnValueIndex);
+ return TaintPropagationRule({0, 1}, {ReturnValueIndex});
default:
break;
@@ -252,20 +274,23 @@ GenericTaintChecker::TaintPropagationRule::getTaintPropagationRule(
// Process all other functions which could be defined as builtins.
if (Rule.isNull()) {
- if (C.isCLibraryFunction(FDecl, "snprintf") ||
- C.isCLibraryFunction(FDecl, "sprintf"))
- return TaintPropagationRule(InvalidArgIndex, 0, true);
+ if (C.isCLibraryFunction(FDecl, "snprintf"))
+ return TaintPropagationRule({1}, {0, ReturnValueIndex}, VariadicType::Src,
+ 3);
+ else if (C.isCLibraryFunction(FDecl, "sprintf"))
+ return TaintPropagationRule({}, {0, ReturnValueIndex}, VariadicType::Src,
+ 2);
else if (C.isCLibraryFunction(FDecl, "strcpy") ||
C.isCLibraryFunction(FDecl, "stpcpy") ||
C.isCLibraryFunction(FDecl, "strcat"))
- return TaintPropagationRule(1, 0, true);
+ return TaintPropagationRule({1}, {0, ReturnValueIndex});
else if (C.isCLibraryFunction(FDecl, "bcopy"))
- return TaintPropagationRule(0, 2, 1, false);
+ return TaintPropagationRule({0, 2}, {1});
else if (C.isCLibraryFunction(FDecl, "strdup") ||
C.isCLibraryFunction(FDecl, "strdupa"))
- return TaintPropagationRule(0, ReturnValueIndex);
+ return TaintPropagationRule({0}, {ReturnValueIndex});
else if (C.isCLibraryFunction(FDecl, "wcsdup"))
- return TaintPropagationRule(0, ReturnValueIndex);
+ return TaintPropagationRule({0}, {ReturnValueIndex});
}
// Skipping the following functions, since they might be used for cleansing
@@ -277,19 +302,26 @@ GenericTaintChecker::TaintPropagationRule::getTaintPropagationRule(
void GenericTaintChecker::checkPreStmt(const CallExpr *CE,
CheckerContext &C) const {
- // Check for errors first.
+ // Check for taintedness related errors first: system call, uncontrolled
+ // format string, tainted buffer size.
if (checkPre(CE, C))
return;
- // Add taint second.
+ // Mark the function's arguments and/or return value as tainted if the
+ // function is present in the source list.
addSourcesPre(CE, C);
}
void GenericTaintChecker::checkPostStmt(const CallExpr *CE,
CheckerContext &C) const {
- if (propagateFromPre(CE, C))
- return;
- addSourcesPost(CE, C);
+ // Set the marked values as tainted. The return value is only accessible from
+ // checkPostStmt.
+ propagateFromPre(CE, C);
+}
+
+void GenericTaintChecker::printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const {
+ printTaint(State, Out, NL, Sep);
}
void GenericTaintChecker::addSourcesPre(const CallExpr *CE,
@@ -314,13 +346,6 @@ void GenericTaintChecker::addSourcesPre(const CallExpr *CE,
return;
}
- // Otherwise, check if we have custom pre-processing implemented.
- FnCheck evalFunction = llvm::StringSwitch<FnCheck>(Name)
- .Case("fscanf", &GenericTaintChecker::preFscanf)
- .Default(nullptr);
- // Check and evaluate the call.
- if (evalFunction)
- State = (this->*evalFunction)(CE, C);
if (!State)
return;
C.addTransition(State);
@@ -337,14 +362,10 @@ bool GenericTaintChecker::propagateFromPre(const CallExpr *CE,
if (TaintArgs.isEmpty())
return false;
- for (llvm::ImmutableSet<unsigned>::iterator I = TaintArgs.begin(),
- E = TaintArgs.end();
- I != E; ++I) {
- unsigned ArgNum = *I;
-
+ for (unsigned ArgNum : TaintArgs) {
// Special handling for the tainted return value.
if (ArgNum == ReturnValueIndex) {
- State = State->addTaint(CE, C.getLocationContext());
+ State = addTaint(State, CE, C.getLocationContext());
continue;
}
@@ -355,7 +376,7 @@ bool GenericTaintChecker::propagateFromPre(const CallExpr *CE,
const Expr *Arg = CE->getArg(ArgNum);
Optional<SVal> V = getPointedToSVal(C, Arg);
if (V)
- State = State->addTaint(*V);
+ State = addTaint(State, *V);
}
// Clear up the taint info from the state.
@@ -368,43 +389,6 @@ bool GenericTaintChecker::propagateFromPre(const CallExpr *CE,
return false;
}
-void GenericTaintChecker::addSourcesPost(const CallExpr *CE,
- CheckerContext &C) const {
- // Define the attack surface.
- // Set the evaluation function by switching on the callee name.
- const FunctionDecl *FDecl = C.getCalleeDecl(CE);
- if (!FDecl || FDecl->getKind() != Decl::Function)
- return;
-
- StringRef Name = C.getCalleeName(FDecl);
- if (Name.empty())
- return;
- FnCheck evalFunction =
- llvm::StringSwitch<FnCheck>(Name)
- .Case("scanf", &GenericTaintChecker::postScanf)
- // TODO: Add support for vfscanf & family.
- .Case("getchar", &GenericTaintChecker::postRetTaint)
- .Case("getchar_unlocked", &GenericTaintChecker::postRetTaint)
- .Case("getenv", &GenericTaintChecker::postRetTaint)
- .Case("fopen", &GenericTaintChecker::postRetTaint)
- .Case("fdopen", &GenericTaintChecker::postRetTaint)
- .Case("freopen", &GenericTaintChecker::postRetTaint)
- .Case("getch", &GenericTaintChecker::postRetTaint)
- .Case("wgetch", &GenericTaintChecker::postRetTaint)
- .Case("socket", &GenericTaintChecker::postSocket)
- .Default(nullptr);
-
- // If the callee isn't defined, it is not of security concern.
- // Check and evaluate the call.
- ProgramStateRef State = nullptr;
- if (evalFunction)
- State = (this->*evalFunction)(CE, C);
- if (!State)
- return;
-
- C.addTransition(State);
-}
-
bool GenericTaintChecker::checkPre(const CallExpr *CE,
CheckerContext &C) const {
@@ -459,54 +443,31 @@ GenericTaintChecker::TaintPropagationRule::process(const CallExpr *CE,
ProgramStateRef State = C.getState();
// Check for taint in arguments.
- bool IsTainted = false;
- for (ArgVector::const_iterator I = SrcArgs.begin(), E = SrcArgs.end(); I != E;
- ++I) {
- unsigned ArgNum = *I;
-
- if (ArgNum == InvalidArgIndex) {
- // Check if any of the arguments is tainted, but skip the
- // destination arguments.
- for (unsigned int i = 0; i < CE->getNumArgs(); ++i) {
- if (isDestinationArgument(i))
- continue;
- if ((IsTainted = isTaintedOrPointsToTainted(CE->getArg(i), State, C)))
- break;
- }
- break;
- }
-
- if (CE->getNumArgs() < (ArgNum + 1))
+ bool IsTainted = true;
+ for (unsigned ArgNum : SrcArgs) {
+ if (ArgNum >= CE->getNumArgs())
return State;
if ((IsTainted = isTaintedOrPointsToTainted(CE->getArg(ArgNum), State, C)))
break;
}
+
+ // Check for taint in variadic arguments.
+ if (!IsTainted && VariadicType::Src == VarType) {
+ // Check if any of the arguments is tainted
+ for (unsigned int i = VariadicIndex; i < CE->getNumArgs(); ++i) {
+ if ((IsTainted = isTaintedOrPointsToTainted(CE->getArg(i), State, C)))
+ break;
+ }
+ }
+
+ if (PropagationFunc)
+ IsTainted = PropagationFunc(IsTainted, CE, C);
+
if (!IsTainted)
return State;
// Mark the arguments which should be tainted after the function returns.
- for (ArgVector::const_iterator I = DstArgs.begin(), E = DstArgs.end(); I != E;
- ++I) {
- unsigned ArgNum = *I;
-
- // Should we mark all arguments as tainted?
- if (ArgNum == InvalidArgIndex) {
- // For all pointer and references that were passed in:
- // If they are not pointing to const data, mark data as tainted.
- // TODO: So far we are just going one level down; ideally we'd need to
- // recurse here.
- for (unsigned int i = 0; i < CE->getNumArgs(); ++i) {
- const Expr *Arg = CE->getArg(i);
- // Process pointer argument.
- const Type *ArgTy = Arg->getType().getTypePtr();
- QualType PType = ArgTy->getPointeeType();
- if ((!PType.isNull() && !PType.isConstQualified()) ||
- (ArgTy->isReferenceType() && !Arg->getType().isConstQualified()))
- State = State->add<TaintArgsOnPostVisit>(i);
- }
- continue;
- }
-
+ for (unsigned ArgNum : DstArgs) {
// Should mark the return value?
if (ArgNum == ReturnValueIndex) {
State = State->add<TaintArgsOnPostVisit>(ReturnValueIndex);
@@ -518,66 +479,38 @@ GenericTaintChecker::TaintPropagationRule::process(const CallExpr *CE,
State = State->add<TaintArgsOnPostVisit>(ArgNum);
}
- return State;
-}
-
-// If argument 0 (file descriptor) is tainted, all arguments except for arg 0
-// and arg 1 should get taint.
-ProgramStateRef GenericTaintChecker::preFscanf(const CallExpr *CE,
- CheckerContext &C) const {
- assert(CE->getNumArgs() >= 2);
- ProgramStateRef State = C.getState();
-
- // Check is the file descriptor is tainted.
- if (State->isTainted(CE->getArg(0), C.getLocationContext()) ||
- isStdin(CE->getArg(0), C)) {
- // All arguments except for the first two should get taint.
- for (unsigned int i = 2; i < CE->getNumArgs(); ++i)
- State = State->add<TaintArgsOnPostVisit>(i);
- return State;
+ // Mark all variadic arguments tainted if present.
+ if (VariadicType::Dst == VarType) {
+ // For all pointers and references that were passed in:
+ // If they are not pointing to const data, mark data as tainted.
+ // TODO: So far we are just going one level down; ideally we'd need to
+ // recurse here.
+ for (unsigned int i = VariadicIndex; i < CE->getNumArgs(); ++i) {
+ const Expr *Arg = CE->getArg(i);
+ // Process pointer argument.
+ const Type *ArgTy = Arg->getType().getTypePtr();
+ QualType PType = ArgTy->getPointeeType();
+ if ((!PType.isNull() && !PType.isConstQualified()) ||
+ (ArgTy->isReferenceType() && !Arg->getType().isConstQualified()))
+ State = State->add<TaintArgsOnPostVisit>(i);
+ }
}
- return nullptr;
+ return State;
}
// If argument 0 (protocol domain) is network, the return value should get taint.
-ProgramStateRef GenericTaintChecker::postSocket(const CallExpr *CE,
- CheckerContext &C) const {
- ProgramStateRef State = C.getState();
- if (CE->getNumArgs() < 3)
- return State;
-
+bool GenericTaintChecker::TaintPropagationRule::postSocket(bool /*IsTainted*/,
+ const CallExpr *CE,
+ CheckerContext &C) {
SourceLocation DomLoc = CE->getArg(0)->getExprLoc();
StringRef DomName = C.getMacroNameOrSpelling(DomLoc);
// White list the internal communication protocols.
if (DomName.equals("AF_SYSTEM") || DomName.equals("AF_LOCAL") ||
DomName.equals("AF_UNIX") || DomName.equals("AF_RESERVED_36"))
- return State;
- State = State->addTaint(CE, C.getLocationContext());
- return State;
-}
-
-ProgramStateRef GenericTaintChecker::postScanf(const CallExpr *CE,
- CheckerContext &C) const {
- ProgramStateRef State = C.getState();
- if (CE->getNumArgs() < 2)
- return State;
-
- // All arguments except for the very first one should get taint.
- for (unsigned int i = 1; i < CE->getNumArgs(); ++i) {
- // The arguments are pointer arguments. The data they are pointing at is
- // tainted after the call.
- const Expr *Arg = CE->getArg(i);
- Optional<SVal> V = getPointedToSVal(C, Arg);
- if (V)
- State = State->addTaint(*V);
- }
- return State;
-}
+ return false;
-ProgramStateRef GenericTaintChecker::postRetTaint(const CallExpr *CE,
- CheckerContext &C) const {
- return C.getState()->addTaint(CE, C.getLocationContext());
+ return true;
}
bool GenericTaintChecker::isStdin(const Expr *E, CheckerContext &C) {
@@ -603,14 +536,14 @@ bool GenericTaintChecker::isStdin(const Expr *E, CheckerContext &C) {
// This region corresponds to a declaration, find out if it's a global/extern
// variable named stdin with the proper type.
- if (const VarDecl *D = dyn_cast_or_null<VarDecl>(DeclReg->getDecl())) {
+ if (const auto *D = dyn_cast_or_null<VarDecl>(DeclReg->getDecl())) {
D = D->getCanonicalDecl();
- if ((D->getName().find("stdin") != StringRef::npos) && D->isExternC())
- if (const PointerType *PtrTy =
- dyn_cast<PointerType>(D->getType().getTypePtr()))
- if (PtrTy->getPointeeType().getCanonicalType() ==
- C.getASTContext().getFILEType().getCanonicalType())
- return true;
+ if ((D->getName().find("stdin") != StringRef::npos) && D->isExternC()) {
+ const auto *PtrTy = dyn_cast<PointerType>(D->getType().getTypePtr());
+ if (PtrTy && PtrTy->getPointeeType().getCanonicalType() ==
+ C.getASTContext().getFILEType().getCanonicalType())
+ return true;
+ }
}
return false;
}
@@ -648,9 +581,9 @@ bool GenericTaintChecker::generateReportIfTainted(const Expr *E,
ProgramStateRef State = C.getState();
Optional<SVal> PointedToSVal = getPointedToSVal(C, E);
SVal TaintedSVal;
- if (PointedToSVal && State->isTainted(*PointedToSVal))
+ if (PointedToSVal && isTainted(State, *PointedToSVal))
TaintedSVal = *PointedToSVal;
- else if (State->isTainted(E, C.getLocationContext()))
+ else if (isTainted(State, E, C.getLocationContext()))
TaintedSVal = C.getSVal(E);
else
return false;
@@ -746,3 +679,7 @@ bool GenericTaintChecker::checkTaintedBufferSize(const CallExpr *CE,
void ento::registerGenericTaintChecker(CheckerManager &mgr) {
mgr.registerChecker<GenericTaintChecker>();
}
+
+bool ento::shouldRegisterGenericTaintChecker(const LangOptions &LO) {
+ return true;
+}
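// Illustrative sketch (not part of this patch): user code of the kind the
// reworked propagation rules above describe. strcpy's rule {1} -> {0, return}
// propagates taint from the source argument to the destination and the return
// value, and scanf-family sources mark their variadic out-arguments tainted;
// tainted data that then reaches system() is reported by checkPre().
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

void taint_flow_example(void) {
  char buf[64];
  char cmd[64];
  if (scanf("%63s", buf) != 1)   // out-argument becomes a taint source
    return;
  strcpy(cmd, buf);              // taint propagates: source arg -> dest arg
  system(cmd);                   // tainted value reaches a system call -> warning
}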
diff --git a/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp b/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
index 4c2a229428d9..cc2cfb774227 100644
--- a/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
@@ -1,9 +1,8 @@
//== IdenticalExprChecker.cpp - Identical expression checker----------------==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -116,7 +115,7 @@ bool FindIdenticalExprVisitor::VisitIfStmt(const IfStmt *I) {
if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(Stmt1)) {
if (!CS->body_empty()) {
const IfStmt *InnerIf = dyn_cast<IfStmt>(*CS->body_begin());
- if (InnerIf && isIdenticalStmt(AC->getASTContext(), I->getCond(), InnerIf->getCond(), /*ignoreSideEffects=*/ false)) {
+ if (InnerIf && isIdenticalStmt(AC->getASTContext(), I->getCond(), InnerIf->getCond(), /*IgnoreSideEffects=*/ false)) {
PathDiagnosticLocation ELoc(InnerIf->getCond(), BR.getSourceManager(), AC);
BR.EmitBasicReport(AC->getDecl(), Checker, "Identical conditions",
categories::LogicError,
@@ -513,3 +512,7 @@ public:
void ento::registerIdenticalExprChecker(CheckerManager &Mgr) {
Mgr.registerChecker<FindIdenticalExprChecker>();
}
+
+bool ento::shouldRegisterIdenticalExprChecker(const LangOptions &LO) {
+ return true;
+}
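// Illustrative sketch (not part of this patch): the nested-if pattern that the
// VisitIfStmt hunk above reports as "Identical conditions".
int identical_conditions(int x) {
  if (x > 0) {
    if (x > 0)   // repeats the enclosing condition -> "Identical conditions"
      return 1;
  }
  return 0;
}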
diff --git a/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp b/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
index a4f47d727a8f..e3270f1f7be2 100644
--- a/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
@@ -1,9 +1,8 @@
//=== InnerPointerChecker.cpp -------------------------------------*- C++ -*--//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -310,3 +309,7 @@ void ento::registerInnerPointerChecker(CheckerManager &Mgr) {
registerInnerPointerCheckerAux(Mgr);
Mgr.registerChecker<InnerPointerChecker>();
}
+
+bool ento::shouldRegisterInnerPointerChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h b/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
index 81c95a4813a6..9642588d6a41 100644
--- a/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
+++ b/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
@@ -1,9 +1,8 @@
//==--- InterCheckerAPI.h ---------------------------------------*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file allows introduction of checker dependencies. It contains APIs for
@@ -17,9 +16,6 @@ class CheckerManager;
namespace ento {
-/// Register the checker which evaluates CString API calls.
-void registerCStringCheckerBasic(CheckerManager &Mgr);
-
/// Register the part of MallocChecker connected to InnerPointerChecker.
void registerInnerPointerCheckerAux(CheckerManager &Mgr);
diff --git a/lib/StaticAnalyzer/Checkers/IteratorChecker.cpp b/lib/StaticAnalyzer/Checkers/IteratorChecker.cpp
index e719e19d68e9..6f1060b5f26d 100644
--- a/lib/StaticAnalyzer/Checkers/IteratorChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/IteratorChecker.cpp
@@ -1,9 +1,8 @@
//===-- IteratorChecker.cpp ---------------------------------------*- C++ -*--//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -134,8 +133,6 @@ public:
}
};
-typedef llvm::PointerUnion<const MemRegion *, SymbolRef> RegionOrSymbol;
-
// Structure to record the symbolic begin and end position of a container
struct ContainerData {
private:
@@ -173,41 +170,21 @@ public:
}
};
-// Structure fo recording iterator comparisons. We needed to retrieve the
-// original comparison expression in assumptions.
-struct IteratorComparison {
-private:
- RegionOrSymbol Left, Right;
- bool Equality;
-
-public:
- IteratorComparison(RegionOrSymbol L, RegionOrSymbol R, bool Eq)
- : Left(L), Right(R), Equality(Eq) {}
-
- RegionOrSymbol getLeft() const { return Left; }
- RegionOrSymbol getRight() const { return Right; }
- bool isEquality() const { return Equality; }
- bool operator==(const IteratorComparison &X) const {
- return Left == X.Left && Right == X.Right && Equality == X.Equality;
- }
- bool operator!=(const IteratorComparison &X) const {
- return Left != X.Left || Right != X.Right || Equality != X.Equality;
- }
- void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(Equality); }
-};
-
class IteratorChecker
: public Checker<check::PreCall, check::PostCall,
check::PostStmt<MaterializeTemporaryExpr>, check::Bind,
- check::LiveSymbols, check::DeadSymbols,
- eval::Assume> {
+ check::LiveSymbols, check::DeadSymbols> {
std::unique_ptr<BugType> OutOfRangeBugType;
std::unique_ptr<BugType> MismatchedBugType;
std::unique_ptr<BugType> InvalidatedBugType;
- void handleComparison(CheckerContext &C, const SVal &RetVal, const SVal &LVal,
- const SVal &RVal, OverloadedOperatorKind Op) const;
+ void handleComparison(CheckerContext &C, const Expr *CE, const SVal &RetVal,
+ const SVal &LVal, const SVal &RVal,
+ OverloadedOperatorKind Op) const;
+ void processComparison(CheckerContext &C, ProgramStateRef State,
+ SymbolRef Sym1, SymbolRef Sym2, const SVal &RetVal,
+ OverloadedOperatorKind Op) const;
void verifyAccess(CheckerContext &C, const SVal &Val) const;
void verifyDereference(CheckerContext &C, const SVal &Val) const;
void handleIncrement(CheckerContext &C, const SVal &RetVal, const SVal &Iter,
@@ -282,8 +259,6 @@ public:
CheckerContext &C) const;
void checkLiveSymbols(ProgramStateRef State, SymbolReaper &SR) const;
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
- ProgramStateRef evalAssume(ProgramStateRef State, SVal Cond,
- bool Assumption) const;
};
} // namespace
@@ -293,9 +268,6 @@ REGISTER_MAP_WITH_PROGRAMSTATE(IteratorRegionMap, const MemRegion *,
REGISTER_MAP_WITH_PROGRAMSTATE(ContainerMap, const MemRegion *, ContainerData)
-REGISTER_MAP_WITH_PROGRAMSTATE(IteratorComparisonMap, const SymExpr *,
- IteratorComparison)
-
namespace {
bool isIteratorType(const QualType &Type);
@@ -325,40 +297,23 @@ bool isRandomIncrOrDecrOperator(OverloadedOperatorKind OK);
bool hasSubscriptOperator(ProgramStateRef State, const MemRegion *Reg);
bool frontModifiable(ProgramStateRef State, const MemRegion *Reg);
bool backModifiable(ProgramStateRef State, const MemRegion *Reg);
-BinaryOperator::Opcode getOpcode(const SymExpr *SE);
-const RegionOrSymbol getRegionOrSymbol(const SVal &Val);
-const ProgramStateRef processComparison(ProgramStateRef State,
- RegionOrSymbol LVal,
- RegionOrSymbol RVal, bool Equal);
-const ProgramStateRef saveComparison(ProgramStateRef State,
- const SymExpr *Condition, const SVal &LVal,
- const SVal &RVal, bool Eq);
-const IteratorComparison *loadComparison(ProgramStateRef State,
- const SymExpr *Condition);
SymbolRef getContainerBegin(ProgramStateRef State, const MemRegion *Cont);
SymbolRef getContainerEnd(ProgramStateRef State, const MemRegion *Cont);
ProgramStateRef createContainerBegin(ProgramStateRef State,
- const MemRegion *Cont,
- const SymbolRef Sym);
+ const MemRegion *Cont, const Expr *E,
+ QualType T, const LocationContext *LCtx,
+ unsigned BlockCount);
ProgramStateRef createContainerEnd(ProgramStateRef State, const MemRegion *Cont,
- const SymbolRef Sym);
+ const Expr *E, QualType T,
+ const LocationContext *LCtx,
+ unsigned BlockCount);
const IteratorPosition *getIteratorPosition(ProgramStateRef State,
const SVal &Val);
-const IteratorPosition *getIteratorPosition(ProgramStateRef State,
- RegionOrSymbol RegOrSym);
ProgramStateRef setIteratorPosition(ProgramStateRef State, const SVal &Val,
const IteratorPosition &Pos);
-ProgramStateRef setIteratorPosition(ProgramStateRef State,
- RegionOrSymbol RegOrSym,
- const IteratorPosition &Pos);
ProgramStateRef removeIteratorPosition(ProgramStateRef State, const SVal &Val);
-ProgramStateRef adjustIteratorPosition(ProgramStateRef State,
- RegionOrSymbol RegOrSym,
- const IteratorPosition &Pos, bool Equal);
-ProgramStateRef relateIteratorPositions(ProgramStateRef State,
- const IteratorPosition &Pos1,
- const IteratorPosition &Pos2,
- bool Equal);
+ProgramStateRef assumeNoOverflow(ProgramStateRef State, SymbolRef Sym,
+ long Scale);
ProgramStateRef invalidateAllIteratorPositions(ProgramStateRef State,
const MemRegion *Cont);
ProgramStateRef
@@ -384,6 +339,8 @@ ProgramStateRef reassignAllIteratorPositionsUnless(ProgramStateRef State,
ProgramStateRef rebaseSymbolInIteratorPositionsIf(
ProgramStateRef State, SValBuilder &SVB, SymbolRef OldSym,
SymbolRef NewSym, SymbolRef CondSym, BinaryOperator::Opcode Opc);
+ProgramStateRef relateSymbols(ProgramStateRef State, SymbolRef Sym1,
+ SymbolRef Sym2, bool Equal);
const ContainerData *getContainerData(ProgramStateRef State,
const MemRegion *Cont);
ProgramStateRef setContainerData(ProgramStateRef State, const MemRegion *Cont,
@@ -399,14 +356,14 @@ bool isZero(ProgramStateRef State, const NonLoc &Val);
IteratorChecker::IteratorChecker() {
OutOfRangeBugType.reset(
- new BugType(this, "Iterator out of range", "Misuse of STL APIs"));
- OutOfRangeBugType->setSuppressOnSink(true);
+ new BugType(this, "Iterator out of range", "Misuse of STL APIs",
+ /*SuppressOnSink=*/true));
MismatchedBugType.reset(
- new BugType(this, "Iterator(s) mismatched", "Misuse of STL APIs"));
- MismatchedBugType->setSuppressOnSink(true);
+ new BugType(this, "Iterator(s) mismatched", "Misuse of STL APIs",
+ /*SuppressOnSink=*/true));
InvalidatedBugType.reset(
- new BugType(this, "Iterator invalidated", "Misuse of STL APIs"));
- InvalidatedBugType->setSuppressOnSink(true);
+ new BugType(this, "Iterator invalidated", "Misuse of STL APIs",
+ /*SuppressOnSink=*/true));
}
void IteratorChecker::checkPreCall(const CallEvent &Call,
@@ -609,78 +566,123 @@ void IteratorChecker::checkPostCall(const CallEvent &Call,
const auto Op = Func->getOverloadedOperator();
if (isAssignmentOperator(Op)) {
const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call);
- if (Func->getParamDecl(0)->getType()->isRValueReferenceType()) {
+ if (cast<CXXMethodDecl>(Func)->isMoveAssignmentOperator()) {
handleAssign(C, InstCall->getCXXThisVal(), Call.getOriginExpr(),
Call.getArgSVal(0));
- } else {
- handleAssign(C, InstCall->getCXXThisVal());
+ return;
}
+
+ handleAssign(C, InstCall->getCXXThisVal());
+ return;
} else if (isSimpleComparisonOperator(Op)) {
+ const auto *OrigExpr = Call.getOriginExpr();
+ if (!OrigExpr)
+ return;
+
if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
- handleComparison(C, Call.getReturnValue(), InstCall->getCXXThisVal(),
- Call.getArgSVal(0), Op);
- } else {
- handleComparison(C, Call.getReturnValue(), Call.getArgSVal(0),
- Call.getArgSVal(1), Op);
+ handleComparison(C, OrigExpr, Call.getReturnValue(),
+ InstCall->getCXXThisVal(), Call.getArgSVal(0), Op);
+ return;
}
+
+ handleComparison(C, OrigExpr, Call.getReturnValue(), Call.getArgSVal(0),
+ Call.getArgSVal(1), Op);
+ return;
} else if (isRandomIncrOrDecrOperator(Func->getOverloadedOperator())) {
if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
if (Call.getNumArgs() >= 1) {
handleRandomIncrOrDecr(C, Func->getOverloadedOperator(),
Call.getReturnValue(),
InstCall->getCXXThisVal(), Call.getArgSVal(0));
+ return;
}
} else {
if (Call.getNumArgs() >= 2) {
handleRandomIncrOrDecr(C, Func->getOverloadedOperator(),
Call.getReturnValue(), Call.getArgSVal(0),
Call.getArgSVal(1));
+ return;
}
}
} else if (isIncrementOperator(Func->getOverloadedOperator())) {
if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
handleIncrement(C, Call.getReturnValue(), InstCall->getCXXThisVal(),
Call.getNumArgs());
- } else {
- handleIncrement(C, Call.getReturnValue(), Call.getArgSVal(0),
- Call.getNumArgs());
+ return;
}
+
+ handleIncrement(C, Call.getReturnValue(), Call.getArgSVal(0),
+ Call.getNumArgs());
+ return;
} else if (isDecrementOperator(Func->getOverloadedOperator())) {
if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
handleDecrement(C, Call.getReturnValue(), InstCall->getCXXThisVal(),
Call.getNumArgs());
- } else {
- handleDecrement(C, Call.getReturnValue(), Call.getArgSVal(0),
- Call.getNumArgs());
+ return;
}
+
+ handleDecrement(C, Call.getReturnValue(), Call.getArgSVal(0),
+ Call.getNumArgs());
+ return;
}
} else {
if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
if (isAssignCall(Func)) {
handleAssign(C, InstCall->getCXXThisVal());
- } else if (isClearCall(Func)) {
+ return;
+ }
+
+ if (isClearCall(Func)) {
handleClear(C, InstCall->getCXXThisVal());
- } else if (isPushBackCall(Func) || isEmplaceBackCall(Func)) {
+ return;
+ }
+
+ if (isPushBackCall(Func) || isEmplaceBackCall(Func)) {
handlePushBack(C, InstCall->getCXXThisVal());
- } else if (isPopBackCall(Func)) {
+ return;
+ }
+
+ if (isPopBackCall(Func)) {
handlePopBack(C, InstCall->getCXXThisVal());
- } else if (isPushFrontCall(Func) || isEmplaceFrontCall(Func)) {
+ return;
+ }
+
+ if (isPushFrontCall(Func) || isEmplaceFrontCall(Func)) {
handlePushFront(C, InstCall->getCXXThisVal());
- } else if (isPopFrontCall(Func)) {
+ return;
+ }
+
+ if (isPopFrontCall(Func)) {
handlePopFront(C, InstCall->getCXXThisVal());
- } else if (isInsertCall(Func) || isEmplaceCall(Func)) {
+ return;
+ }
+
+ if (isInsertCall(Func) || isEmplaceCall(Func)) {
handleInsert(C, Call.getArgSVal(0));
- } else if (isEraseCall(Func)) {
+ return;
+ }
+
+ if (isEraseCall(Func)) {
if (Call.getNumArgs() == 1) {
handleErase(C, Call.getArgSVal(0));
- } else if (Call.getNumArgs() == 2) {
+ return;
+ }
+
+ if (Call.getNumArgs() == 2) {
handleErase(C, Call.getArgSVal(0), Call.getArgSVal(1));
+ return;
}
- } else if (isEraseAfterCall(Func)) {
+ }
+
+ if (isEraseAfterCall(Func)) {
if (Call.getNumArgs() == 1) {
handleEraseAfter(C, Call.getArgSVal(0));
- } else if (Call.getNumArgs() == 2) {
+ return;
+ }
+
+ if (Call.getNumArgs() == 2) {
handleEraseAfter(C, Call.getArgSVal(0), Call.getArgSVal(1));
+ return;
}
}
}
@@ -700,6 +702,7 @@ void IteratorChecker::checkPostCall(const CallEvent &Call,
InstCall->getCXXThisVal());
return;
}
+
if (isEndCall(Func)) {
handleEnd(C, OrigExpr, Call.getReturnValue(),
InstCall->getCXXThisVal());
@@ -839,77 +842,79 @@ void IteratorChecker::checkDeadSymbols(SymbolReaper &SR,
}
}
- auto ComparisonMap = State->get<IteratorComparisonMap>();
- for (const auto Comp : ComparisonMap) {
- if (!SR.isLive(Comp.first)) {
- State = State->remove<IteratorComparisonMap>(Comp.first);
- }
- }
-
C.addTransition(State);
}
-ProgramStateRef IteratorChecker::evalAssume(ProgramStateRef State, SVal Cond,
- bool Assumption) const {
- // Load recorded comparison and transfer iterator state between sides
- // according to comparison operator and assumption
- const auto *SE = Cond.getAsSymExpr();
- if (!SE)
- return State;
-
- auto Opc = getOpcode(SE);
- if (Opc != BO_EQ && Opc != BO_NE)
- return State;
-
- bool Negated = false;
- const auto *Comp = loadComparison(State, SE);
- if (!Comp) {
- // Try negated comparison, which is a SymExpr to 0 integer comparison
- const auto *SIE = dyn_cast<SymIntExpr>(SE);
- if (!SIE)
- return State;
-
- if (SIE->getRHS() != 0)
- return State;
+void IteratorChecker::handleComparison(CheckerContext &C, const Expr *CE,
+ const SVal &RetVal, const SVal &LVal,
+ const SVal &RVal,
+ OverloadedOperatorKind Op) const {
+ // Record the operands and the operator of the comparison for the next
+ // evalAssume, if the result is a symbolic expression. If it is a concrete
+ // value (only one branch is possible), then transfer the state between
+ // the operands according to the operator and the result
+ auto State = C.getState();
+ const auto *LPos = getIteratorPosition(State, LVal);
+ const auto *RPos = getIteratorPosition(State, RVal);
+ const MemRegion *Cont = nullptr;
+ if (LPos) {
+ Cont = LPos->getContainer();
+ } else if (RPos) {
+ Cont = RPos->getContainer();
+ }
+ if (!Cont)
+ return;
- SE = SIE->getLHS();
- Negated = SIE->getOpcode() == BO_EQ; // Equal to zero means negation
- Opc = getOpcode(SE);
- if (Opc != BO_EQ && Opc != BO_NE)
- return State;
+ // At least one of the iterators has a recorded position. If one of them does
+ // not, create a new symbol for the offset.
+ SymbolRef Sym;
+ if (!LPos || !RPos) {
+ auto &SymMgr = C.getSymbolManager();
+ Sym = SymMgr.conjureSymbol(CE, C.getLocationContext(),
+ C.getASTContext().LongTy, C.blockCount());
+ State = assumeNoOverflow(State, Sym, 4);
+ }
- Comp = loadComparison(State, SE);
- if (!Comp)
- return State;
+ if (!LPos) {
+ State = setIteratorPosition(State, LVal,
+ IteratorPosition::getPosition(Cont, Sym));
+ LPos = getIteratorPosition(State, LVal);
+ } else if (!RPos) {
+ State = setIteratorPosition(State, RVal,
+ IteratorPosition::getPosition(Cont, Sym));
+ RPos = getIteratorPosition(State, RVal);
}
- return processComparison(State, Comp->getLeft(), Comp->getRight(),
- (Comp->isEquality() == Assumption) != Negated);
+ processComparison(C, State, LPos->getOffset(), RPos->getOffset(), RetVal, Op);
}
-void IteratorChecker::handleComparison(CheckerContext &C, const SVal &RetVal,
- const SVal &LVal, const SVal &RVal,
- OverloadedOperatorKind Op) const {
- // Record the operands and the operator of the comparison for the next
- // evalAssume, if the result is a symbolic expression. If it is a concrete
- // value (only one branch is possible), then transfer the state between
- // the operands according to the operator and the result
- auto State = C.getState();
- if (const auto *Condition = RetVal.getAsSymbolicExpression()) {
- const auto *LPos = getIteratorPosition(State, LVal);
- const auto *RPos = getIteratorPosition(State, RVal);
- if (!LPos && !RPos)
- return;
- State = saveComparison(State, Condition, LVal, RVal, Op == OO_EqualEqual);
- C.addTransition(State);
- } else if (const auto TruthVal = RetVal.getAs<nonloc::ConcreteInt>()) {
- if ((State = processComparison(
- State, getRegionOrSymbol(LVal), getRegionOrSymbol(RVal),
- (Op == OO_EqualEqual) == (TruthVal->getValue() != 0)))) {
+void IteratorChecker::processComparison(CheckerContext &C,
+ ProgramStateRef State, SymbolRef Sym1,
+ SymbolRef Sym2, const SVal &RetVal,
+ OverloadedOperatorKind Op) const {
+ if (const auto TruthVal = RetVal.getAs<nonloc::ConcreteInt>()) {
+ if ((State = relateSymbols(State, Sym1, Sym2,
+ (Op == OO_EqualEqual) ==
+ (TruthVal->getValue() != 0)))) {
C.addTransition(State);
} else {
C.generateSink(State, C.getPredecessor());
}
+ return;
+ }
+
+ const auto ConditionVal = RetVal.getAs<DefinedSVal>();
+ if (!ConditionVal)
+ return;
+
+ if (auto StateTrue = relateSymbols(State, Sym1, Sym2, Op == OO_EqualEqual)) {
+ StateTrue = StateTrue->assume(*ConditionVal, true);
+ C.addTransition(StateTrue);
+ }
+
+ if (auto StateFalse = relateSymbols(State, Sym1, Sym2, Op != OO_EqualEqual)) {
+ StateFalse = StateFalse->assume(*ConditionVal, false);
+ C.addTransition(StateFalse);
}
}
@@ -974,47 +979,6 @@ void IteratorChecker::handleDecrement(CheckerContext &C, const SVal &RetVal,
}
}
-// This function tells the analyzer's engine that symbols produced by our
-// checker, most notably iterator positions, are relatively small.
-// A distance between items in the container should not be very large.
-// By assuming that it is within around 1/8 of the address space,
-// we can help the analyzer perform operations on these symbols
-// without being afraid of integer overflows.
-// FIXME: Should we provide it as an API, so that all checkers could use it?
-static ProgramStateRef assumeNoOverflow(ProgramStateRef State, SymbolRef Sym,
- long Scale) {
- SValBuilder &SVB = State->getStateManager().getSValBuilder();
- BasicValueFactory &BV = SVB.getBasicValueFactory();
-
- QualType T = Sym->getType();
- assert(T->isSignedIntegerOrEnumerationType());
- APSIntType AT = BV.getAPSIntType(T);
-
- ProgramStateRef NewState = State;
-
- llvm::APSInt Max = AT.getMaxValue() / AT.getValue(Scale);
- SVal IsCappedFromAbove =
- SVB.evalBinOpNN(State, BO_LE, nonloc::SymbolVal(Sym),
- nonloc::ConcreteInt(Max), SVB.getConditionType());
- if (auto DV = IsCappedFromAbove.getAs<DefinedSVal>()) {
- NewState = NewState->assume(*DV, true);
- if (!NewState)
- return State;
- }
-
- llvm::APSInt Min = -Max;
- SVal IsCappedFromBelow =
- SVB.evalBinOpNN(State, BO_GE, nonloc::SymbolVal(Sym),
- nonloc::ConcreteInt(Min), SVB.getConditionType());
- if (auto DV = IsCappedFromBelow.getAs<DefinedSVal>()) {
- NewState = NewState->assume(*DV, true);
- if (!NewState)
- return State;
- }
-
- return NewState;
-}
-
void IteratorChecker::handleRandomIncrOrDecr(CheckerContext &C,
OverloadedOperatorKind Op,
const SVal &RetVal,
@@ -1099,14 +1063,34 @@ void IteratorChecker::verifyMatch(CheckerContext &C, const SVal &Iter,
// Verify match between a container and the container of an iterator
Cont = Cont->getMostDerivedObjectRegion();
+ if (const auto *ContSym = Cont->getSymbolicBase()) {
+ if (isa<SymbolConjured>(ContSym->getSymbol()))
+ return;
+ }
+
auto State = C.getState();
const auto *Pos = getIteratorPosition(State, Iter);
- if (Pos && Pos->getContainer() != Cont) {
+ if (!Pos)
+ return;
+
+ const auto *IterCont = Pos->getContainer();
+
+ // Skip symbolic regions based on conjured symbols. Two conjured symbols
+ // may or may not be the same. For example, the same function can return
+ // the same or a different container but we get different conjured symbols
+ // for each call. This may cause false positives so omit them from the check.
+ if (const auto *ContSym = IterCont->getSymbolicBase()) {
+ if (isa<SymbolConjured>(ContSym->getSymbol()))
+ return;
+ }
+
+ if (IterCont != Cont) {
auto *N = C.generateNonFatalErrorNode(State);
if (!N) {
return;
}
- reportMismatchedBug("Container accessed using foreign iterator argument.", Iter, Cont, C, N);
+ reportMismatchedBug("Container accessed using foreign iterator argument.",
+ Iter, Cont, C, N);
}
}
@@ -1115,8 +1099,31 @@ void IteratorChecker::verifyMatch(CheckerContext &C, const SVal &Iter1,
// Verify match between the containers of two iterators
auto State = C.getState();
const auto *Pos1 = getIteratorPosition(State, Iter1);
+ if (!Pos1)
+ return;
+
+ const auto *IterCont1 = Pos1->getContainer();
+
+ // Skip symbolic regions based on conjured symbols. Two conjured symbols
+ // may or may not be the same. For example, the same function can return
+ // the same or a different container but we get different conjured symbols
+ // for each call. This may cause false positives so omit them from the check.
+ if (const auto *ContSym = IterCont1->getSymbolicBase()) {
+ if (isa<SymbolConjured>(ContSym->getSymbol()))
+ return;
+ }
+
const auto *Pos2 = getIteratorPosition(State, Iter2);
- if (Pos1 && Pos2 && Pos1->getContainer() != Pos2->getContainer()) {
+ if (!Pos2)
+ return;
+
+ const auto *IterCont2 = Pos2->getContainer();
+ if (const auto *ContSym = IterCont2->getSymbolicBase()) {
+ if (isa<SymbolConjured>(ContSym->getSymbol()))
+ return;
+ }
+
+ if (IterCont1 != IterCont2) {
auto *N = C.generateNonFatalErrorNode(State);
if (!N)
return;
@@ -1138,11 +1145,9 @@ void IteratorChecker::handleBegin(CheckerContext &C, const Expr *CE,
auto State = C.getState();
auto BeginSym = getContainerBegin(State, ContReg);
if (!BeginSym) {
- auto &SymMgr = C.getSymbolManager();
- BeginSym = SymMgr.conjureSymbol(CE, C.getLocationContext(),
- C.getASTContext().LongTy, C.blockCount());
- State = assumeNoOverflow(State, BeginSym, 4);
- State = createContainerBegin(State, ContReg, BeginSym);
+ State = createContainerBegin(State, ContReg, CE, C.getASTContext().LongTy,
+ C.getLocationContext(), C.blockCount());
+ BeginSym = getContainerBegin(State, ContReg);
}
State = setIteratorPosition(State, RetVal,
IteratorPosition::getPosition(ContReg, BeginSym));
@@ -1162,11 +1167,9 @@ void IteratorChecker::handleEnd(CheckerContext &C, const Expr *CE,
auto State = C.getState();
auto EndSym = getContainerEnd(State, ContReg);
if (!EndSym) {
- auto &SymMgr = C.getSymbolManager();
- EndSym = SymMgr.conjureSymbol(CE, C.getLocationContext(),
- C.getASTContext().LongTy, C.blockCount());
- State = assumeNoOverflow(State, EndSym, 4);
- State = createContainerEnd(State, ContReg, EndSym);
+ State = createContainerEnd(State, ContReg, CE, C.getASTContext().LongTy,
+ C.getLocationContext(), C.blockCount());
+ EndSym = getContainerEnd(State, ContReg);
}
State = setIteratorPosition(State, RetVal,
IteratorPosition::getPosition(ContReg, EndSym));
@@ -1853,23 +1856,6 @@ bool isRandomIncrOrDecrOperator(OverloadedOperatorKind OK) {
OK == OO_MinusEqual;
}
-BinaryOperator::Opcode getOpcode(const SymExpr *SE) {
- if (const auto *BSE = dyn_cast<BinarySymExpr>(SE)) {
- return BSE->getOpcode();
- } else if (const auto *SC = dyn_cast<SymbolConjured>(SE)) {
- const auto *COE = dyn_cast_or_null<CXXOperatorCallExpr>(SC->getStmt());
- if (!COE)
- return BO_Comma; // Extremal value, neither EQ nor NE
- if (COE->getOperator() == OO_EqualEqual) {
- return BO_EQ;
- } else if (COE->getOperator() == OO_ExclaimEqual) {
- return BO_NE;
- }
- return BO_Comma; // Extremal value, neither EQ nor NE
- }
- return BO_Comma; // Extremal value, neither EQ nor NE
-}
-
bool hasSubscriptOperator(ProgramStateRef State, const MemRegion *Reg) {
const auto *CRD = getCXXRecordDecl(State, Reg);
if (!CRD)
@@ -1930,48 +1916,6 @@ const CXXRecordDecl *getCXXRecordDecl(ProgramStateRef State,
return Type->getUnqualifiedDesugaredType()->getAsCXXRecordDecl();
}
-const RegionOrSymbol getRegionOrSymbol(const SVal &Val) {
- if (const auto Reg = Val.getAsRegion()) {
- return Reg;
- } else if (const auto Sym = Val.getAsSymbol()) {
- return Sym;
- } else if (const auto LCVal = Val.getAs<nonloc::LazyCompoundVal>()) {
- return LCVal->getRegion();
- }
- return RegionOrSymbol();
-}
-
-const ProgramStateRef processComparison(ProgramStateRef State,
- RegionOrSymbol LVal,
- RegionOrSymbol RVal, bool Equal) {
- const auto *LPos = getIteratorPosition(State, LVal);
- const auto *RPos = getIteratorPosition(State, RVal);
- if (LPos && !RPos) {
- State = adjustIteratorPosition(State, RVal, *LPos, Equal);
- } else if (!LPos && RPos) {
- State = adjustIteratorPosition(State, LVal, *RPos, Equal);
- } else if (LPos && RPos) {
- State = relateIteratorPositions(State, *LPos, *RPos, Equal);
- }
- return State;
-}
-
-const ProgramStateRef saveComparison(ProgramStateRef State,
- const SymExpr *Condition, const SVal &LVal,
- const SVal &RVal, bool Eq) {
- const auto Left = getRegionOrSymbol(LVal);
- const auto Right = getRegionOrSymbol(RVal);
- if (!Left || !Right)
- return State;
- return State->set<IteratorComparisonMap>(Condition,
- IteratorComparison(Left, Right, Eq));
-}
-
-const IteratorComparison *loadComparison(ProgramStateRef State,
- const SymExpr *Condition) {
- return State->get<IteratorComparisonMap>(Condition);
-}
-
SymbolRef getContainerBegin(ProgramStateRef State, const MemRegion *Cont) {
const auto *CDataPtr = getContainerData(State, Cont);
if (!CDataPtr)
@@ -1989,32 +1933,47 @@ SymbolRef getContainerEnd(ProgramStateRef State, const MemRegion *Cont) {
}
ProgramStateRef createContainerBegin(ProgramStateRef State,
- const MemRegion *Cont,
- const SymbolRef Sym) {
+ const MemRegion *Cont, const Expr *E,
+ QualType T, const LocationContext *LCtx,
+ unsigned BlockCount) {
// Only create if it does not exist
const auto *CDataPtr = getContainerData(State, Cont);
+ if (CDataPtr && CDataPtr->getBegin())
+ return State;
+
+ auto &SymMgr = State->getSymbolManager();
+ const SymbolConjured *Sym = SymMgr.conjureSymbol(E, LCtx, T, BlockCount,
+ "begin");
+ State = assumeNoOverflow(State, Sym, 4);
+
if (CDataPtr) {
- if (CDataPtr->getBegin()) {
- return State;
- }
const auto CData = CDataPtr->newBegin(Sym);
return setContainerData(State, Cont, CData);
}
+
const auto CData = ContainerData::fromBegin(Sym);
return setContainerData(State, Cont, CData);
}
ProgramStateRef createContainerEnd(ProgramStateRef State, const MemRegion *Cont,
- const SymbolRef Sym) {
+ const Expr *E, QualType T,
+ const LocationContext *LCtx,
+ unsigned BlockCount) {
// Only create if it does not exist
const auto *CDataPtr = getContainerData(State, Cont);
+ if (CDataPtr && CDataPtr->getEnd())
+ return State;
+
+ auto &SymMgr = State->getSymbolManager();
+ const SymbolConjured *Sym = SymMgr.conjureSymbol(E, LCtx, T, BlockCount,
+ "end");
+ State = assumeNoOverflow(State, Sym, 4);
+
if (CDataPtr) {
- if (CDataPtr->getEnd()) {
- return State;
- }
const auto CData = CDataPtr->newEnd(Sym);
return setContainerData(State, Cont, CData);
}
+
const auto CData = ContainerData::fromEnd(Sym);
return setContainerData(State, Cont, CData);
}
@@ -2042,17 +2001,6 @@ const IteratorPosition *getIteratorPosition(ProgramStateRef State,
return nullptr;
}
-const IteratorPosition *getIteratorPosition(ProgramStateRef State,
- RegionOrSymbol RegOrSym) {
- if (RegOrSym.is<const MemRegion *>()) {
- auto Reg = RegOrSym.get<const MemRegion *>()->getMostDerivedObjectRegion();
- return State->get<IteratorRegionMap>(Reg);
- } else if (RegOrSym.is<SymbolRef>()) {
- return State->get<IteratorSymbolMap>(RegOrSym.get<SymbolRef>());
- }
- return nullptr;
-}
-
ProgramStateRef setIteratorPosition(ProgramStateRef State, const SVal &Val,
const IteratorPosition &Pos) {
if (auto Reg = Val.getAsRegion()) {
@@ -2066,18 +2014,6 @@ ProgramStateRef setIteratorPosition(ProgramStateRef State, const SVal &Val,
return nullptr;
}
-ProgramStateRef setIteratorPosition(ProgramStateRef State,
- RegionOrSymbol RegOrSym,
- const IteratorPosition &Pos) {
- if (RegOrSym.is<const MemRegion *>()) {
- auto Reg = RegOrSym.get<const MemRegion *>()->getMostDerivedObjectRegion();
- return State->set<IteratorRegionMap>(Reg, Pos);
- } else if (RegOrSym.is<SymbolRef>()) {
- return State->set<IteratorSymbolMap>(RegOrSym.get<SymbolRef>(), Pos);
- }
- return nullptr;
-}
-
ProgramStateRef removeIteratorPosition(ProgramStateRef State, const SVal &Val) {
if (auto Reg = Val.getAsRegion()) {
Reg = Reg->getMostDerivedObjectRegion();
@@ -2090,21 +2026,8 @@ ProgramStateRef removeIteratorPosition(ProgramStateRef State, const SVal &Val) {
return nullptr;
}
-ProgramStateRef adjustIteratorPosition(ProgramStateRef State,
- RegionOrSymbol RegOrSym,
- const IteratorPosition &Pos,
- bool Equal) {
- if (Equal) {
- return setIteratorPosition(State, RegOrSym, Pos);
- } else {
- return State;
- }
-}
-
-ProgramStateRef relateIteratorPositions(ProgramStateRef State,
- const IteratorPosition &Pos1,
- const IteratorPosition &Pos2,
- bool Equal) {
+ProgramStateRef relateSymbols(ProgramStateRef State, SymbolRef Sym1,
+ SymbolRef Sym2, bool Equal) {
auto &SVB = State->getStateManager().getSValBuilder();
// FIXME: This code should be reworked as follows:
@@ -2113,14 +2036,16 @@ ProgramStateRef relateIteratorPositions(ProgramStateRef State,
// 3. Compare the result to 0.
// 4. Assume the result of the comparison.
const auto comparison =
- SVB.evalBinOp(State, BO_EQ, nonloc::SymbolVal(Pos1.getOffset()),
- nonloc::SymbolVal(Pos2.getOffset()),
- SVB.getConditionType());
+ SVB.evalBinOp(State, BO_EQ, nonloc::SymbolVal(Sym1),
+ nonloc::SymbolVal(Sym2), SVB.getConditionType());
assert(comparison.getAs<DefinedSVal>() &&
"Symbol comparison must be a `DefinedSVal`");
auto NewState = State->assume(comparison.castAs<DefinedSVal>(), Equal);
+ if (!NewState)
+ return nullptr;
+
if (const auto CompSym = comparison.getAsSymbol()) {
assert(isa<SymIntExpr>(CompSym) &&
"Symbol comparison must be a `SymIntExpr`");
@@ -2161,6 +2086,47 @@ bool isBoundThroughLazyCompoundVal(const Environment &Env,
return false;
}
+// This function tells the analyzer's engine that symbols produced by our
+// checker, most notably iterator positions, are relatively small.
+// A distance between items in the container should not be very large.
+// By assuming that it is within around 1/8 of the address space,
+// we can help the analyzer perform operations on these symbols
+// without being afraid of integer overflows.
+// FIXME: Should we provide it as an API, so that all checkers could use it?
+ProgramStateRef assumeNoOverflow(ProgramStateRef State, SymbolRef Sym,
+ long Scale) {
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ BasicValueFactory &BV = SVB.getBasicValueFactory();
+
+ QualType T = Sym->getType();
+ assert(T->isSignedIntegerOrEnumerationType());
+ APSIntType AT = BV.getAPSIntType(T);
+
+ ProgramStateRef NewState = State;
+
+ llvm::APSInt Max = AT.getMaxValue() / AT.getValue(Scale);
+ SVal IsCappedFromAbove =
+ SVB.evalBinOpNN(State, BO_LE, nonloc::SymbolVal(Sym),
+ nonloc::ConcreteInt(Max), SVB.getConditionType());
+ if (auto DV = IsCappedFromAbove.getAs<DefinedSVal>()) {
+ NewState = NewState->assume(*DV, true);
+ if (!NewState)
+ return State;
+ }
+
+ llvm::APSInt Min = -Max;
+ SVal IsCappedFromBelow =
+ SVB.evalBinOpNN(State, BO_GE, nonloc::SymbolVal(Sym),
+ nonloc::ConcreteInt(Min), SVB.getConditionType());
+ if (auto DV = IsCappedFromBelow.getAs<DefinedSVal>()) {
+ NewState = NewState->assume(*DV, true);
+ if (!NewState)
+ return State;
+ }
+
+ return NewState;
+}
+
template <typename Condition, typename Process>
ProgramStateRef processIteratorPositions(ProgramStateRef State, Condition Cond,
Process Proc) {
@@ -2379,7 +2345,6 @@ bool compare(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2,
return compare(State, nonloc::SymbolVal(Sym1), nonloc::SymbolVal(Sym2), Opc);
}
-
bool compare(ProgramStateRef State, NonLoc NL1, NonLoc NL2,
BinaryOperator::Opcode Opc) {
auto &SVB = State->getStateManager().getSValBuilder();
@@ -2395,12 +2360,24 @@ bool compare(ProgramStateRef State, NonLoc NL1, NonLoc NL2,
} // namespace
+void ento::registerIteratorModeling(CheckerManager &mgr) {
+ mgr.registerChecker<IteratorChecker>();
+}
+
+bool ento::shouldRegisterIteratorModeling(const LangOptions &LO) {
+ return true;
+}
+
#define REGISTER_CHECKER(name) \
void ento::register##name(CheckerManager &Mgr) { \
- auto *checker = Mgr.registerChecker<IteratorChecker>(); \
+ auto *checker = Mgr.getChecker<IteratorChecker>(); \
checker->ChecksEnabled[IteratorChecker::CK_##name] = true; \
checker->CheckNames[IteratorChecker::CK_##name] = \
Mgr.getCurrentCheckName(); \
+ } \
+ \
+ bool ento::shouldRegister##name(const LangOptions &LO) { \
+ return true; \
}
REGISTER_CHECKER(IteratorRangeChecker)
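// Illustrative sketch (not part of this patch): the kind of mismatch that
// verifyMatch() above reports. An iterator obtained from one container is
// passed to a member function of another.
#include <vector>

void foreign_iterator(std::vector<int> &a, std::vector<int> &b) {
  if (!b.empty())
    a.erase(b.begin());   // iterator of 'b' used with 'a' -> mismatched report
}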
diff --git a/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp b/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
index aade62fd7491..2b75f3acc922 100644
--- a/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
@@ -1,9 +1,8 @@
//===- IvarInvalidationChecker.cpp ------------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -736,12 +735,24 @@ public:
};
} // end anonymous namespace
+void ento::registerIvarInvalidationModeling(CheckerManager &mgr) {
+ mgr.registerChecker<IvarInvalidationChecker>();
+}
+
+bool ento::shouldRegisterIvarInvalidationModeling(const LangOptions &LO) {
+ return true;
+}
+
#define REGISTER_CHECKER(name) \
void ento::register##name(CheckerManager &mgr) { \
IvarInvalidationChecker *checker = \
- mgr.registerChecker<IvarInvalidationChecker>(); \
+ mgr.getChecker<IvarInvalidationChecker>(); \
checker->Filter.check_##name = true; \
checker->Filter.checkName_##name = mgr.getCurrentCheckName(); \
+ } \
+ \
+ bool ento::shouldRegister##name(const LangOptions &LO) { \
+ return true; \
}
REGISTER_CHECKER(InstanceVariableInvalidation)
diff --git a/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp b/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
index df238f2b2e45..7522fdd0a99b 100644
--- a/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
@@ -1,9 +1,8 @@
//=== LLVMConventionsChecker.cpp - Check LLVM codebase conventions ---*- C++ -*-
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -314,3 +313,7 @@ public:
void ento::registerLLVMConventionsChecker(CheckerManager &mgr) {
mgr.registerChecker<LLVMConventionsChecker>();
}
+
+bool ento::shouldRegisterLLVMConventionsChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp b/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
index eda39efeca17..46067ecbca99 100644
--- a/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
@@ -1,9 +1,8 @@
//=- LocalizationChecker.cpp -------------------------------------*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -754,10 +753,9 @@ void NonLocalizedStringChecker::reportLocalizationError(
if (isDebuggingContext(C))
return;
- ExplodedNode *ErrNode = C.getPredecessor();
static CheckerProgramPointTag Tag("NonLocalizedStringChecker",
"UnlocalizedString");
- ErrNode = C.addTransition(C.getState(), C.getPredecessor(), &Tag);
+ ExplodedNode *ErrNode = C.addTransition(C.getState(), C.getPredecessor(), &Tag);
if (!ErrNode)
return;
@@ -1141,7 +1139,7 @@ void EmptyLocalizationContextChecker::MethodCrawler::VisitObjCMessageExpr(
}
bool Invalid = false;
- llvm::MemoryBuffer *BF =
+ const llvm::MemoryBuffer *BF =
Mgr.getSourceManager().getBuffer(SLInfo.first, SL, &Invalid);
if (Invalid)
return;
@@ -1398,14 +1396,27 @@ void ento::registerNonLocalizedStringChecker(CheckerManager &mgr) {
NonLocalizedStringChecker *checker =
mgr.registerChecker<NonLocalizedStringChecker>();
checker->IsAggressive =
- mgr.getAnalyzerOptions().getCheckerBooleanOption("AggressiveReport",
- false, checker);
+ mgr.getAnalyzerOptions().getCheckerBooleanOption(
+ checker, "AggressiveReport");
+}
+
+bool ento::shouldRegisterNonLocalizedStringChecker(const LangOptions &LO) {
+ return true;
}
void ento::registerEmptyLocalizationContextChecker(CheckerManager &mgr) {
mgr.registerChecker<EmptyLocalizationContextChecker>();
}
+bool ento::shouldRegisterEmptyLocalizationContextChecker(
+ const LangOptions &LO) {
+ return true;
+}
+
void ento::registerPluralMisuseChecker(CheckerManager &mgr) {
mgr.registerChecker<PluralMisuseChecker>();
}
+
+bool ento::shouldRegisterPluralMisuseChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/MIGChecker.cpp b/lib/StaticAnalyzer/Checkers/MIGChecker.cpp
new file mode 100644
index 000000000000..6e7776bb484e
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/MIGChecker.cpp
@@ -0,0 +1,295 @@
+//== MIGChecker.cpp - MIG calling convention checker ------------*- C++ -*--==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines MIGChecker, a Mach Interface Generator calling convention
+// checker. Namely, in MIG callback implementations the following rules apply:
+// - When a server routine returns an error code that represents success, it
+// must take ownership of resources passed to it (and eventually release
+// them).
+// - Additionally, when returning success, all out-parameters must be
+// initialized.
+// - When it returns any other error code, it must not take ownership,
+// because the message and its out-of-line parameters will be destroyed
+// by the client that called the function.
+// For now we only check the last rule, as its violations lead to dangerous
+// use-after-free exploits.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/AnyCall.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class MIGChecker : public Checker<check::PostCall, check::PreStmt<ReturnStmt>,
+ check::EndFunction> {
+ BugType BT{this, "Use-after-free (MIG calling convention violation)",
+ categories::MemoryError};
+
+ // The checker knows that an out-of-line object is deallocated if it is
+ // passed as an argument to one of these functions. If this object is
+ // additionally an argument of a MIG routine, the checker keeps track of that
+ // information and issues a warning when an error is returned from the
+ // respective routine.
+ std::vector<std::pair<CallDescription, unsigned>> Deallocators = {
+#define CALL(required_args, deallocated_arg, ...) \
+ {{{__VA_ARGS__}, required_args}, deallocated_arg}
+ // E.g., if the checker sees a C function 'vm_deallocate' that takes
+ // exactly 3 arguments, it knows
+ // that argument #1 (starting from 0, i.e. the second argument) is going
+ // to be consumed in the sense of the MIG consume-on-success convention.
+ CALL(3, 1, "vm_deallocate"),
+ CALL(3, 1, "mach_vm_deallocate"),
+ CALL(2, 0, "mig_deallocate"),
+ CALL(2, 1, "mach_port_deallocate"),
+ CALL(1, 0, "device_deallocate"),
+ CALL(1, 0, "iokit_remove_connect_reference"),
+ CALL(1, 0, "iokit_remove_reference"),
+ CALL(1, 0, "iokit_release_port"),
+ CALL(1, 0, "ipc_port_release"),
+ CALL(1, 0, "ipc_port_release_sonce"),
+ CALL(1, 0, "ipc_voucher_attr_control_release"),
+ CALL(1, 0, "ipc_voucher_release"),
+ CALL(1, 0, "lock_set_dereference"),
+ CALL(1, 0, "memory_object_control_deallocate"),
+ CALL(1, 0, "pset_deallocate"),
+ CALL(1, 0, "semaphore_dereference"),
+ CALL(1, 0, "space_deallocate"),
+ CALL(1, 0, "space_inspect_deallocate"),
+ CALL(1, 0, "task_deallocate"),
+ CALL(1, 0, "task_inspect_deallocate"),
+ CALL(1, 0, "task_name_deallocate"),
+ CALL(1, 0, "thread_deallocate"),
+ CALL(1, 0, "thread_inspect_deallocate"),
+ CALL(1, 0, "upl_deallocate"),
+ CALL(1, 0, "vm_map_deallocate"),
+ // E.g., if the checker sees a method 'releaseAsyncReference64()' that is
+ // defined on class 'IOUserClient' that takes exactly 1 argument, it knows
+ // that the argument is going to be consumed in the sense of the MIG
+ // consume-on-success convention.
+ CALL(1, 0, "IOUserClient", "releaseAsyncReference64"),
+ CALL(1, 0, "IOUserClient", "releaseNotificationPort"),
+#undef CALL
+ };
+
+ CallDescription OsRefRetain{"os_ref_retain", 1};
+
+ void checkReturnAux(const ReturnStmt *RS, CheckerContext &C) const;
+
+public:
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+
+ // HACK: We're making two attempts to find the bug: checkEndFunction
+ // should normally be enough but it fails when the return value is a literal
+ // that never gets put into the Environment and ends of function with multiple
+ // returns get agglutinated across returns, preventing us from obtaining
+ // the return value. The problem is similar to https://reviews.llvm.org/D25326
+ // but now we step into it in the top-level function.
+ void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const {
+ checkReturnAux(RS, C);
+ }
+ void checkEndFunction(const ReturnStmt *RS, CheckerContext &C) const {
+ checkReturnAux(RS, C);
+ }
+
+};
+} // end anonymous namespace
+
+// A flag that says that the programmer has called a MIG destructor
+// for at least one parameter.
+REGISTER_TRAIT_WITH_PROGRAMSTATE(ReleasedParameter, bool)
+// A set of parameters for which the check is suppressed because
+// reference counting is being performed.
+REGISTER_SET_WITH_PROGRAMSTATE(RefCountedParameters, const ParmVarDecl *)
+
+static const ParmVarDecl *getOriginParam(SVal V, CheckerContext &C,
+ bool IncludeBaseRegions = false) {
+ // TODO: We should most likely always include base regions here.
+ SymbolRef Sym = V.getAsSymbol(IncludeBaseRegions);
+ if (!Sym)
+ return nullptr;
+
+ // If we optimistically assume that the MIG routine never re-uses the storage
+ // that was passed to it as arguments when it invalidates it (but at most when
+ // it assigns to parameter variables directly), this procedure correctly
+ // determines if the value was loaded from the transitive closure of MIG
+ // routine arguments in the heap.
+ while (const MemRegion *MR = Sym->getOriginRegion()) {
+ const auto *VR = dyn_cast<VarRegion>(MR);
+ if (VR && VR->hasStackParametersStorage() &&
+ VR->getStackFrame()->inTopFrame())
+ return cast<ParmVarDecl>(VR->getDecl());
+
+ const SymbolicRegion *SR = MR->getSymbolicBase();
+ if (!SR)
+ return nullptr;
+
+ Sym = SR->getSymbol();
+ }
+
+ return nullptr;
+}
+
+static bool isInMIGCall(CheckerContext &C) {
+ const LocationContext *LC = C.getLocationContext();
+ assert(LC && "Unknown location context");
+
+ const StackFrameContext *SFC;
+ // Find the top frame.
+ while (LC) {
+ SFC = LC->getStackFrame();
+ LC = SFC->getParent();
+ }
+
+ const Decl *D = SFC->getDecl();
+
+ if (Optional<AnyCall> AC = AnyCall::forDecl(D)) {
+ // Even though there's a Sema warning when the return type of an annotated
+ // function is not a kern_return_t, this warning isn't an error, so we need
+ // an extra sanity check here.
+ // FIXME: AnyCall doesn't support blocks yet, so they remain unchecked
+ // for now.
+ if (!AC->getReturnType(C.getASTContext())
+ .getCanonicalType()->isSignedIntegerType())
+ return false;
+ }
+
+ if (D->hasAttr<MIGServerRoutineAttr>())
+ return true;
+
+ // See if there's an annotated method in the superclass.
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(D))
+ for (const auto *OMD: MD->overridden_methods())
+ if (OMD->hasAttr<MIGServerRoutineAttr>())
+ return true;
+
+ return false;
+}
+
+void MIGChecker::checkPostCall(const CallEvent &Call, CheckerContext &C) const {
+ if (Call.isCalled(OsRefRetain)) {
+ // If the code is doing reference counting over the parameter,
+ // it opens up an opportunity for safely calling a destructor function.
+ // TODO: We should still check for over-releases.
+ if (const ParmVarDecl *PVD =
+ getOriginParam(Call.getArgSVal(0), C, /*IncludeBaseRegions=*/true)) {
+ // We never need to clean up the program state because these are
+ // top-level parameters anyway, so they're always live.
+ C.addTransition(C.getState()->add<RefCountedParameters>(PVD));
+ }
+ return;
+ }
+
+ if (!isInMIGCall(C))
+ return;
+
+ auto I = llvm::find_if(Deallocators,
+ [&](const std::pair<CallDescription, unsigned> &Item) {
+ return Call.isCalled(Item.first);
+ });
+ if (I == Deallocators.end())
+ return;
+
+ ProgramStateRef State = C.getState();
+ unsigned ArgIdx = I->second;
+ SVal Arg = Call.getArgSVal(ArgIdx);
+ const ParmVarDecl *PVD = getOriginParam(Arg, C);
+ if (!PVD || State->contains<RefCountedParameters>(PVD))
+ return;
+
+ const NoteTag *T = C.getNoteTag([this, PVD](BugReport &BR) -> std::string {
+ if (&BR.getBugType() != &BT)
+ return "";
+ SmallString<64> Str;
+ llvm::raw_svector_ostream OS(Str);
+ OS << "Value passed through parameter '" << PVD->getName()
+ << "\' is deallocated";
+ return OS.str();
+ });
+ C.addTransition(State->set<ReleasedParameter>(true), T);
+}
+
+// Returns true if V can potentially represent a "successful" kern_return_t.
+static bool mayBeSuccess(SVal V, CheckerContext &C) {
+ ProgramStateRef State = C.getState();
+
+ // Can V represent KERN_SUCCESS?
+ if (!State->isNull(V).isConstrainedFalse())
+ return true;
+
+ SValBuilder &SVB = C.getSValBuilder();
+ ASTContext &ACtx = C.getASTContext();
+
+ // Can V represent MIG_NO_REPLY?
+ static const int MigNoReply = -305;
+ V = SVB.evalEQ(C.getState(), V, SVB.makeIntVal(MigNoReply, ACtx.IntTy));
+ if (!State->isNull(V).isConstrainedTrue())
+ return true;
+
+ // If none of the above, it's definitely an error.
+ return false;
+}
+
+void MIGChecker::checkReturnAux(const ReturnStmt *RS, CheckerContext &C) const {
+ // It is very unlikely that a MIG callback will be called from anywhere
+ // within the project under analysis by a caller that isn't itself a routine
+ // that follows the MIG calling convention. Therefore we're safe to believe
+ // that it's always the top frame that is of interest. There's a slight chance
+ // that the user would want to enforce the MIG calling convention upon
+ // a random routine in the middle of nowhere, but given that the convention is
+ // fairly weird and hard to follow in the first place, there's relatively
+ // little motivation to spread it this way.
+ if (!C.inTopFrame())
+ return;
+
+ if (!isInMIGCall(C))
+ return;
+
+ // We know that the function is non-void, but what if the return statement
+ // is not there in the code? It's not a compile error, so we should not crash.
+ if (!RS)
+ return;
+
+ ProgramStateRef State = C.getState();
+ if (!State->get<ReleasedParameter>())
+ return;
+
+ SVal V = C.getSVal(RS);
+ if (mayBeSuccess(V, C))
+ return;
+
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return;
+
+ auto R = llvm::make_unique<BugReport>(
+ BT,
+ "MIG callback fails with error after deallocating argument value. "
+ "This is a use-after-free vulnerability because the caller will try to "
+ "deallocate it again",
+ N);
+
+ R->addRange(RS->getSourceRange());
+ bugreporter::trackExpressionValue(N, RS->getRetValue(), *R, false);
+ C.emitReport(std::move(R));
+}
+
+void ento::registerMIGChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<MIGChecker>();
+}
+
+bool ento::shouldRegisterMIGChecker(const LangOptions &LO) {
+ return true;
+}
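A minimal sketch of the pattern this checker reports (illustrative only; the routine below and its body are assumptions, not part of this patch, and assume a Darwin target for <mach/mach.h>):

#include <mach/mach.h>

// A MIG server routine that releases an out-of-line argument and then fails.
// On an error return, the MIG-generated caller deallocates the same memory
// again, which is the use-after-free scenario described above.
kern_return_t handle_request(vm_address_t data, mach_msg_type_number_t dataCnt)
    __attribute__((mig_server_routine));

kern_return_t handle_request(vm_address_t data, mach_msg_type_number_t dataCnt) {
  vm_deallocate(mach_task_self(), data, dataCnt); // ownership dropped here
  if (dataCnt == 0)
    return KERN_INVALID_ARGUMENT; // error: the caller deallocates 'data' again
  return KERN_SUCCESS;            // success: ownership transfer is fine
}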
diff --git a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.cpp b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.cpp
index fb9bccebe465..b250d3f8795e 100644
--- a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.cpp
+++ b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.cpp
@@ -1,9 +1,8 @@
//===-- MPIBugReporter.cpp - bug reporter -----------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
index 32fcb07e3371..6fbc30288655 100644
--- a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
+++ b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
@@ -1,9 +1,8 @@
//===-- MPIBugReporter.h - bug reporter -----------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
index 28c6898f7947..7f9ba0de1dc2 100644
--- a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
@@ -1,9 +1,8 @@
//===-- MPIChecker.cpp - Checker Entry Point Class --------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -188,3 +187,7 @@ void MPIChecker::allRegionsUsedByWait(
void clang::ento::registerMPIChecker(CheckerManager &MGR) {
MGR.registerChecker<clang::ento::mpi::MPIChecker>();
}
+
+bool clang::ento::shouldRegisterMPIChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.h b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.h
index 6b1c062ef3d5..ce9f1afac209 100644
--- a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.h
+++ b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.h
@@ -1,9 +1,8 @@
//===-- MPIChecker.h - Verify MPI API usage- --------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.cpp b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.cpp
index 12760abaeeff..277b3ed2e105 100644
--- a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.cpp
+++ b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.cpp
@@ -1,9 +1,8 @@
//===-- MPIFunctionClassifier.cpp - classifies MPI functions ----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPITypes.h b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPITypes.h
index 2e7140cd771e..fe0fb2a4d0e7 100644
--- a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPITypes.h
+++ b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPITypes.h
@@ -1,9 +1,8 @@
//===-- MPITypes.h - Functionality to model MPI concepts --------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
index 06e27fc5718d..32ba9bc8e2ef 100644
--- a/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
@@ -1,9 +1,8 @@
//==--- MacOSKeychainAPIChecker.cpp ------------------------------*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This checker flags misuses of KeyChainAPI. In particular, the password data
@@ -662,3 +661,7 @@ void MacOSKeychainAPIChecker::printState(raw_ostream &Out,
void ento::registerMacOSKeychainAPIChecker(CheckerManager &mgr) {
mgr.registerChecker<MacOSKeychainAPIChecker>();
}
+
+bool ento::shouldRegisterMacOSKeychainAPIChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
index f5976d7da4c1..1c52d20d0991 100644
--- a/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
@@ -1,9 +1,8 @@
// MacOSXAPIChecker.h - Checks proper use of various MacOS X APIs --*- C++ -*-//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -174,3 +173,7 @@ void MacOSXAPIChecker::checkPreStmt(const CallExpr *CE,
void ento::registerMacOSXAPIChecker(CheckerManager &mgr) {
mgr.registerChecker<MacOSXAPIChecker>();
}
+
+bool ento::shouldRegisterMacOSXAPIChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index ae1b1fc837be..a79b34189065 100644
--- a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -1,9 +1,8 @@
//=== MallocChecker.cpp - A malloc/free checker -------------------*- C++ -*--//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -18,6 +17,7 @@
#include "clang/AST/ParentMap.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Lexer.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -178,9 +178,10 @@ public:
II_free(nullptr), II_realloc(nullptr), II_calloc(nullptr),
II_valloc(nullptr), II_reallocf(nullptr), II_strndup(nullptr),
II_strdup(nullptr), II_win_strdup(nullptr), II_kmalloc(nullptr),
- II_if_nameindex(nullptr), II_if_freenameindex(nullptr),
- II_wcsdup(nullptr), II_win_wcsdup(nullptr), II_g_malloc(nullptr),
- II_g_malloc0(nullptr), II_g_realloc(nullptr), II_g_try_malloc(nullptr),
+ II_kfree(nullptr), II_if_nameindex(nullptr),
+ II_if_freenameindex(nullptr), II_wcsdup(nullptr),
+ II_win_wcsdup(nullptr), II_g_malloc(nullptr), II_g_malloc0(nullptr),
+ II_g_realloc(nullptr), II_g_try_malloc(nullptr),
II_g_try_malloc0(nullptr), II_g_try_realloc(nullptr),
II_g_free(nullptr), II_g_memdup(nullptr), II_g_malloc_n(nullptr),
II_g_malloc0_n(nullptr), II_g_realloc_n(nullptr),
@@ -250,13 +251,13 @@ private:
mutable IdentifierInfo *II_alloca, *II_win_alloca, *II_malloc, *II_free,
*II_realloc, *II_calloc, *II_valloc, *II_reallocf,
*II_strndup, *II_strdup, *II_win_strdup, *II_kmalloc,
- *II_if_nameindex, *II_if_freenameindex, *II_wcsdup,
- *II_win_wcsdup, *II_g_malloc, *II_g_malloc0,
- *II_g_realloc, *II_g_try_malloc, *II_g_try_malloc0,
- *II_g_try_realloc, *II_g_free, *II_g_memdup,
- *II_g_malloc_n, *II_g_malloc0_n, *II_g_realloc_n,
- *II_g_try_malloc_n, *II_g_try_malloc0_n,
- *II_g_try_realloc_n;
+ *II_kfree, *II_if_nameindex, *II_if_freenameindex,
+ *II_wcsdup, *II_win_wcsdup, *II_g_malloc,
+ *II_g_malloc0, *II_g_realloc, *II_g_try_malloc,
+ *II_g_try_malloc0, *II_g_try_realloc, *II_g_free,
+ *II_g_memdup, *II_g_malloc_n, *II_g_malloc0_n,
+ *II_g_realloc_n, *II_g_try_malloc_n,
+ *II_g_try_malloc0_n, *II_g_try_realloc_n;
mutable Optional<uint64_t> KernelZeroFlagVal;
void initIdentifierInfo(ASTContext &C) const;
@@ -359,6 +360,11 @@ private:
/// Check if the memory associated with this symbol was released.
bool isReleased(SymbolRef Sym, CheckerContext &C) const;
+ /// See if deallocation happens in a suspicious context. If so, escape the
+ /// pointers that otherwise would have been deallocated and return true.
+ bool suppressDeallocationsInSuspiciousContexts(const CallExpr *CE,
+ CheckerContext &C) const;
+
bool checkUseAfterFree(SymbolRef Sym, CheckerContext &C, const Stmt *S) const;
void checkUseZeroAllocated(SymbolRef Sym, CheckerContext &C,
@@ -599,6 +605,7 @@ void MallocChecker::initIdentifierInfo(ASTContext &Ctx) const {
II_strndup = &Ctx.Idents.get("strndup");
II_wcsdup = &Ctx.Idents.get("wcsdup");
II_kmalloc = &Ctx.Idents.get("kmalloc");
+ II_kfree = &Ctx.Idents.get("kfree");
II_if_nameindex = &Ctx.Idents.get("if_nameindex");
II_if_freenameindex = &Ctx.Idents.get("if_freenameindex");
@@ -658,7 +665,7 @@ bool MallocChecker::isCMemFunction(const FunctionDecl *FD,
if (Family == AF_Malloc && CheckFree) {
if (FunI == II_free || FunI == II_realloc || FunI == II_reallocf ||
- FunI == II_g_free)
+ FunI == II_g_free || FunI == II_kfree)
return true;
}
@@ -875,7 +882,10 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
State = CallocMem(C, CE, State);
State = ProcessZeroAllocation(C, CE, 0, State);
State = ProcessZeroAllocation(C, CE, 1, State);
- } else if (FunI == II_free || FunI == II_g_free) {
+ } else if (FunI == II_free || FunI == II_g_free || FunI == II_kfree) {
+ if (suppressDeallocationsInSuspiciousContexts(CE, C))
+ return;
+
State = FreeMemAux(C, CE, State, 0, false, ReleasedAllocatedMemory);
} else if (FunI == II_strdup || FunI == II_win_strdup ||
FunI == II_wcsdup || FunI == II_win_wcsdup) {
@@ -985,7 +995,7 @@ ProgramStateRef MallocChecker::ProcessZeroAllocation(
}
else if (const CXXNewExpr *NE = dyn_cast<CXXNewExpr>(E)) {
if (NE->isArray())
- Arg = NE->getArraySize();
+ Arg = *NE->getArraySize();
else
return State;
}
@@ -1117,7 +1127,7 @@ ProgramStateRef MallocChecker::addExtentSize(CheckerContext &C,
SVal ElementCount;
const SubRegion *Region;
if (NE->isArray()) {
- const Expr *SizeExpr = NE->getArraySize();
+ const Expr *SizeExpr = *NE->getArraySize();
ElementCount = C.getSVal(SizeExpr);
// Store the extent size for the (symbolic)region
// containing the elements.
@@ -1207,7 +1217,7 @@ void MallocChecker::checkPostObjCMessage(const ObjCMethodCall &Call,
ProgramStateRef State = FreeMemAux(C, Call.getArgExpr(0),
Call.getOriginExpr(), C.getState(),
/*Hold=*/true, ReleasedAllocatedMemory,
- /*RetNullOnFailure=*/true);
+ /*ReturnsNullOnFailure=*/true);
C.addTransition(State);
}
@@ -2301,14 +2311,14 @@ void MallocChecker::reportLeak(SymbolRef Sym, ExplodedNode *N,
assert(N);
if (!BT_Leak[*CheckKind]) {
- BT_Leak[*CheckKind].reset(new BugType(CheckNames[*CheckKind], "Memory leak",
- categories::MemoryError));
// Leaks should not be reported if they are post-dominated by a sink:
// (1) Sinks are higher importance bugs.
// (2) NoReturnFunctionChecker uses sink nodes to represent paths ending
// with __noreturn functions such as assert() or exit(). We choose not
// to report leaks on such paths.
- BT_Leak[*CheckKind]->setSuppressOnSink(true);
+ BT_Leak[*CheckKind].reset(new BugType(CheckNames[*CheckKind], "Memory leak",
+ categories::MemoryError,
+ /*SuppressOnSink=*/true));
}
// Most bug reports are cached at the location where they occurred.
@@ -2531,6 +2541,35 @@ bool MallocChecker::isReleased(SymbolRef Sym, CheckerContext &C) const {
return (RS && RS->isReleased());
}
+bool MallocChecker::suppressDeallocationsInSuspiciousContexts(
+ const CallExpr *CE, CheckerContext &C) const {
+ if (CE->getNumArgs() == 0)
+ return false;
+
+ StringRef FunctionStr = "";
+ if (const auto *FD = dyn_cast<FunctionDecl>(C.getStackFrame()->getDecl()))
+ if (const Stmt *Body = FD->getBody())
+ if (Body->getBeginLoc().isValid())
+ FunctionStr =
+ Lexer::getSourceText(CharSourceRange::getTokenRange(
+ {FD->getBeginLoc(), Body->getBeginLoc()}),
+ C.getSourceManager(), C.getLangOpts());
+
+ // We do not model the Integer Set Library's retain-count based allocation.
+ if (!FunctionStr.contains("__isl_"))
+ return false;
+
+ ProgramStateRef State = C.getState();
+
+ for (const Expr *Arg : CE->arguments())
+ if (SymbolRef Sym = C.getSVal(Arg).getAsSymbol())
+ if (const RefState *RS = State->get<RegionState>(Sym))
+ State = State->set<RegionState>(Sym, RefState::getEscaped(RS));
+
+ C.addTransition(State);
+ return true;
+}
+
bool MallocChecker::checkUseAfterFree(SymbolRef Sym, CheckerContext &C,
const Stmt *S) const {
@@ -2832,7 +2871,6 @@ ProgramStateRef MallocChecker::checkPointerEscapeAux(ProgramStateRef State,
if (const RefState *RS = State->get<RegionState>(sym)) {
if ((RS->isAllocated() || RS->isAllocatedOfSizeZero()) &&
CheckRefState(RS)) {
- State = State->remove<RegionState>(sym);
State = State->set<RegionState>(sym, RefState::getEscaped(RS));
}
}
@@ -3087,47 +3125,37 @@ markReleased(ProgramStateRef State, SymbolRef Sym, const Expr *Origin) {
} // end namespace ento
} // end namespace clang
-void ento::registerNewDeleteLeaksChecker(CheckerManager &mgr) {
- registerCStringCheckerBasic(mgr);
- MallocChecker *checker = mgr.registerChecker<MallocChecker>();
- checker->IsOptimistic = mgr.getAnalyzerOptions().getCheckerBooleanOption(
- "Optimistic", false, checker);
- checker->ChecksEnabled[MallocChecker::CK_NewDeleteLeaksChecker] = true;
- checker->CheckNames[MallocChecker::CK_NewDeleteLeaksChecker] =
- mgr.getCurrentCheckName();
- // We currently treat NewDeleteLeaks checker as a subchecker of NewDelete
- // checker.
- if (!checker->ChecksEnabled[MallocChecker::CK_NewDeleteChecker]) {
- checker->ChecksEnabled[MallocChecker::CK_NewDeleteChecker] = true;
- // FIXME: This does not set the correct name, but without this workaround
- // no name will be set at all.
- checker->CheckNames[MallocChecker::CK_NewDeleteChecker] =
- mgr.getCurrentCheckName();
- }
-}
-
// Intended to be used in InnerPointerChecker to register the part of
// MallocChecker connected to it.
void ento::registerInnerPointerCheckerAux(CheckerManager &mgr) {
- registerCStringCheckerBasic(mgr);
- MallocChecker *checker = mgr.registerChecker<MallocChecker>();
- checker->IsOptimistic = mgr.getAnalyzerOptions().getCheckerBooleanOption(
- "Optimistic", false, checker);
- checker->ChecksEnabled[MallocChecker::CK_InnerPointerChecker] = true;
- checker->CheckNames[MallocChecker::CK_InnerPointerChecker] =
- mgr.getCurrentCheckName();
+ MallocChecker *checker = mgr.getChecker<MallocChecker>();
+ checker->ChecksEnabled[MallocChecker::CK_InnerPointerChecker] = true;
+ checker->CheckNames[MallocChecker::CK_InnerPointerChecker] =
+ mgr.getCurrentCheckName();
+}
+
+void ento::registerDynamicMemoryModeling(CheckerManager &mgr) {
+ auto *checker = mgr.registerChecker<MallocChecker>();
+ checker->IsOptimistic = mgr.getAnalyzerOptions().getCheckerBooleanOption(
+ checker, "Optimistic");
+}
+
+bool ento::shouldRegisterDynamicMemoryModeling(const LangOptions &LO) {
+ return true;
}
#define REGISTER_CHECKER(name) \
void ento::register##name(CheckerManager &mgr) { \
- registerCStringCheckerBasic(mgr); \
- MallocChecker *checker = mgr.registerChecker<MallocChecker>(); \
- checker->IsOptimistic = mgr.getAnalyzerOptions().getCheckerBooleanOption( \
- "Optimistic", false, checker); \
+ MallocChecker *checker = mgr.getChecker<MallocChecker>(); \
checker->ChecksEnabled[MallocChecker::CK_##name] = true; \
checker->CheckNames[MallocChecker::CK_##name] = mgr.getCurrentCheckName(); \
+ } \
+ \
+ bool ento::shouldRegister##name(const LangOptions &LO) { \
+ return true; \
}
REGISTER_CHECKER(MallocChecker)
REGISTER_CHECKER(NewDeleteChecker)
+REGISTER_CHECKER(NewDeleteLeaksChecker)
REGISTER_CHECKER(MismatchedDeallocatorChecker)
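As a hedged illustration of the newly modeled kfree (the declarations below are stand-ins in the style of the analyzer's test suite, not real kernel headers):

typedef __typeof(sizeof(int)) size_t;
void *kmalloc(size_t size, unsigned flags);
void kfree(const void *objp);

void use_after_kfree(unsigned flags) {
  char *buf = kmalloc(10, flags);
  if (!buf)
    return;
  kfree(buf);    // kfree is now recognized as a deallocator
  buf[0] = 'x';  // use of memory after it is freed: reported
}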
diff --git a/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
index d02ed48bceaa..4fd06f24c5bc 100644
--- a/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
@@ -1,9 +1,8 @@
// MallocOverflowSecurityChecker.cpp - Check for malloc overflows -*- C++ -*-=//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -334,7 +333,10 @@ void MallocOverflowSecurityChecker::checkASTCodeBody(const Decl *D,
OutputPossibleOverflows(PossibleMallocOverflows, D, BR, mgr);
}
-void
-ento::registerMallocOverflowSecurityChecker(CheckerManager &mgr) {
+void ento::registerMallocOverflowSecurityChecker(CheckerManager &mgr) {
mgr.registerChecker<MallocOverflowSecurityChecker>();
}
+
+bool ento::shouldRegisterMallocOverflowSecurityChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
index bb245d82bc2b..2eb4d7141e28 100644
--- a/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
@@ -1,9 +1,8 @@
// MallocSizeofChecker.cpp - Check for dubious malloc arguments ---*- C++ -*-=//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -250,3 +249,7 @@ public:
void ento::registerMallocSizeofChecker(CheckerManager &mgr) {
mgr.registerChecker<MallocSizeofChecker>();
}
+
+bool ento::shouldRegisterMallocSizeofChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp b/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
index e3b24f20b0f0..270efede8385 100644
--- a/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
@@ -1,9 +1,8 @@
// MmapWriteExecChecker.cpp - Check for the prot argument -----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -83,8 +82,12 @@ void ento::registerMmapWriteExecChecker(CheckerManager &mgr) {
mgr.registerChecker<MmapWriteExecChecker>();
Mwec->ProtExecOv =
mgr.getAnalyzerOptions()
- .getCheckerIntegerOption("MmapProtExec", 0x04, Mwec);
+ .getCheckerIntegerOption(Mwec, "MmapProtExec");
Mwec->ProtReadOv =
mgr.getAnalyzerOptions()
- .getCheckerIntegerOption("MmapProtRead", 0x01, Mwec);
+ .getCheckerIntegerOption(Mwec, "MmapProtRead");
+}
+
+bool ento::shouldRegisterMmapWriteExecChecker(const LangOptions &LO) {
+ return true;
}
diff --git a/lib/StaticAnalyzer/Checkers/Move.h b/lib/StaticAnalyzer/Checkers/Move.h
new file mode 100644
index 000000000000..10644a8fcb37
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/Move.h
@@ -0,0 +1,30 @@
+//=== Move.h - Tracking moved-from objects. ------------------------*- C++ -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the inter-checker API for the use-after-move checker. It allows
+// dependent checkers to figure out if an object is in a moved-from state.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MOVE_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MOVE_H
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+
+namespace clang {
+namespace ento {
+namespace move {
+
+/// Returns true if the object is known to have been recently std::moved.
+bool isMovedFrom(ProgramStateRef State, const MemRegion *Region);
+
+} // namespace move
+} // namespace ento
+} // namespace clang
+
+#endif // LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MOVE_H
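A hypothetical usage sketch from a dependent checker (the function name is an assumption; only move::isMovedFrom comes from the header above):

#include "Move.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"

using namespace clang;
using namespace ento;

// Suppress a secondary diagnostic when the object is already moved-from;
// the use-after-move checker owns that report.
static bool shouldSkipReport(ProgramStateRef State, const MemRegion *MR) {
  return MR && move::isMovedFrom(State, MR);
}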
diff --git a/lib/StaticAnalyzer/Checkers/MoveChecker.cpp b/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
index 6efa2dfbe5b4..d8a9af78536a 100644
--- a/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
@@ -1,9 +1,8 @@
// MoveChecker.cpp - Check use of moved-from objects. - C++ ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -14,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ExprCXX.h"
+#include "clang/Driver/DriverDiagnostic.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -187,13 +187,17 @@ private:
AggressivenessKind Aggressiveness;
public:
- void setAggressiveness(StringRef Str) {
+ void setAggressiveness(StringRef Str, CheckerManager &Mgr) {
Aggressiveness =
llvm::StringSwitch<AggressivenessKind>(Str)
.Case("KnownsOnly", AK_KnownsOnly)
.Case("KnownsAndLocals", AK_KnownsAndLocals)
.Case("All", AK_All)
- .Default(AK_KnownsAndLocals); // A sane default.
+ .Default(AK_Invalid);
+
+ if (Aggressiveness == AK_Invalid)
+ Mgr.reportInvalidCheckerOptionValue(this, "WarnOn",
+ "either \"KnownsOnly\", \"KnownsAndLocals\" or \"All\" string value");
};
private:
@@ -223,6 +227,18 @@ private:
REGISTER_MAP_WITH_PROGRAMSTATE(TrackedRegionMap, const MemRegion *, RegionState)
+// Define the inter-checker API.
+namespace clang {
+namespace ento {
+namespace move {
+bool isMovedFrom(ProgramStateRef State, const MemRegion *Region) {
+ const RegionState *RS = State->get<TrackedRegionMap>(Region);
+ return RS && (RS->isMoved() || RS->isReported());
+}
+} // namespace move
+} // namespace ento
+} // namespace clang
+
// If a region is removed, all of the subregions need to be removed too.
static ProgramStateRef removeFromState(ProgramStateRef State,
const MemRegion *Region) {
@@ -502,9 +518,9 @@ bool MoveChecker::isStateResetMethod(const CXXMethodDecl *MethodDec) const {
std::string MethodName = MethodDec->getName().lower();
// TODO: Some of these methods (e.g., resize) are not always resetting
// the state, so we should consider looking at the arguments.
- if (MethodName == "reset" || MethodName == "clear" ||
- MethodName == "destroy" || MethodName == "resize" ||
- MethodName == "shrink")
+ if (MethodName == "assign" || MethodName == "clear" ||
+ MethodName == "destroy" || MethodName == "reset" ||
+ MethodName == "resize" || MethodName == "shrink")
return true;
}
return false;
@@ -736,5 +752,9 @@ void MoveChecker::printState(raw_ostream &Out, ProgramStateRef State,
void ento::registerMoveChecker(CheckerManager &mgr) {
MoveChecker *chk = mgr.registerChecker<MoveChecker>();
chk->setAggressiveness(
- mgr.getAnalyzerOptions().getCheckerStringOption("WarnOn", "", chk));
+ mgr.getAnalyzerOptions().getCheckerStringOption(chk, "WarnOn"), mgr);
+}
+
+bool ento::shouldRegisterMoveChecker(const LangOptions &LO) {
+ return true;
}
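A brief sketch of what the extended state-reset list above means in practice (illustrative, not from the patch):

#include <string>
#include <utility>

void demo() {
  std::string s = "hello";
  std::string t = std::move(s); // 's' becomes moved-from
  s.assign("fresh");            // 'assign' now resets the moved-from state
  char c = s[0];                // no use-after-move warning after the reset
  (void)t; (void)c;
}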
diff --git a/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp b/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
index 4ed1b25cb09e..6fc7c17bc42f 100644
--- a/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
@@ -1,9 +1,8 @@
//=- NSAutoreleasePoolChecker.cpp --------------------------------*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -76,6 +75,9 @@ void NSAutoreleasePoolChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
}
void ento::registerNSAutoreleasePoolChecker(CheckerManager &mgr) {
- if (mgr.getLangOpts().getGC() != LangOptions::NonGC)
- mgr.registerChecker<NSAutoreleasePoolChecker>();
+ mgr.registerChecker<NSAutoreleasePoolChecker>();
+}
+
+bool ento::shouldRegisterNSAutoreleasePoolChecker(const LangOptions &LO) {
+ return LO.getGC() != LangOptions::NonGC;
}
diff --git a/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp b/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
index 06c43c6b9470..5cec012258c1 100644
--- a/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
@@ -1,9 +1,8 @@
//=- NSErrorChecker.cpp - Coding conventions for uses of NSError -*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -308,16 +307,30 @@ static bool IsCFError(QualType T, IdentifierInfo *II) {
return TT->getDecl()->getIdentifier() == II;
}
+void ento::registerNSOrCFErrorDerefChecker(CheckerManager &mgr) {
+ mgr.registerChecker<NSOrCFErrorDerefChecker>();
+}
+
+bool ento::shouldRegisterNSOrCFErrorDerefChecker(const LangOptions &LO) {
+ return true;
+}
+
void ento::registerNSErrorChecker(CheckerManager &mgr) {
mgr.registerChecker<NSErrorMethodChecker>();
- NSOrCFErrorDerefChecker *checker =
- mgr.registerChecker<NSOrCFErrorDerefChecker>();
+ NSOrCFErrorDerefChecker *checker = mgr.getChecker<NSOrCFErrorDerefChecker>();
checker->ShouldCheckNSError = true;
}
+bool ento::shouldRegisterNSErrorChecker(const LangOptions &LO) {
+ return true;
+}
+
void ento::registerCFErrorChecker(CheckerManager &mgr) {
mgr.registerChecker<CFErrorFunctionChecker>();
- NSOrCFErrorDerefChecker *checker =
- mgr.registerChecker<NSOrCFErrorDerefChecker>();
+ NSOrCFErrorDerefChecker *checker = mgr.getChecker<NSOrCFErrorDerefChecker>();
checker->ShouldCheckCFError = true;
}
+
+bool ento::shouldRegisterCFErrorChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp b/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
index 83d4b5b0758b..fc34255bf6c9 100644
--- a/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
@@ -1,9 +1,8 @@
//=== NoReturnFunctionChecker.cpp -------------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -143,3 +142,7 @@ void NoReturnFunctionChecker::checkPostObjCMessage(const ObjCMethodCall &Msg,
void ento::registerNoReturnFunctionChecker(CheckerManager &mgr) {
mgr.registerChecker<NoReturnFunctionChecker>();
}
+
+bool ento::shouldRegisterNoReturnFunctionChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp b/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
index 3c4363b6850e..bf6b3e3e87cf 100644
--- a/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
@@ -1,9 +1,8 @@
//===--- NonNullParamChecker.cpp - Undefined arguments checker -*- C++ -*--===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -217,3 +216,7 @@ std::unique_ptr<BugReport> NonNullParamChecker::genReportReferenceToNullPointer(
void ento::registerNonNullParamChecker(CheckerManager &mgr) {
mgr.registerChecker<NonNullParamChecker>();
}
+
+bool ento::shouldRegisterNonNullParamChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp b/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
index ce9e950aa9ba..43dbe57b8432 100644
--- a/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
@@ -1,9 +1,8 @@
//==- NonnullGlobalConstantsChecker.cpp ---------------------------*- C++ -*--//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -107,14 +106,21 @@ bool NonnullGlobalConstantsChecker::isGlobalConstString(SVal V) const {
return true;
// Look through the typedefs.
- while (auto *T = dyn_cast<TypedefType>(Ty)) {
- Ty = T->getDecl()->getUnderlyingType();
-
- // It is sufficient for any intermediate typedef
- // to be classified const.
- HasConst = HasConst || Ty.isConstQualified();
- if (isNonnullType(Ty) && HasConst)
- return true;
+ while (const Type *T = Ty.getTypePtr()) {
+ if (const auto *TT = dyn_cast<TypedefType>(T)) {
+ Ty = TT->getDecl()->getUnderlyingType();
+ // It is sufficient for any intermediate typedef
+ // to be classified const.
+ HasConst = HasConst || Ty.isConstQualified();
+ if (isNonnullType(Ty) && HasConst)
+ return true;
+ } else if (const auto *AT = dyn_cast<AttributedType>(T)) {
+ if (AT->getAttrKind() == attr::TypeNonNull)
+ return true;
+ Ty = AT->getModifiedType();
+ } else {
+ return false;
+ }
}
return false;
}
@@ -138,3 +144,7 @@ bool NonnullGlobalConstantsChecker::isNonnullType(QualType Ty) const {
void ento::registerNonnullGlobalConstantsChecker(CheckerManager &Mgr) {
Mgr.registerChecker<NonnullGlobalConstantsChecker>();
}
+
+bool ento::shouldRegisterNonnullGlobalConstantsChecker(const LangOptions &LO) {
+ return true;
+}
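A hedged illustration of the new AttributedType handling (the typedef and global below are assumptions, not from the patch; _Nonnull is the Clang nullability qualifier):

typedef const char * _Nonnull nonnull_cstr; // nullability carried by the type
extern nonnull_cstr kGreeting;              // hypothetical global constant

int first_char(void) {
  return *kGreeting; // the loaded pointer is now assumed to be non-null
}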
diff --git a/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp b/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
index e535d1ae27ac..af21c84b995b 100644
--- a/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
@@ -1,9 +1,8 @@
-//== Nullabilityhecker.cpp - Nullability checker ----------------*- C++ -*--==//
+//===-- NullabilityChecker.cpp - Nullability checker ----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -479,7 +478,7 @@ void NullabilityChecker::checkEvent(ImplicitNullDerefEvent Event) const {
return;
const MemRegion *Region =
- getTrackRegion(Event.Location, /*CheckSuperregion=*/true);
+ getTrackRegion(Event.Location, /*CheckSuperRegion=*/true);
if (!Region)
return;
@@ -1192,16 +1191,28 @@ void NullabilityChecker::printState(raw_ostream &Out, ProgramStateRef State,
}
}
+void ento::registerNullabilityBase(CheckerManager &mgr) {
+ mgr.registerChecker<NullabilityChecker>();
+}
+
+bool ento::shouldRegisterNullabilityBase(const LangOptions &LO) {
+ return true;
+}
+
#define REGISTER_CHECKER(name, trackingRequired) \
void ento::register##name##Checker(CheckerManager &mgr) { \
- NullabilityChecker *checker = mgr.registerChecker<NullabilityChecker>(); \
+ NullabilityChecker *checker = mgr.getChecker<NullabilityChecker>(); \
checker->Filter.Check##name = true; \
checker->Filter.CheckName##name = mgr.getCurrentCheckName(); \
checker->NeedTracking = checker->NeedTracking || trackingRequired; \
checker->NoDiagnoseCallsToSystemHeaders = \
checker->NoDiagnoseCallsToSystemHeaders || \
- mgr.getAnalyzerOptions().getCheckerBooleanOption( \
- "NoDiagnoseCallsToSystemHeaders", false, checker, true); \
+ mgr.getAnalyzerOptions().getCheckerBooleanOption( \
+ checker, "NoDiagnoseCallsToSystemHeaders", true); \
+ } \
+ \
+ bool ento::shouldRegister##name##Checker(const LangOptions &LO) { \
+ return true; \
}
// The checks are likely to be turned on by default and it is possible to do
diff --git a/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp b/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
index 4e3a7205f1f4..1053424ae6fa 100644
--- a/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
@@ -1,9 +1,8 @@
//===- NumberObjectConversionChecker.cpp -------------------------*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -347,5 +346,9 @@ void ento::registerNumberObjectConversionChecker(CheckerManager &Mgr) {
NumberObjectConversionChecker *Chk =
Mgr.registerChecker<NumberObjectConversionChecker>();
Chk->Pedantic =
- Mgr.getAnalyzerOptions().getCheckerBooleanOption("Pedantic", false, Chk);
+ Mgr.getAnalyzerOptions().getCheckerBooleanOption(Chk, "Pedantic");
+}
+
+bool ento::shouldRegisterNumberObjectConversionChecker(const LangOptions &LO) {
+ return true;
}
diff --git a/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp b/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp
new file mode 100644
index 000000000000..5b9895c338d8
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp
@@ -0,0 +1,90 @@
+//===- OSObjectCStyleCast.cpp ------------------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the OSObjectCStyleCast checker, which checks for C-style casts
+// of OSObjects. Such casts almost always indicate a code smell,
+// as an explicit static or dynamic cast should be used instead.
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/Support/Debug.h"
+
+using namespace clang;
+using namespace ento;
+using namespace ast_matchers;
+
+namespace {
+
+const char *WarnAtNode = "OSObjCast";
+
+class OSObjectCStyleCastChecker : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D,
+ AnalysisManager &AM,
+ BugReporter &BR) const;
+};
+
+static void emitDiagnostics(const BoundNodes &Nodes,
+ BugReporter &BR,
+ AnalysisDeclContext *ADC,
+ const OSObjectCStyleCastChecker *Checker) {
+ const auto *CE = Nodes.getNodeAs<CastExpr>(WarnAtNode);
+ assert(CE);
+
+ std::string Diagnostics;
+ llvm::raw_string_ostream OS(Diagnostics);
+ OS << "C-style cast of OSObject. Use OSDynamicCast instead.";
+
+ BR.EmitBasicReport(
+ ADC->getDecl(),
+ Checker,
+ /*Name=*/"OSObject C-Style Cast",
+ /*BugCategory=*/"Security",
+ OS.str(),
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), ADC),
+ CE->getSourceRange());
+}
+
+static auto hasTypePointingTo(DeclarationMatcher DeclM)
+ -> decltype(hasType(pointerType())) {
+ return hasType(pointerType(pointee(hasDeclaration(DeclM))));
+}
+
+void OSObjectCStyleCastChecker::checkASTCodeBody(const Decl *D, AnalysisManager &AM,
+ BugReporter &BR) const {
+
+ AnalysisDeclContext *ADC = AM.getAnalysisDeclContext(D);
+
+ auto DynamicCastM = callExpr(callee(functionDecl(hasName("safeMetaCast"))));
+
+ auto OSObjTypeM = hasTypePointingTo(cxxRecordDecl(isDerivedFrom("OSMetaClassBase")));
+ auto OSObjSubclassM = hasTypePointingTo(
+ cxxRecordDecl(isDerivedFrom("OSObject")));
+
+ auto CastM = cStyleCastExpr(
+ allOf(hasSourceExpression(allOf(OSObjTypeM, unless(DynamicCastM))),
+ OSObjSubclassM)).bind(WarnAtNode);
+
+ auto Matches = match(stmt(forEachDescendant(CastM)), *D->getBody(), AM.getASTContext());
+ for (BoundNodes Match : Matches)
+ emitDiagnostics(Match, BR, ADC, this);
+}
+}
+
+void ento::registerOSObjectCStyleCast(CheckerManager &Mgr) {
+ Mgr.registerChecker<OSObjectCStyleCastChecker>();
+}
+
+bool ento::shouldRegisterOSObjectCStyleCast(const LangOptions &LO) {
+ return true;
+}
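A sketch of the code pattern the matcher above flags (the classes are stubbed stand-ins for the libkern hierarchy, not real headers):

struct OSMetaClassBase {};
struct OSObject : OSMetaClassBase {};
struct OSArray : OSObject {};

void takeArray(OSObject *obj) {
  OSArray *a = (OSArray *)obj; // C-style cast of an OSObject: flagged;
                               // a checked OSDynamicCast is preferred.
  (void)a;
}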
diff --git a/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
index 185b57575cb0..bd8cfb14680f 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
@@ -1,9 +1,8 @@
//== ObjCAtSyncChecker.cpp - nil mutex checker for @synchronized -*- C++ -*--=//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -89,6 +88,9 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
}
void ento::registerObjCAtSyncChecker(CheckerManager &mgr) {
- if (mgr.getLangOpts().ObjC)
- mgr.registerChecker<ObjCAtSyncChecker>();
+ mgr.registerChecker<ObjCAtSyncChecker>();
+}
+
+bool ento::shouldRegisterObjCAtSyncChecker(const LangOptions &LO) {
+ return LO.ObjC;
}
diff --git a/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
index 0424958f8e65..d2371fe60d21 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
@@ -1,9 +1,8 @@
//===- ObjCAutoreleaseWriteChecker.cpp ----------------------------*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -137,7 +136,7 @@ static void emitDiagnostics(BoundNodes &Match, const Decl *D, BugReporter &BR,
ADC->getDecl(), Checker,
/*Name=*/(llvm::Twine(ActionMsg)
+ " autoreleasing out parameter inside autorelease pool").str(),
- /*Category=*/"Memory",
+ /*BugCategory=*/"Memory",
(llvm::Twine(ActionMsg) + " autoreleasing out parameter " +
(IsCapture ? "'" + PVD->getName() + "'" + " " : "") + "inside " +
"autorelease pool that may exit before " + Name + " returns; consider "
@@ -207,3 +206,7 @@ void ObjCAutoreleaseWriteChecker::checkASTCodeBody(const Decl *D,
void ento::registerAutoreleaseWriteChecker(CheckerManager &Mgr) {
Mgr.registerChecker<ObjCAutoreleaseWriteChecker>();
}
+
+bool ento::shouldRegisterAutoreleaseWriteChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
index 34ce47823d51..4450c464f89d 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
@@ -1,9 +1,8 @@
//== ObjCContainersASTChecker.cpp - CoreFoundation containers API *- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -172,3 +171,7 @@ public:
void ento::registerObjCContainersASTChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCContainersASTChecker>();
}
+
+bool ento::shouldRegisterObjCContainersASTChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
index 1c8c0d8dedda..f69a3944a56c 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
@@ -1,9 +1,8 @@
//== ObjCContainersChecker.cpp - Path sensitive checker for CFArray *- C++ -*=//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -187,3 +186,7 @@ void ObjCContainersChecker::printState(raw_ostream &OS, ProgramStateRef State,
void ento::registerObjCContainersChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCContainersChecker>();
}
+
+bool ento::shouldRegisterObjCContainersChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
index d383302b2790..33e4d2af000d 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
@@ -1,9 +1,8 @@
//==- ObjCMissingSuperCallChecker.cpp - Check missing super-calls in ObjC --==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -222,6 +221,9 @@ void ento::registerObjCSuperCallChecker(CheckerManager &Mgr) {
Mgr.registerChecker<ObjCSuperCallChecker>();
}
+bool ento::shouldRegisterObjCSuperCallChecker(const LangOptions &LO) {
+ return true;
+}
/*
ToDo list for expanding this check in the future, the list is not exhaustive.
diff --git a/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp
index 018d3fcfceb9..9a49200545e3 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp
@@ -1,9 +1,8 @@
//==- ObjCPropertyChecker.cpp - Check ObjC properties ------------*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -79,3 +78,7 @@ void ObjCPropertyChecker::checkCopyMutable(const ObjCPropertyDecl *D,
void ento::registerObjCPropertyChecker(CheckerManager &Mgr) {
Mgr.registerChecker<ObjCPropertyChecker>();
}
+
+bool ento::shouldRegisterObjCPropertyChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
index efa804220765..767b7bf4063c 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
@@ -1,9 +1,8 @@
//== ObjCSelfInitChecker.cpp - Checker for 'self' initialization -*- C++ -*--=//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -437,3 +436,7 @@ static bool isInitMessage(const ObjCMethodCall &Call) {
void ento::registerObjCSelfInitChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCSelfInitChecker>();
}
+
+bool ento::shouldRegisterObjCSelfInitChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
index 9058784dd345..f435f00c08e7 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
@@ -1,9 +1,8 @@
//===- ObjCSuperDeallocChecker.cpp - Check correct use of [super dealloc] -===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -282,8 +281,9 @@ SuperDeallocBRVisitor::VisitNode(const ExplodedNode *Succ,
//===----------------------------------------------------------------------===//
void ento::registerObjCSuperDeallocChecker(CheckerManager &Mgr) {
- const LangOptions &LangOpts = Mgr.getLangOpts();
- if (LangOpts.getGC() == LangOptions::GCOnly || LangOpts.ObjCAutoRefCount)
- return;
Mgr.registerChecker<ObjCSuperDeallocChecker>();
}
+
+bool ento::shouldRegisterObjCSuperDeallocChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
index 7f7b45316087..4b39a97c7e8d 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
@@ -1,9 +1,8 @@
//==- ObjCUnusedIVarsChecker.cpp - Check for unused ivars --------*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -186,3 +185,7 @@ public:
void ento::registerObjCUnusedIvarsChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCUnusedIvarsChecker>();
}
+
+bool ento::shouldRegisterObjCUnusedIvarsChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp b/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
index 211db392bf71..0aa410de15ff 100644
--- a/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
@@ -1,9 +1,8 @@
//=======- PaddingChecker.cpp ------------------------------------*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -17,6 +16,7 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Driver/DriverDiagnostic.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -33,17 +33,14 @@ namespace {
class PaddingChecker : public Checker<check::ASTDecl<TranslationUnitDecl>> {
private:
mutable std::unique_ptr<BugType> PaddingBug;
- mutable int64_t AllowedPad;
mutable BugReporter *BR;
public:
+ int64_t AllowedPad;
+
void checkASTDecl(const TranslationUnitDecl *TUD, AnalysisManager &MGR,
BugReporter &BRArg) const {
BR = &BRArg;
- AllowedPad =
- MGR.getAnalyzerOptions()
- .getCheckerIntegerOption("AllowedPad", 24, this);
- assert(AllowedPad >= 0 && "AllowedPad option should be non-negative");
// The calls to checkAST* from AnalysisConsumer don't
// visit template instantiations or lambda classes. We
@@ -277,15 +274,13 @@ public:
long long CurAlignmentBits = 1ull << (std::min)(TrailingZeros, 62u);
CharUnits CurAlignment = CharUnits::fromQuantity(CurAlignmentBits);
FieldInfo InsertPoint = {CurAlignment, CharUnits::Zero(), nullptr};
- auto CurBegin = Fields.begin();
- auto CurEnd = Fields.end();
// In the typical case, this will find the last element
// of the vector. We won't find a middle element unless
// we started on a poorly aligned address or have an overly
// aligned field.
- auto Iter = std::upper_bound(CurBegin, CurEnd, InsertPoint);
- if (Iter != CurBegin) {
+ auto Iter = llvm::upper_bound(Fields, InsertPoint);
+ if (Iter != Fields.begin()) {
// We found a field that we can layout with the current alignment.
--Iter;
NewOffset += Iter->Size;
@@ -349,5 +344,14 @@ public:
} // namespace
void ento::registerPaddingChecker(CheckerManager &Mgr) {
- Mgr.registerChecker<PaddingChecker>();
+ auto *Checker = Mgr.registerChecker<PaddingChecker>();
+ Checker->AllowedPad = Mgr.getAnalyzerOptions()
+ .getCheckerIntegerOption(Checker, "AllowedPad");
+ if (Checker->AllowedPad < 0)
+ Mgr.reportInvalidCheckerOptionValue(
+ Checker, "AllowedPad", "a non-negative value");
+}
+
+bool ento::shouldRegisterPaddingChecker(const LangOptions &LO) {
+ return true;
}
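
For reference only (not from the patch): a minimal record of the kind PaddingChecker is built to flag, assuming an LP64 target and an AllowedPad setting below the excess padding shown; the type and field names are illustrative.

    // Field order forces 14 bytes of padding; the optimal order {p, a, b}
    // would need only 6, so 8 bytes are wasted.
    struct Wasteful {
      char  a;  // 1 byte, then 7 bytes of padding to align 'p'
      void *p;  // 8 bytes
      char  b;  // 1 byte, then 7 bytes of tail padding
    };          // sizeof(Wasteful) == 24 instead of an optimal 16
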
diff --git a/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp b/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
index de3a16ebc729..03c3f4dd2357 100644
--- a/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
@@ -1,9 +1,8 @@
//=== PointerArithChecker.cpp - Pointer arithmetic checker -----*- C++ -*--===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -343,3 +342,7 @@ void PointerArithChecker::checkPreStmt(const BinaryOperator *BOp,
void ento::registerPointerArithChecker(CheckerManager &mgr) {
mgr.registerChecker<PointerArithChecker>();
}
+
+bool ento::shouldRegisterPointerArithChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/PointerIterationChecker.cpp b/lib/StaticAnalyzer/Checkers/PointerIterationChecker.cpp
new file mode 100644
index 000000000000..307e59b8eebc
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/PointerIterationChecker.cpp
@@ -0,0 +1,100 @@
+//== PointerIterationChecker.cpp ------------------------------- -*- C++ -*--=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines PointerIterationChecker, which checks for non-determinism
+// caused by iterating unordered containers of pointer elements.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+using namespace ast_matchers;
+
+namespace {
+
+// ID of a node at which the diagnostic would be emitted.
+constexpr llvm::StringLiteral WarnAtNode = "iter";
+
+class PointerIterationChecker : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D,
+ AnalysisManager &AM,
+ BugReporter &BR) const;
+};
+
+static void emitDiagnostics(const BoundNodes &Match, const Decl *D,
+ BugReporter &BR, AnalysisManager &AM,
+ const PointerIterationChecker *Checker) {
+ auto *ADC = AM.getAnalysisDeclContext(D);
+
+ const auto *MarkedStmt = Match.getNodeAs<Stmt>(WarnAtNode);
+ assert(MarkedStmt);
+
+ auto Range = MarkedStmt->getSourceRange();
+ auto Location = PathDiagnosticLocation::createBegin(MarkedStmt,
+ BR.getSourceManager(),
+ ADC);
+ std::string Diagnostics;
+ llvm::raw_string_ostream OS(Diagnostics);
+ OS << "Iteration of pointer-like elements "
+ << "can result in non-deterministic ordering";
+
+ BR.EmitBasicReport(ADC->getDecl(), Checker,
+ "Iteration of pointer-like elements", "Non-determinism",
+ OS.str(), Location, Range);
+}
+
+// Assumption: Iteration of ordered containers of pointers is deterministic.
+
+// TODO: Currently, we only check for std::unordered_set. Other unordered
+// containers like std::unordered_map also need to be handled.
+
+// TODO: Currently, we do not check what the for loop does with the iterated
+// pointer values. Not all iterations may cause non-determinism. For example,
+// counting or summing up the elements should not be non-deterministic.
+
+auto matchUnorderedIterWithPointers() -> decltype(decl()) {
+
+ auto UnorderedContainerM = declRefExpr(to(varDecl(hasType(
+ recordDecl(hasName("std::unordered_set")
+ )))));
+
+ auto PointerTypeM = varDecl(hasType(hasCanonicalType(pointerType())));
+
+ auto PointerIterM = stmt(cxxForRangeStmt(
+ hasLoopVariable(PointerTypeM),
+ hasRangeInit(UnorderedContainerM)
+ )).bind(WarnAtNode);
+
+ return decl(forEachDescendant(PointerIterM));
+}
+
+void PointerIterationChecker::checkASTCodeBody(const Decl *D,
+ AnalysisManager &AM,
+ BugReporter &BR) const {
+ auto MatcherM = matchUnorderedIterWithPointers();
+
+ auto Matches = match(MatcherM, *D, AM.getASTContext());
+ for (const auto &Match : Matches)
+ emitDiagnostics(Match, D, BR, AM, this);
+}
+
+} // end of anonymous namespace
+
+void ento::registerPointerIterationChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<PointerIterationChecker>();
+}
+
+bool ento::shouldRegisterPointerIterationChecker(const LangOptions &LO) {
+ return LO.CPlusPlus;
+}
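
For reference only (not from the patch): a minimal snippet of the pattern the matcher above targets; consume() and the parameter names are illustrative.

    #include <unordered_set>

    void consume(int *P);  // hypothetical callee, assumed to exist elsewhere

    void visit(const std::unordered_set<int *> &Ptrs) {
      // Range-for over an unordered container of pointers: the iteration order
      // depends on where the pointees were allocated, so any order-sensitive
      // work done here can differ from run to run.
      for (int *P : Ptrs)
        consume(P);
    }
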
diff --git a/lib/StaticAnalyzer/Checkers/PointerSortingChecker.cpp b/lib/StaticAnalyzer/Checkers/PointerSortingChecker.cpp
new file mode 100644
index 000000000000..586d9d3af2a6
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/PointerSortingChecker.cpp
@@ -0,0 +1,113 @@
+//== PointerSortingChecker.cpp --------------------------------- -*- C++ -*--=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines PointerSortingChecker, which checks for non-determinism
+// caused by sorting containers of pointer-like elements.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+using namespace ast_matchers;
+
+namespace {
+
+// ID of a node at which the diagnostic would be emitted.
+constexpr llvm::StringLiteral WarnAtNode = "sort";
+
+class PointerSortingChecker : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D,
+ AnalysisManager &AM,
+ BugReporter &BR) const;
+};
+
+static void emitDiagnostics(const BoundNodes &Match, const Decl *D,
+ BugReporter &BR, AnalysisManager &AM,
+ const PointerSortingChecker *Checker) {
+ auto *ADC = AM.getAnalysisDeclContext(D);
+
+ const auto *MarkedStmt = Match.getNodeAs<CallExpr>(WarnAtNode);
+ assert(MarkedStmt);
+
+ auto Range = MarkedStmt->getSourceRange();
+ auto Location = PathDiagnosticLocation::createBegin(MarkedStmt,
+ BR.getSourceManager(),
+ ADC);
+ std::string Diagnostics;
+ llvm::raw_string_ostream OS(Diagnostics);
+ OS << "Sorting pointer-like elements "
+ << "can result in non-deterministic ordering";
+
+ BR.EmitBasicReport(ADC->getDecl(), Checker,
+ "Sorting of pointer-like elements", "Non-determinism",
+ OS.str(), Location, Range);
+}
+
+auto callsName(const char *FunctionName) -> decltype(callee(functionDecl())) {
+ return callee(functionDecl(hasName(FunctionName)));
+}
+
+// FIXME: Currently we simply check whether std::sort is used with pointer-like
+// elements. This approach can have a high false-positive rate: using std::sort,
+// std::unique and then erase is a common technique for deduplicating a
+// container (and in some cases may even be quicker than using, say, std::set).
+// If a container holds arbitrary memory addresses (e.g. several sources may
+// hand back the same object more than once) that we only want to process once,
+// we might legitimately use sort-unique-erase, and the sort call will still
+// emit a report.
+auto matchSortWithPointers() -> decltype(decl()) {
+ // Match any of these function calls.
+ auto SortFuncM = anyOf(
+ callsName("std::is_sorted"),
+ callsName("std::nth_element"),
+ callsName("std::partial_sort"),
+ callsName("std::partition"),
+ callsName("std::sort"),
+ callsName("std::stable_partition"),
+ callsName("std::stable_sort")
+ );
+
+ // Match only if the container has pointer-type elements.
+ auto IteratesPointerEltsM = hasArgument(0,
+ hasType(cxxRecordDecl(has(
+ fieldDecl(hasType(hasCanonicalType(
+ pointsTo(hasCanonicalType(pointerType()))
+ )))
+ ))));
+
+ auto PointerSortM = stmt(callExpr(allOf(SortFuncM, IteratesPointerEltsM))
+ ).bind(WarnAtNode);
+
+ return decl(forEachDescendant(PointerSortM));
+}
+
+void PointerSortingChecker::checkASTCodeBody(const Decl *D,
+ AnalysisManager &AM,
+ BugReporter &BR) const {
+ auto MatcherM = matchSortWithPointers();
+
+ auto Matches = match(MatcherM, *D, AM.getASTContext());
+ for (const auto &Match : Matches)
+ emitDiagnostics(Match, D, BR, AM, this);
+}
+
+} // end of anonymous namespace
+
+void ento::registerPointerSortingChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<PointerSortingChecker>();
+}
+
+bool ento::shouldRegisterPointerSortingChecker(const LangOptions &LO) {
+ return LO.CPlusPlus;
+}
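
Likewise, for reference only, a sketch of the pattern the sorting matcher targets; the container and function name are illustrative.

    #include <algorithm>
    #include <vector>

    void order(std::vector<int *> &Ptrs) {
      // Sorting by raw pointer value: the resulting order depends on where the
      // allocator happened to place the pointees, so it can differ between runs.
      std::sort(Ptrs.begin(), Ptrs.end());
    }
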
diff --git a/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp b/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
index 41490e45f241..c9512f4fc42f 100644
--- a/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
@@ -1,9 +1,8 @@
//=== PointerSubChecker.cpp - Pointer subtraction checker ------*- C++ -*--===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -73,3 +72,7 @@ void PointerSubChecker::checkPreStmt(const BinaryOperator *B,
void ento::registerPointerSubChecker(CheckerManager &mgr) {
mgr.registerChecker<PointerSubChecker>();
}
+
+bool ento::shouldRegisterPointerSubChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp b/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
index 66cc37278809..33f677e1c258 100644
--- a/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
@@ -1,9 +1,8 @@
//===--- PthreadLockChecker.cpp - Check for locking problems ---*- C++ -*--===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -481,3 +480,7 @@ void PthreadLockChecker::checkDeadSymbols(SymbolReaper &SymReaper,
void ento::registerPthreadLockChecker(CheckerManager &mgr) {
mgr.registerChecker<PthreadLockChecker>();
}
+
+bool ento::shouldRegisterPthreadLockChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
index 0652af856643..4a3a8dae23a7 100644
--- a/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
@@ -1,9 +1,8 @@
//==-- RetainCountChecker.cpp - Checks for leaks and other issues -*- C++ -*--//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -13,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "RetainCountChecker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
using namespace clang;
using namespace ento;
@@ -29,83 +29,20 @@ const RefVal *getRefBinding(ProgramStateRef State, SymbolRef Sym) {
return State->get<RefBindings>(Sym);
}
-ProgramStateRef setRefBinding(ProgramStateRef State, SymbolRef Sym,
+} // end namespace retaincountchecker
+} // end namespace ento
+} // end namespace clang
+
+static ProgramStateRef setRefBinding(ProgramStateRef State, SymbolRef Sym,
RefVal Val) {
assert(Sym != nullptr);
return State->set<RefBindings>(Sym, Val);
}
-ProgramStateRef removeRefBinding(ProgramStateRef State, SymbolRef Sym) {
+static ProgramStateRef removeRefBinding(ProgramStateRef State, SymbolRef Sym) {
return State->remove<RefBindings>(Sym);
}
-class UseAfterRelease : public RefCountBug {
-public:
- UseAfterRelease(const CheckerBase *checker)
- : RefCountBug(checker, "Use-after-release") {}
-
- const char *getDescription() const override {
- return "Reference-counted object is used after it is released";
- }
-};
-
-class BadRelease : public RefCountBug {
-public:
- BadRelease(const CheckerBase *checker) : RefCountBug(checker, "Bad release") {}
-
- const char *getDescription() const override {
- return "Incorrect decrement of the reference count of an object that is "
- "not owned at this point by the caller";
- }
-};
-
-class DeallocNotOwned : public RefCountBug {
-public:
- DeallocNotOwned(const CheckerBase *checker)
- : RefCountBug(checker, "-dealloc sent to non-exclusively owned object") {}
-
- const char *getDescription() const override {
- return "-dealloc sent to object that may be referenced elsewhere";
- }
-};
-
-class OverAutorelease : public RefCountBug {
-public:
- OverAutorelease(const CheckerBase *checker)
- : RefCountBug(checker, "Object autoreleased too many times") {}
-
- const char *getDescription() const override {
- return "Object autoreleased too many times";
- }
-};
-
-class ReturnedNotOwnedForOwned : public RefCountBug {
-public:
- ReturnedNotOwnedForOwned(const CheckerBase *checker)
- : RefCountBug(checker, "Method should return an owned object") {}
-
- const char *getDescription() const override {
- return "Object with a +0 retain count returned to caller where a +1 "
- "(owning) retain count is expected";
- }
-};
-
-class Leak : public RefCountBug {
-public:
- Leak(const CheckerBase *checker, StringRef name) : RefCountBug(checker, name) {
- // Leaks should not be reported if they are post-dominated by a sink.
- setSuppressOnSink(true);
- }
-
- const char *getDescription() const override { return ""; }
-
- bool isLeak() const override { return true; }
-};
-
-} // end namespace retaincountchecker
-} // end namespace ento
-} // end namespace clang
-
void RefVal::print(raw_ostream &Out) const {
if (!T.isNull())
Out << "Tracked " << T.getAsString() << " | ";
@@ -196,7 +133,7 @@ public:
ProgramStateRef getState() const { return state; }
bool VisitSymbol(SymbolRef sym) override {
- state = state->remove<RefBindings>(sym);
+ state = removeRefBinding(state, sym);
return true;
}
};
@@ -248,7 +185,15 @@ void RetainCountChecker::checkPostStmt(const CastExpr *CE,
if (!BE)
return;
- ArgEffect AE = ArgEffect(IncRef, ObjKind::ObjC);
+ QualType QT = CE->getType();
+ ObjKind K;
+ if (QT->isObjCObjectPointerType()) {
+ K = ObjKind::ObjC;
+ } else {
+ K = ObjKind::CF;
+ }
+
+ ArgEffect AE = ArgEffect(IncRef, K);
switch (BE->getBridgeKind()) {
case OBC_Bridge:
@@ -390,6 +335,31 @@ void RetainCountChecker::checkPostStmt(const ObjCIvarRefExpr *IRE,
C.addTransition(State);
}
+static bool isReceiverUnconsumedSelf(const CallEvent &Call) {
+ if (const auto *MC = dyn_cast<ObjCMethodCall>(&Call)) {
+
+    // If the message is not consumed, we know it will not be used in
+    // an assignment, e.g. "self = [super init]".
+ return MC->getMethodFamily() == OMF_init && MC->isReceiverSelfOrSuper() &&
+ !Call.getLocationContext()
+ ->getAnalysisDeclContext()
+ ->getParentMap()
+ .isConsumedExpr(Call.getOriginExpr());
+ }
+ return false;
+}
+
+const static RetainSummary *getSummary(RetainSummaryManager &Summaries,
+ const CallEvent &Call,
+ QualType ReceiverType) {
+ const Expr *CE = Call.getOriginExpr();
+ AnyCall C =
+ CE ? *AnyCall::forExpr(CE)
+ : AnyCall(cast<CXXDestructorDecl>(Call.getDecl()));
+ return Summaries.getSummary(C, Call.hasNonZeroCallbackArg(),
+ isReceiverUnconsumedSelf(Call), ReceiverType);
+}
+
void RetainCountChecker::checkPostCall(const CallEvent &Call,
CheckerContext &C) const {
RetainSummaryManager &Summaries = getSummaryManager(C);
@@ -405,7 +375,7 @@ void RetainCountChecker::checkPostCall(const CallEvent &Call,
}
}
- const RetainSummary *Summ = Summaries.getSummary(Call, ReceiverType);
+ const RetainSummary *Summ = getSummary(Summaries, Call, ReceiverType);
if (C.wasInlined) {
processSummaryOfInlined(*Summ, Call, C);
@@ -414,20 +384,6 @@ void RetainCountChecker::checkPostCall(const CallEvent &Call,
checkSummary(*Summ, Call, C);
}
-RefCountBug *
-RetainCountChecker::getLeakWithinFunctionBug(const LangOptions &LOpts) const {
- if (!leakWithinFunction)
- leakWithinFunction.reset(new Leak(this, "Leak"));
- return leakWithinFunction.get();
-}
-
-RefCountBug *
-RetainCountChecker::getLeakAtReturnBug(const LangOptions &LOpts) const {
- if (!leakAtReturn)
- leakAtReturn.reset(new Leak(this, "Leak of returned object"));
- return leakAtReturn.get();
-}
-
/// GetReturnType - Used to get the return type of a message expression or
/// function call with the intention of affixing that type to a tracked symbol.
/// While the return type can be queried directly from RetEx, when
@@ -529,12 +485,36 @@ void RetainCountChecker::processSummaryOfInlined(const RetainSummary &Summ,
C.addTransition(state);
}
+static bool isSmartPtrField(const MemRegion *MR) {
+ const auto *TR = dyn_cast<TypedValueRegion>(
+ cast<SubRegion>(MR)->getSuperRegion());
+ return TR && RetainSummaryManager::isKnownSmartPointer(TR->getValueType());
+}
+
+
+/// A value escapes in these possible cases:
+///
+/// - binding to something that is not a memory region;
+/// - binding to a memregion that does not have stack storage;
+/// - binding to a variable that has a destructor attached using CleanupAttr.
+///
+/// We do not currently model what happens when a symbol is
+/// assigned to a struct field, unless it is a known smart-pointer
+/// implementation, which we know is inlined.
+/// FIXME: This could definitely be improved upon.
static bool shouldEscapeRegion(const MemRegion *R) {
+ if (isSmartPtrField(R))
+ return false;
+
+ const auto *VR = dyn_cast<VarRegion>(R);
- // We do not currently model what happens when a symbol is
- // assigned to a struct field, so be conservative here and let the symbol
- // go. TODO: This could definitely be improved upon.
- return !R->hasStackStorage() || !isa<VarRegion>(R);
+ if (!R->hasStackStorage() || !VR)
+ return true;
+
+ const VarDecl *VD = VR->getDecl();
+ if (!VD->hasAttr<CleanupAttr>())
+ return false; // CleanupAttr attaches destructors, which cause escaping.
+ return true;
}
static SmallVector<ProgramStateRef, 2>
@@ -557,6 +537,11 @@ updateOutParameters(ProgramStateRef State, const RetainSummary &Summ,
ProgramStateRef AssumeZeroReturn = State;
if (SplitNecessary) {
+ if (!CE.getResultType()->isScalarType()) {
+ // Structures cannot be assumed. This probably deserves
+ // a compiler warning for invalid annotations.
+ return {State};
+ }
if (auto DL = L.getAs<DefinedOrUnknownSVal>()) {
AssumeNonZeroReturn = AssumeNonZeroReturn->assume(*DL, true);
AssumeZeroReturn = AssumeZeroReturn->assume(*DL, false);
@@ -629,7 +614,6 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ,
// Helper tag for providing diagnostics: indicate whether dealloc was sent
// at this location.
- static CheckerProgramPointTag DeallocSentTag(this, DeallocTagDescription);
bool DeallocSent = false;
for (unsigned idx = 0, e = CallOrMsg.getNumArgs(); idx != e; ++idx) {
@@ -855,6 +839,23 @@ ProgramStateRef RetainCountChecker::updateSymbol(ProgramStateRef state,
return setRefBinding(state, sym, V);
}
+const RefCountBug &
+RetainCountChecker::errorKindToBugKind(RefVal::Kind ErrorKind,
+ SymbolRef Sym) const {
+ switch (ErrorKind) {
+ case RefVal::ErrorUseAfterRelease:
+ return useAfterRelease;
+ case RefVal::ErrorReleaseNotOwned:
+ return releaseNotOwned;
+ case RefVal::ErrorDeallocNotOwned:
+ if (Sym->getType()->getPointeeCXXRecordDecl())
+ return freeNotOwned;
+ return deallocNotOwned;
+ default:
+ llvm_unreachable("Unhandled error.");
+ }
+}
+
void RetainCountChecker::processNonLeakError(ProgramStateRef St,
SourceRange ErrorRange,
RefVal::Kind ErrorKind,
@@ -874,30 +875,9 @@ void RetainCountChecker::processNonLeakError(ProgramStateRef St,
if (!N)
return;
- RefCountBug *BT;
- switch (ErrorKind) {
- default:
- llvm_unreachable("Unhandled error.");
- case RefVal::ErrorUseAfterRelease:
- if (!useAfterRelease)
- useAfterRelease.reset(new UseAfterRelease(this));
- BT = useAfterRelease.get();
- break;
- case RefVal::ErrorReleaseNotOwned:
- if (!releaseNotOwned)
- releaseNotOwned.reset(new BadRelease(this));
- BT = releaseNotOwned.get();
- break;
- case RefVal::ErrorDeallocNotOwned:
- if (!deallocNotOwned)
- deallocNotOwned.reset(new DeallocNotOwned(this));
- BT = deallocNotOwned.get();
- break;
- }
-
- assert(BT);
auto report = llvm::make_unique<RefCountReport>(
- *BT, C.getASTContext().getLangOpts(), N, Sym);
+ errorKindToBugKind(ErrorKind, Sym),
+ C.getASTContext().getLangOpts(), N, Sym);
report->addRange(ErrorRange);
C.emitReport(std::move(report));
}
@@ -906,15 +886,19 @@ void RetainCountChecker::processNonLeakError(ProgramStateRef St,
// Handle the return values of retain-count-related functions.
//===----------------------------------------------------------------------===//
-bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
- // Get the callee. We're only interested in simple C functions.
+bool RetainCountChecker::evalCall(const CallEvent &Call,
+ CheckerContext &C) const {
ProgramStateRef state = C.getState();
- const FunctionDecl *FD = C.getCalleeDecl(CE);
+ const auto *FD = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
if (!FD)
return false;
+ const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return false;
+
RetainSummaryManager &SmrMgr = getSummaryManager(C);
- QualType ResultTy = CE->getCallReturnType(C.getASTContext());
+ QualType ResultTy = Call.getResultType();
// See if the function has 'rc_ownership_trusted_implementation'
// annotate attribute. If it does, we will not inline it.
@@ -932,18 +916,27 @@ bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
// Bind the return value.
if (BSmr == BehaviorSummary::Identity ||
- BSmr == BehaviorSummary::IdentityOrZero) {
- SVal RetVal = state->getSVal(CE->getArg(0), LCtx);
+ BSmr == BehaviorSummary::IdentityOrZero ||
+ BSmr == BehaviorSummary::IdentityThis) {
+
+ const Expr *BindReturnTo =
+ (BSmr == BehaviorSummary::IdentityThis)
+ ? cast<CXXMemberCallExpr>(CE)->getImplicitObjectArgument()
+ : CE->getArg(0);
+ SVal RetVal = state->getSVal(BindReturnTo, LCtx);
// If the receiver is unknown or the function has
// 'rc_ownership_trusted_implementation' annotate attribute, conjure a
// return value.
+ // FIXME: this branch is very strange.
if (RetVal.isUnknown() ||
(hasTrustedImplementationAnnotation && !ResultTy.isNull())) {
SValBuilder &SVB = C.getSValBuilder();
RetVal =
SVB.conjureSymbolVal(nullptr, CE, LCtx, ResultTy, C.blockCount());
}
+
+ // Bind the value.
state = state->BindExpr(CE, LCtx, RetVal, /*Invalidate=*/false);
if (BSmr == BehaviorSummary::IdentityOrZero) {
@@ -953,13 +946,12 @@ bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
// Assume that output is zero on the other branch.
NullOutputState = NullOutputState->BindExpr(
CE, LCtx, C.getSValBuilder().makeNull(), /*Invalidate=*/false);
-
- C.addTransition(NullOutputState);
+ C.addTransition(NullOutputState, &CastFailTag);
// And on the original branch assume that both input and
// output are non-zero.
if (auto L = RetVal.getAs<DefinedOrUnknownSVal>())
- state = state->assume(*L, /*Assumption=*/true);
+ state = state->assume(*L, /*assumption=*/true);
}
}
@@ -988,8 +980,10 @@ ExplodedNode * RetainCountChecker::processReturn(const ReturnStmt *S,
return Pred;
ProgramStateRef state = C.getState();
- SymbolRef Sym =
- state->getSValAsScalarOrLoc(RetE, C.getLocationContext()).getAsLocSymbol();
+ // We need to dig down to the symbolic base here because various
+ // custom allocators do sometimes return the symbol with an offset.
+ SymbolRef Sym = state->getSValAsScalarOrLoc(RetE, C.getLocationContext())
+ .getAsLocSymbol(/*IncludeBaseRegions=*/true);
if (!Sym)
return Pred;
@@ -1057,11 +1051,11 @@ ExplodedNode * RetainCountChecker::processReturn(const ReturnStmt *S,
// FIXME: What is the convention for blocks? Is there one?
if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(CD)) {
- const RetainSummary *Summ = Summaries.getMethodSummary(MD);
+ const RetainSummary *Summ = Summaries.getSummary(AnyCall(MD));
RE = Summ->getRetEffect();
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CD)) {
if (!isa<CXXMethodDecl>(FD)) {
- const RetainSummary *Summ = Summaries.getFunctionSummary(FD);
+ const RetainSummary *Summ = Summaries.getSummary(AnyCall(FD));
RE = Summ->getRetEffect();
}
}
@@ -1100,8 +1094,8 @@ ExplodedNode * RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
ExplodedNode *N = C.addTransition(state, Pred, &ReturnOwnLeakTag);
if (N) {
const LangOptions &LOpts = C.getASTContext().getLangOpts();
- auto R = llvm::make_unique<RefLeakReport>(
- *getLeakAtReturnBug(LOpts), LOpts, N, Sym, C);
+ auto R =
+ llvm::make_unique<RefLeakReport>(leakAtReturn, LOpts, N, Sym, C);
C.emitReport(std::move(R));
}
return N;
@@ -1125,11 +1119,8 @@ ExplodedNode * RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
ExplodedNode *N = C.addTransition(state, Pred, &ReturnNotOwnedTag);
if (N) {
- if (!returnNotOwnedForOwned)
- returnNotOwnedForOwned.reset(new ReturnedNotOwnedForOwned(this));
-
auto R = llvm::make_unique<RefCountReport>(
- *returnNotOwnedForOwned, C.getASTContext().getLangOpts(), N, Sym);
+ returnNotOwnedForOwned, C.getASTContext().getLangOpts(), N, Sym);
C.emitReport(std::move(R));
}
return N;
@@ -1145,39 +1136,15 @@ ExplodedNode * RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
void RetainCountChecker::checkBind(SVal loc, SVal val, const Stmt *S,
CheckerContext &C) const {
- // Are we storing to something that causes the value to "escape"?
- bool escapes = true;
-
- // A value escapes in three possible cases (this may change):
- //
- // (1) we are binding to something that is not a memory region.
- // (2) we are binding to a memregion that does not have stack storage
ProgramStateRef state = C.getState();
+ const MemRegion *MR = loc.getAsRegion();
- if (auto regionLoc = loc.getAs<loc::MemRegionVal>()) {
- escapes = shouldEscapeRegion(regionLoc->getRegion());
- }
-
- // If we are storing the value into an auto function scope variable annotated
- // with (__attribute__((cleanup))), stop tracking the value to avoid leak
- // false positives.
- if (const auto *LVR = dyn_cast_or_null<VarRegion>(loc.getAsRegion())) {
- const VarDecl *VD = LVR->getDecl();
- if (VD->hasAttr<CleanupAttr>()) {
- escapes = true;
- }
- }
-
- // If our store can represent the binding and we aren't storing to something
- // that doesn't have local storage then just return and have the simulation
- // state continue as is.
- if (!escapes)
- return;
-
- // Otherwise, find all symbols referenced by 'val' that we are tracking
+ // Find all symbols referenced by 'val' that we are tracking
// and stop tracking them.
- state = state->scanReachableSymbols<StopTrackingCallback>(val).getState();
- C.addTransition(state);
+ if (MR && shouldEscapeRegion(MR)) {
+ state = state->scanReachableSymbols<StopTrackingCallback>(val).getState();
+ C.addTransition(state);
+ }
}
ProgramStateRef RetainCountChecker::evalAssume(ProgramStateRef state,
@@ -1196,14 +1163,14 @@ ProgramStateRef RetainCountChecker::evalAssume(ProgramStateRef state,
bool changed = false;
RefBindingsTy::Factory &RefBFactory = state->get_context<RefBindings>();
+ ConstraintManager &CMgr = state->getConstraintManager();
- for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ for (auto &I : B) {
// Check if the symbol is null stop tracking the symbol.
- ConstraintManager &CMgr = state->getConstraintManager();
- ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey());
+ ConditionTruthVal AllocFailed = CMgr.isNull(state, I.first);
if (AllocFailed.isConstrainedTrue()) {
changed = true;
- B = RefBFactory.remove(B, I.getKey());
+ B = RefBFactory.remove(B, I.first);
}
}
@@ -1213,25 +1180,21 @@ ProgramStateRef RetainCountChecker::evalAssume(ProgramStateRef state,
return state;
}
-ProgramStateRef
-RetainCountChecker::checkRegionChanges(ProgramStateRef state,
- const InvalidatedSymbols *invalidated,
- ArrayRef<const MemRegion *> ExplicitRegions,
- ArrayRef<const MemRegion *> Regions,
- const LocationContext *LCtx,
- const CallEvent *Call) const {
+ProgramStateRef RetainCountChecker::checkRegionChanges(
+ ProgramStateRef state, const InvalidatedSymbols *invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions, const LocationContext *LCtx,
+ const CallEvent *Call) const {
if (!invalidated)
return state;
llvm::SmallPtrSet<SymbolRef, 8> WhitelistedSymbols;
- for (ArrayRef<const MemRegion *>::iterator I = ExplicitRegions.begin(),
- E = ExplicitRegions.end(); I != E; ++I) {
- if (const SymbolicRegion *SR = (*I)->StripCasts()->getAs<SymbolicRegion>())
+
+ for (const MemRegion *I : ExplicitRegions)
+ if (const SymbolicRegion *SR = I->StripCasts()->getAs<SymbolicRegion>())
WhitelistedSymbols.insert(SR->getSymbol());
- }
- for (SymbolRef sym :
- llvm::make_range(invalidated->begin(), invalidated->end())) {
+ for (SymbolRef sym : *invalidated) {
if (WhitelistedSymbols.count(sym))
continue;
// Remove any existing reference-count binding.
@@ -1309,12 +1272,9 @@ RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state,
os << "but ";
os << "has a +" << V.getCount() << " retain count";
- if (!overAutorelease)
- overAutorelease.reset(new OverAutorelease(this));
-
const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
- auto R = llvm::make_unique<RefCountReport>(*overAutorelease, LOpts, N, Sym,
- os.str());
+ auto R = llvm::make_unique<RefCountReport>(overAutorelease, LOpts, N, Sym,
+ os.str());
Ctx.emitReport(std::move(R));
}
@@ -1356,56 +1316,48 @@ RetainCountChecker::processLeaks(ProgramStateRef state,
ExplodedNode *Pred) const {
// Generate an intermediate node representing the leak point.
ExplodedNode *N = Ctx.addTransition(state, Pred);
+ const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
if (N) {
- for (SmallVectorImpl<SymbolRef>::iterator
- I = Leaked.begin(), E = Leaked.end(); I != E; ++I) {
-
- const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
- RefCountBug *BT = Pred ? getLeakWithinFunctionBug(LOpts)
- : getLeakAtReturnBug(LOpts);
- assert(BT && "BugType not initialized.");
-
- Ctx.emitReport(
- llvm::make_unique<RefLeakReport>(*BT, LOpts, N, *I, Ctx));
+ for (SymbolRef L : Leaked) {
+ const RefCountBug &BT = Pred ? leakWithinFunction : leakAtReturn;
+ Ctx.emitReport(llvm::make_unique<RefLeakReport>(BT, LOpts, N, L, Ctx));
}
}
return N;
}
-static bool isISLObjectRef(QualType Ty) {
- return StringRef(Ty.getAsString()).startswith("isl_");
-}
-
void RetainCountChecker::checkBeginFunction(CheckerContext &Ctx) const {
if (!Ctx.inTopFrame())
return;
RetainSummaryManager &SmrMgr = getSummaryManager(Ctx);
const LocationContext *LCtx = Ctx.getLocationContext();
- const FunctionDecl *FD = dyn_cast<FunctionDecl>(LCtx->getDecl());
+ const Decl *D = LCtx->getDecl();
+ Optional<AnyCall> C = AnyCall::forDecl(D);
- if (!FD || SmrMgr.isTrustedReferenceCountImplementation(FD))
+ if (!C || SmrMgr.isTrustedReferenceCountImplementation(D))
return;
ProgramStateRef state = Ctx.getState();
- const RetainSummary *FunctionSummary = SmrMgr.getFunctionSummary(FD);
+ const RetainSummary *FunctionSummary = SmrMgr.getSummary(*C);
ArgEffects CalleeSideArgEffects = FunctionSummary->getArgEffects();
- for (unsigned idx = 0, e = FD->getNumParams(); idx != e; ++idx) {
- const ParmVarDecl *Param = FD->getParamDecl(idx);
+ for (unsigned idx = 0, e = C->param_size(); idx != e; ++idx) {
+ const ParmVarDecl *Param = C->parameters()[idx];
SymbolRef Sym = state->getSVal(state->getRegion(Param, LCtx)).getAsSymbol();
QualType Ty = Param->getType();
const ArgEffect *AE = CalleeSideArgEffects.lookup(idx);
- if (AE && AE->getKind() == DecRef && isISLObjectRef(Ty)) {
- state = setRefBinding(
- state, Sym, RefVal::makeOwned(ObjKind::Generalized, Ty));
- } else if (isISLObjectRef(Ty)) {
- state = setRefBinding(
- state, Sym,
- RefVal::makeNotOwned(ObjKind::Generalized, Ty));
+ if (AE) {
+ ObjKind K = AE->getObjKind();
+ if (K == ObjKind::Generalized || K == ObjKind::OS ||
+ (TrackNSCFStartParam && (K == ObjKind::ObjC || K == ObjKind::CF))) {
+ RefVal NewVal = AE->getKind() == DecRef ? RefVal::makeOwned(K, Ty)
+ : RefVal::makeNotOwned(K, Ty);
+ state = setRefBinding(state, Sym, NewVal);
+ }
}
}
@@ -1431,9 +1383,9 @@ void RetainCountChecker::checkEndFunction(const ReturnStmt *RS,
return;
}
- for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ for (auto &I : B) {
state = handleAutoreleaseCounts(state, Pred, /*Tag=*/nullptr, Ctx,
- I->first, I->second);
+ I.first, I.second);
if (!state)
return;
}
@@ -1448,8 +1400,8 @@ void RetainCountChecker::checkEndFunction(const ReturnStmt *RS,
B = state->get<RefBindings>();
SmallVector<SymbolRef, 10> Leaked;
- for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I)
- state = handleSymbolDeath(state, I->first, I->second, Leaked);
+ for (auto &I : B)
+ state = handleSymbolDeath(state, I.first, I.second, Leaked);
processLeaks(state, Leaked, Ctx, Pred);
}
@@ -1459,7 +1411,6 @@ void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper,
ExplodedNode *Pred = C.getPredecessor();
ProgramStateRef state = C.getState();
- RefBindingsTy B = state->get<RefBindings>();
SmallVector<SymbolRef, 10> Leaked;
// Update counts from autorelease pools
@@ -1492,12 +1443,10 @@ void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper,
// Now generate a new node that nukes the old bindings.
// The only bindings left at this point are the leaked symbols.
RefBindingsTy::Factory &F = state->get_context<RefBindings>();
- B = state->get<RefBindings>();
+ RefBindingsTy B = state->get<RefBindings>();
- for (SmallVectorImpl<SymbolRef>::iterator I = Leaked.begin(),
- E = Leaked.end();
- I != E; ++I)
- B = F.remove(B, *I);
+ for (SymbolRef L : Leaked)
+ B = F.remove(B, L);
state = state->set<RefBindings>(B);
C.addTransition(state, Pred);
@@ -1513,9 +1462,9 @@ void RetainCountChecker::printState(raw_ostream &Out, ProgramStateRef State,
Out << Sep << NL;
- for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
- Out << I->first << " : ";
- I->second.print(Out);
+ for (auto &I : B) {
+ Out << I.first << " : ";
+ I.second.print(Out);
Out << NL;
}
}
@@ -1524,24 +1473,48 @@ void RetainCountChecker::printState(raw_ostream &Out, ProgramStateRef State,
// Checker registration.
//===----------------------------------------------------------------------===//
-void ento::registerRetainCountChecker(CheckerManager &Mgr) {
- auto *Chk = Mgr.registerChecker<RetainCountChecker>();
- Chk->TrackObjCAndCFObjects = true;
+void ento::registerRetainCountBase(CheckerManager &Mgr) {
+ Mgr.registerChecker<RetainCountChecker>();
+}
+
+bool ento::shouldRegisterRetainCountBase(const LangOptions &LO) {
+ return true;
}
// FIXME: remove this, hack for backwards compatibility:
// it should be possible to enable the NS/CF retain count checker as
// osx.cocoa.RetainCount, and it should be possible to disable
// osx.OSObjectRetainCount using osx.cocoa.RetainCount:CheckOSObject=false.
-static bool hasPrevCheckOSObjectOptionDisabled(AnalyzerOptions &Options) {
- auto I = Options.Config.find("osx.cocoa.RetainCount:CheckOSObject");
+static bool getOption(AnalyzerOptions &Options,
+ StringRef Postfix,
+ StringRef Value) {
+ auto I = Options.Config.find(
+ (StringRef("osx.cocoa.RetainCount:") + Postfix).str());
if (I != Options.Config.end())
- return I->getValue() == "false";
+ return I->getValue() == Value;
return false;
}
+void ento::registerRetainCountChecker(CheckerManager &Mgr) {
+ auto *Chk = Mgr.getChecker<RetainCountChecker>();
+ Chk->TrackObjCAndCFObjects = true;
+ Chk->TrackNSCFStartParam = getOption(Mgr.getAnalyzerOptions(),
+ "TrackNSCFStartParam",
+ "true");
+}
+
+bool ento::shouldRegisterRetainCountChecker(const LangOptions &LO) {
+ return true;
+}
+
void ento::registerOSObjectRetainCountChecker(CheckerManager &Mgr) {
- auto *Chk = Mgr.registerChecker<RetainCountChecker>();
- if (!hasPrevCheckOSObjectOptionDisabled(Mgr.getAnalyzerOptions()))
+ auto *Chk = Mgr.getChecker<RetainCountChecker>();
+ if (!getOption(Mgr.getAnalyzerOptions(),
+ "CheckOSObject",
+ "false"))
Chk->TrackOSObjects = true;
}
+
+bool ento::shouldRegisterOSObjectRetainCountChecker(const LangOptions &LO) {
+ return true;
+}
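
For reference only (not from the patch): a sketch of the kind of entry point the TrackNSCFStartParam mode registered above is aimed at, assuming a CoreFoundation SDK is available; the function name is illustrative, and __attribute__((cf_consumed)) is the existing Clang annotation marking the parameter as consumed.

    #include <CoreFoundation/CoreFoundation.h>

    // Hypothetical analyzed entry point: the consumed CF parameter starts
    // at +1 when start-parameter tracking is enabled.
    void dropTwice(CFTypeRef Obj __attribute__((cf_consumed))) {
      CFRelease(Obj);  // consumes the +1 the parameter started with
      CFRelease(Obj);  // second decrement of a reference we no longer own
    }
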
diff --git a/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
index 31e2d9ae4932..124c0e5040b9 100644
--- a/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
+++ b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
@@ -1,9 +1,8 @@
//==--- RetainCountChecker.h - Checks for leaks and other issues -*- C++ -*--//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -22,6 +21,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ParentMap.h"
#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/Analysis/RetainSummaryManager.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Analysis/SelectorExtras.h"
@@ -33,7 +33,6 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
-#include "clang/StaticAnalyzer/Core/RetainSummaryManager.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableList.h"
@@ -251,14 +250,21 @@ class RetainCountChecker
check::RegionChanges,
eval::Assume,
eval::Call > {
- mutable std::unique_ptr<RefCountBug> useAfterRelease, releaseNotOwned;
- mutable std::unique_ptr<RefCountBug> deallocNotOwned;
- mutable std::unique_ptr<RefCountBug> overAutorelease, returnNotOwnedForOwned;
- mutable std::unique_ptr<RefCountBug> leakWithinFunction, leakAtReturn;
+
+ RefCountBug useAfterRelease{this, RefCountBug::UseAfterRelease};
+ RefCountBug releaseNotOwned{this, RefCountBug::ReleaseNotOwned};
+ RefCountBug deallocNotOwned{this, RefCountBug::DeallocNotOwned};
+ RefCountBug freeNotOwned{this, RefCountBug::FreeNotOwned};
+ RefCountBug overAutorelease{this, RefCountBug::OverAutorelease};
+ RefCountBug returnNotOwnedForOwned{this, RefCountBug::ReturnNotOwnedForOwned};
+ RefCountBug leakWithinFunction{this, RefCountBug::LeakWithinFunction};
+ RefCountBug leakAtReturn{this, RefCountBug::LeakAtReturn};
+
+ CheckerProgramPointTag DeallocSentTag{this, "DeallocSent"};
+ CheckerProgramPointTag CastFailTag{this, "DynamicCastFail"};
mutable std::unique_ptr<RetainSummaryManager> Summaries;
public:
- static constexpr const char *DeallocTagDescription = "DeallocSent";
/// Track Objective-C and CoreFoundation objects.
bool TrackObjCAndCFObjects = false;
@@ -266,22 +272,15 @@ public:
   /// Track subclasses of OSObject.
bool TrackOSObjects = false;
- RetainCountChecker() {}
+ /// Track initial parameters (for the entry point) for NS/CF objects.
+ bool TrackNSCFStartParam = false;
- RefCountBug *getLeakWithinFunctionBug(const LangOptions &LOpts) const;
-
- RefCountBug *getLeakAtReturnBug(const LangOptions &LOpts) const;
+ RetainCountChecker() {};
RetainSummaryManager &getSummaryManager(ASTContext &Ctx) const {
- // FIXME: We don't support ARC being turned on and off during one analysis.
- // (nor, for that matter, do we support changing ASTContexts)
- bool ARCEnabled = (bool)Ctx.getLangOpts().ObjCAutoRefCount;
- if (!Summaries) {
- Summaries.reset(new RetainSummaryManager(
- Ctx, ARCEnabled, TrackObjCAndCFObjects, TrackOSObjects));
- } else {
- assert(Summaries->isARCEnabled() == ARCEnabled);
- }
+ if (!Summaries)
+ Summaries.reset(
+ new RetainSummaryManager(Ctx, TrackObjCAndCFObjects, TrackOSObjects));
return *Summaries;
}
@@ -311,7 +310,7 @@ public:
const CallEvent &Call,
CheckerContext &C) const;
- bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+ bool evalCall(const CallEvent &Call, CheckerContext &C) const;
ProgramStateRef evalAssume(ProgramStateRef state, SVal Cond,
bool Assumption) const;
@@ -336,6 +335,9 @@ public:
RefVal V, ArgEffect E, RefVal::Kind &hasErr,
CheckerContext &C) const;
+ const RefCountBug &errorKindToBugKind(RefVal::Kind ErrorKind,
+ SymbolRef Sym) const;
+
void processNonLeakError(ProgramStateRef St, SourceRange ErrorRange,
RefVal::Kind ErrorKind, SymbolRef Sym,
CheckerContext &C) const;
@@ -358,6 +360,14 @@ public:
CheckerContext &Ctx,
ExplodedNode *Pred = nullptr) const;
+ const CheckerProgramPointTag &getDeallocSentTag() const {
+ return DeallocSentTag;
+ }
+
+ const CheckerProgramPointTag &getCastFailTag() const {
+ return CastFailTag;
+ }
+
private:
/// Perform the necessary checks and state adjustments at the end of the
/// function.
@@ -371,11 +381,6 @@ private:
const RefVal *getRefBinding(ProgramStateRef State, SymbolRef Sym);
-ProgramStateRef setRefBinding(ProgramStateRef State, SymbolRef Sym,
- RefVal Val);
-
-ProgramStateRef removeRefBinding(ProgramStateRef State, SymbolRef Sym);
-
/// Returns true if this stack frame is for an Objective-C method that is a
/// property getter or setter whose body has been synthesized by the analyzer.
inline bool isSynthesizedAccessor(const StackFrameContext *SFC) {
diff --git a/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
index cda1a928de13..796fd882ffd5 100644
--- a/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
+++ b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
@@ -1,9 +1,8 @@
// RetainCountDiagnostics.cpp - Checks for leaks and other issues -*- C++ -*--//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -19,6 +18,56 @@ using namespace clang;
using namespace ento;
using namespace retaincountchecker;
+StringRef RefCountBug::bugTypeToName(RefCountBug::RefCountBugType BT) {
+ switch (BT) {
+ case UseAfterRelease:
+ return "Use-after-release";
+ case ReleaseNotOwned:
+ return "Bad release";
+ case DeallocNotOwned:
+ return "-dealloc sent to non-exclusively owned object";
+ case FreeNotOwned:
+ return "freeing non-exclusively owned object";
+ case OverAutorelease:
+ return "Object autoreleased too many times";
+ case ReturnNotOwnedForOwned:
+ return "Method should return an owned object";
+ case LeakWithinFunction:
+ return "Leak";
+ case LeakAtReturn:
+ return "Leak of returned object";
+ }
+ llvm_unreachable("Unknown RefCountBugType");
+}
+
+StringRef RefCountBug::getDescription() const {
+ switch (BT) {
+ case UseAfterRelease:
+ return "Reference-counted object is used after it is released";
+ case ReleaseNotOwned:
+ return "Incorrect decrement of the reference count of an object that is "
+ "not owned at this point by the caller";
+ case DeallocNotOwned:
+ return "-dealloc sent to object that may be referenced elsewhere";
+ case FreeNotOwned:
+ return "'free' called on an object that may be referenced elsewhere";
+ case OverAutorelease:
+ return "Object autoreleased too many times";
+ case ReturnNotOwnedForOwned:
+ return "Object with a +0 retain count returned to caller where a +1 "
+ "(owning) retain count is expected";
+ case LeakWithinFunction:
+ case LeakAtReturn:
+ return "";
+ }
+ llvm_unreachable("Unknown RefCountBugType");
+}
+
+RefCountBug::RefCountBug(const CheckerBase *Checker, RefCountBugType BT)
+ : BugType(Checker, bugTypeToName(BT), categories::MemoryRefCount,
+ /*SuppressOnSink=*/BT == LeakWithinFunction || BT == LeakAtReturn),
+ BT(BT), Checker(Checker) {}
+
static bool isNumericLiteralExpression(const Expr *E) {
// FIXME: This set of cases was copied from SemaExprObjC.
return isa<IntegerLiteral>(E) ||
@@ -42,7 +91,8 @@ static std::string getPrettyTypeName(QualType QT) {
/// Write information about the type state change to {@code os},
/// return whether the note should be generated.
static bool shouldGenerateNote(llvm::raw_string_ostream &os,
- const RefVal *PrevT, const RefVal &CurrV,
+ const RefVal *PrevT,
+ const RefVal &CurrV,
bool DeallocSent) {
// Get the previous type state.
RefVal PrevV = *PrevT;
@@ -132,6 +182,31 @@ static Optional<unsigned> findArgIdxOfSymbol(ProgramStateRef CurrSt,
return None;
}
+static Optional<std::string> findMetaClassAlloc(const Expr *Callee) {
+ if (const auto *ME = dyn_cast<MemberExpr>(Callee)) {
+ if (ME->getMemberDecl()->getNameAsString() != "alloc")
+ return None;
+ const Expr *This = ME->getBase()->IgnoreParenImpCasts();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(This)) {
+ const ValueDecl *VD = DRE->getDecl();
+ if (VD->getNameAsString() != "metaClass")
+ return None;
+
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(VD->getDeclContext()))
+ return RD->getNameAsString();
+
+ }
+ }
+ return None;
+}
+
+static std::string findAllocatedObjectName(const Stmt *S, QualType QT) {
+ if (const auto *CE = dyn_cast<CallExpr>(S))
+ if (auto Out = findMetaClassAlloc(CE->getCallee()))
+ return *Out;
+ return getPrettyTypeName(QT);
+}
+
static void generateDiagnosticsForCallLike(ProgramStateRef CurrSt,
const LocationContext *LCtx,
const RefVal &CurrV, SymbolRef &Sym,
@@ -189,7 +264,7 @@ static void generateDiagnosticsForCallLike(ProgramStateRef CurrSt,
os << "a Core Foundation object of type '"
<< Sym->getType().getAsString() << "' with a ";
} else if (CurrV.getObjKind() == ObjKind::OS) {
- os << "an OSObject of type '" << getPrettyTypeName(Sym->getType())
+ os << "an OSObject of type '" << findAllocatedObjectName(S, Sym->getType())
<< "' with a ";
} else if (CurrV.getObjKind() == ObjKind::Generalized) {
os << "an object of type '" << Sym->getType().getAsString()
@@ -338,15 +413,38 @@ annotateConsumedSummaryMismatch(const ExplodedNode *N,
if (os.str().empty())
return nullptr;
- // FIXME: remove the code duplication with NoStoreFuncVisitor.
- PathDiagnosticLocation L;
- if (const ReturnStmt *RS = CallExitLoc.getReturnStmt()) {
- L = PathDiagnosticLocation::createBegin(RS, SM, N->getLocationContext());
+ PathDiagnosticLocation L = PathDiagnosticLocation::create(CallExitLoc, SM);
+ return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
+}
+
+/// Annotate the parameter at the analysis entry point.
+static std::shared_ptr<PathDiagnosticEventPiece>
+annotateStartParameter(const ExplodedNode *N, SymbolRef Sym,
+ const SourceManager &SM) {
+ auto PP = N->getLocationAs<BlockEdge>();
+ if (!PP)
+ return nullptr;
+
+ const CFGBlock *Src = PP->getSrc();
+ const RefVal *CurrT = getRefBinding(N->getState(), Sym);
+
+ if (&Src->getParent()->getEntry() != Src || !CurrT ||
+ getRefBinding(N->getFirstPred()->getState(), Sym))
+ return nullptr;
+
+ const auto *VR = cast<VarRegion>(cast<SymbolRegionValue>(Sym)->getRegion());
+ const auto *PVD = cast<ParmVarDecl>(VR->getDecl());
+ PathDiagnosticLocation L = PathDiagnosticLocation(PVD, SM);
+
+ std::string s;
+ llvm::raw_string_ostream os(s);
+ os << "Parameter '" << PVD->getNameAsString() << "' starts at +";
+ if (CurrT->getCount() == 1) {
+ os << "1, as it is marked as consuming";
} else {
- L = PathDiagnosticLocation(
- Call->getRuntimeDefinition().getDecl()->getSourceRange().getEnd(), SM);
+ assert(CurrT->getCount() == 0);
+ os << "0";
}
-
return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
}
@@ -354,12 +452,22 @@ std::shared_ptr<PathDiagnosticPiece>
RefCountReportVisitor::VisitNode(const ExplodedNode *N,
BugReporterContext &BRC, BugReport &BR) {
+ const auto &BT = static_cast<const RefCountBug&>(BR.getBugType());
+ const auto *Checker =
+ static_cast<const RetainCountChecker *>(BT.getChecker());
+
+ bool IsFreeUnowned = BT.getBugType() == RefCountBug::FreeNotOwned ||
+ BT.getBugType() == RefCountBug::DeallocNotOwned;
+
const SourceManager &SM = BRC.getSourceManager();
CallEventManager &CEMgr = BRC.getStateManager().getCallEventManager();
if (auto CE = N->getLocationAs<CallExitBegin>())
if (auto PD = annotateConsumedSummaryMismatch(N, *CE, SM, CEMgr))
return PD;
+ if (auto PD = annotateStartParameter(N, Sym, SM))
+ return PD;
+
// FIXME: We will eventually need to handle non-statement-based events
// (__attribute__((cleanup))).
if (!N->getLocation().getAs<StmtPoint>())
@@ -372,7 +480,8 @@ RefCountReportVisitor::VisitNode(const ExplodedNode *N,
const LocationContext *LCtx = N->getLocationContext();
const RefVal* CurrT = getRefBinding(CurrSt, Sym);
- if (!CurrT) return nullptr;
+ if (!CurrT)
+ return nullptr;
const RefVal &CurrV = *CurrT;
const RefVal *PrevT = getRefBinding(PrevSt, Sym);
@@ -382,6 +491,12 @@ RefCountReportVisitor::VisitNode(const ExplodedNode *N,
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
+ if (PrevT && IsFreeUnowned && CurrV.isNotOwned() && PrevT->isOwned()) {
+ os << "Object is now not exclusively owned";
+ auto Pos = PathDiagnosticLocation::create(N->getLocation(), SM);
+ return std::make_shared<PathDiagnosticEventPiece>(Pos, os.str());
+ }
+
// This is the allocation site since the previous node had no bindings
// for this symbol.
if (!PrevT) {
@@ -428,9 +543,13 @@ RefCountReportVisitor::VisitNode(const ExplodedNode *N,
// program point
bool DeallocSent = false;
- if (N->getLocation().getTag() &&
- N->getLocation().getTag()->getTagDescription().contains(
- RetainCountChecker::DeallocTagDescription)) {
+ const ProgramPointTag *Tag = N->getLocation().getTag();
+
+ if (Tag == &Checker->getCastFailTag()) {
+ os << "Assuming dynamic cast returns null due to type mismatch";
+ }
+
+ if (Tag == &Checker->getDeallocSentTag()) {
// We only have summaries attached to nodes after evaluating CallExpr and
// ObjCMessageExprs.
const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
@@ -587,7 +706,7 @@ static AllocationInfo GetAllocationSite(ProgramStateManager &StateMgr,
if (AllocationNodeInCurrentOrParentContext &&
AllocationNodeInCurrentOrParentContext->getLocationContext() !=
- LeakContext)
+ LeakContext)
FirstBinding = nullptr;
return AllocationInfo(AllocationNodeInCurrentOrParentContext,
@@ -671,10 +790,19 @@ RefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
}
} else {
const FunctionDecl *FD = cast<FunctionDecl>(D);
- os << "whose name ('" << *FD
- << "') does not contain 'Copy' or 'Create'. This violates the naming"
- " convention rules given in the Memory Management Guide for Core"
- " Foundation";
+ ObjKind K = RV->getObjKind();
+ if (K == ObjKind::ObjC || K == ObjKind::CF) {
+ os << "whose name ('" << *FD
+ << "') does not contain 'Copy' or 'Create'. This violates the "
+ "naming"
+ " convention rules given in the Memory Management Guide for "
+ "Core"
+ " Foundation";
+ } else if (RV->getObjKind() == ObjKind::OS) {
+ std::string FuncName = FD->getNameAsString();
+ os << "whose name ('" << FuncName
+ << "') starts with '" << StringRef(FuncName).substr(0, 3) << "'";
+ }
}
}
} else {
@@ -685,15 +813,15 @@ RefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
}
-RefCountReport::RefCountReport(RefCountBug &D, const LangOptions &LOpts,
+RefCountReport::RefCountReport(const RefCountBug &D, const LangOptions &LOpts,
ExplodedNode *n, SymbolRef sym,
- bool registerVisitor)
- : BugReport(D, D.getDescription(), n), Sym(sym) {
- if (registerVisitor)
+ bool isLeak)
+ : BugReport(D, D.getDescription(), n), Sym(sym), isLeak(isLeak) {
+ if (!isLeak)
addVisitor(llvm::make_unique<RefCountReportVisitor>(sym));
}
-RefCountReport::RefCountReport(RefCountBug &D, const LangOptions &LOpts,
+RefCountReport::RefCountReport(const RefCountBug &D, const LangOptions &LOpts,
ExplodedNode *n, SymbolRef sym,
StringRef endText)
: BugReport(D, D.getDescription(), endText, n) {
@@ -779,10 +907,10 @@ void RefLeakReport::createDescription(CheckerContext &Ctx) {
}
}
-RefLeakReport::RefLeakReport(RefCountBug &D, const LangOptions &LOpts,
+RefLeakReport::RefLeakReport(const RefCountBug &D, const LangOptions &LOpts,
ExplodedNode *n, SymbolRef sym,
CheckerContext &Ctx)
- : RefCountReport(D, LOpts, n, sym, false) {
+ : RefCountReport(D, LOpts, n, sym, /*isLeak=*/true) {
deriveAllocLocation(Ctx, sym);
if (!AllocBinding)
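
For reference only (not from the patch): a minimal example of the Create-rule leak that the naming-convention wording above describes, assuming a CoreFoundation SDK; the function and variable names are illustrative.

    #include <CoreFoundation/CoreFoundation.h>

    void makeGreeting() {
      // Per the Create/Copy naming convention, the result is returned at +1.
      CFStringRef S = CFStringCreateWithCString(kCFAllocatorDefault, "hello",
                                                kCFStringEncodingASCII);
      (void)S;  // never released and never escapes: reported as a leak
    }
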
diff --git a/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
index 9f796abe8eae..ef3c75f87af5 100644
--- a/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
+++ b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
@@ -1,9 +1,8 @@
//== RetainCountDiagnostics.h - Checks for leaks and other issues -*- C++ -*--//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -15,42 +14,61 @@
#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_RETAINCOUNTCHECKER_DIAGNOSTICS_H
#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_RETAINCOUNTCHECKER_DIAGNOSTICS_H
+#include "clang/Analysis/RetainSummaryManager.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
-#include "clang/StaticAnalyzer/Core/RetainSummaryManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
namespace clang {
namespace ento {
namespace retaincountchecker {
class RefCountBug : public BugType {
-protected:
- RefCountBug(const CheckerBase *checker, StringRef name)
- : BugType(checker, name, categories::MemoryRefCount) {}
-
public:
- virtual const char *getDescription() const = 0;
+ enum RefCountBugType {
+ UseAfterRelease,
+ ReleaseNotOwned,
+ DeallocNotOwned,
+ FreeNotOwned,
+ OverAutorelease,
+ ReturnNotOwnedForOwned,
+ LeakWithinFunction,
+ LeakAtReturn,
+ };
+ RefCountBug(const CheckerBase *checker, RefCountBugType BT);
+ StringRef getDescription() const;
+
+ RefCountBugType getBugType() const {
+ return BT;
+ }
+
+ const CheckerBase *getChecker() const {
+ return Checker;
+ }
- virtual bool isLeak() const { return false; }
+private:
+ RefCountBugType BT;
+ const CheckerBase *Checker;
+ static StringRef bugTypeToName(RefCountBugType BT);
};
class RefCountReport : public BugReport {
protected:
SymbolRef Sym;
+ bool isLeak = false;
public:
- RefCountReport(RefCountBug &D, const LangOptions &LOpts,
+ RefCountReport(const RefCountBug &D, const LangOptions &LOpts,
ExplodedNode *n, SymbolRef sym,
- bool registerVisitor = true);
+                 bool isLeak = false);
- RefCountReport(RefCountBug &D, const LangOptions &LOpts,
+ RefCountReport(const RefCountBug &D, const LangOptions &LOpts,
ExplodedNode *n, SymbolRef sym,
StringRef endText);
llvm::iterator_range<ranges_iterator> getRanges() override {
- const RefCountBug& BugTy = static_cast<RefCountBug&>(getBugType());
- if (!BugTy.isLeak())
+ if (!isLeak)
return BugReport::getRanges();
return llvm::make_range(ranges_iterator(), ranges_iterator());
}
@@ -69,7 +87,7 @@ class RefLeakReport : public RefCountReport {
void createDescription(CheckerContext &Ctx);
public:
- RefLeakReport(RefCountBug &D, const LangOptions &LOpts, ExplodedNode *n,
+ RefLeakReport(const RefCountBug &D, const LangOptions &LOpts, ExplodedNode *n,
SymbolRef sym, CheckerContext &Ctx);
PathDiagnosticLocation getLocation(const SourceManager &SM) const override {
diff --git a/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp b/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
index 17ef39531628..9eb47e0526dc 100644
--- a/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
@@ -1,9 +1,8 @@
//== ReturnPointerRangeChecker.cpp ------------------------------*- C++ -*--==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -90,3 +89,7 @@ void ReturnPointerRangeChecker::checkPreStmt(const ReturnStmt *RS,
void ento::registerReturnPointerRangeChecker(CheckerManager &mgr) {
mgr.registerChecker<ReturnPointerRangeChecker>();
}
+
+bool ento::shouldRegisterReturnPointerRangeChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp b/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
index 3e0613e8ba68..f55c369da67e 100644
--- a/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
@@ -1,9 +1,8 @@
//== ReturnUndefChecker.cpp -------------------------------------*- C++ -*--==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -121,3 +120,7 @@ void ReturnUndefChecker::checkReference(CheckerContext &C, const Expr *RetE,
void ento::registerReturnUndefChecker(CheckerManager &mgr) {
mgr.registerChecker<ReturnUndefChecker>();
}
+
+bool ento::shouldRegisterReturnUndefChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp b/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp
new file mode 100644
index 000000000000..103208d8b5a5
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp
@@ -0,0 +1,170 @@
+//===- ReturnValueChecker - Applies guaranteed return values ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines ReturnValueChecker, which checks for calls with a guaranteed
+// boolean return value. It models the return value of each such call.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ReturnValueChecker : public Checker<check::PostCall, check::EndFunction> {
+public:
+  // It sets the predefined invariant ('CDM') if the current call does not break it.
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+
+ // It reports whether a predefined invariant ('CDM') is broken.
+ void checkEndFunction(const ReturnStmt *RS, CheckerContext &C) const;
+
+private:
+ // The pairs are in the following form: {{{class, call}}, return value}
+ const CallDescriptionMap<bool> CDM = {
+ // These are known in the LLVM project: 'Error()'
+ {{{"ARMAsmParser", "Error"}}, true},
+ {{{"HexagonAsmParser", "Error"}}, true},
+ {{{"LLLexer", "Error"}}, true},
+ {{{"LLParser", "Error"}}, true},
+ {{{"MCAsmParser", "Error"}}, true},
+ {{{"MCAsmParserExtension", "Error"}}, true},
+ {{{"TGParser", "Error"}}, true},
+ {{{"X86AsmParser", "Error"}}, true},
+ // 'TokError()'
+ {{{"LLParser", "TokError"}}, true},
+ {{{"MCAsmParser", "TokError"}}, true},
+ {{{"MCAsmParserExtension", "TokError"}}, true},
+ {{{"TGParser", "TokError"}}, true},
+ // 'error()'
+ {{{"MIParser", "error"}}, true},
+ {{{"WasmAsmParser", "error"}}, true},
+ {{{"WebAssemblyAsmParser", "error"}}, true},
+ // Other
+ {{{"AsmParser", "printError"}}, true}};
+};
+} // namespace
+
+static std::string getName(const CallEvent &Call) {
+ std::string Name = "";
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(Call.getDecl()))
+ if (const CXXRecordDecl *RD = MD->getParent())
+ Name += RD->getNameAsString() + "::";
+
+ Name += Call.getCalleeIdentifier()->getName();
+ return Name;
+}
+
+// The predefined invariants ('CDM') may be broken as the code base evolves.
+// Check whether the expected invariant holds for the given return value.
+static Optional<bool> isInvariantBreak(bool ExpectedValue, SVal ReturnV,
+ CheckerContext &C) {
+ auto ReturnDV = ReturnV.getAs<DefinedOrUnknownSVal>();
+ if (!ReturnDV)
+ return None;
+
+ if (ExpectedValue)
+ return C.getState()->isNull(*ReturnDV).isConstrainedTrue();
+
+ return C.getState()->isNull(*ReturnDV).isConstrainedFalse();
+}
+
+void ReturnValueChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ const bool *RawExpectedValue = CDM.lookup(Call);
+ if (!RawExpectedValue)
+ return;
+
+ SVal ReturnV = Call.getReturnValue();
+ bool ExpectedValue = *RawExpectedValue;
+ Optional<bool> IsInvariantBreak = isInvariantBreak(ExpectedValue, ReturnV, C);
+ if (!IsInvariantBreak)
+ return;
+
+ // If the invariant is broken it is reported by 'checkEndFunction()'.
+ if (*IsInvariantBreak)
+ return;
+
+ std::string Name = getName(Call);
+ const NoteTag *CallTag = C.getNoteTag(
+ [Name, ExpectedValue](BugReport &) -> std::string {
+ SmallString<128> Msg;
+ llvm::raw_svector_ostream Out(Msg);
+
+ Out << '\'' << Name << "' returns "
+ << (ExpectedValue ? "true" : "false");
+ return Out.str();
+ },
+ /*IsPrunable=*/true);
+
+ ProgramStateRef State = C.getState();
+ State = State->assume(ReturnV.castAs<DefinedOrUnknownSVal>(), ExpectedValue);
+ C.addTransition(State, CallTag);
+}
+
+void ReturnValueChecker::checkEndFunction(const ReturnStmt *RS,
+ CheckerContext &C) const {
+ if (!RS || !RS->getRetValue())
+ return;
+
+ // We cannot get the caller in the top-frame.
+ const StackFrameContext *SFC = C.getStackFrame();
+ if (C.getStackFrame()->inTopFrame())
+ return;
+
+ ProgramStateRef State = C.getState();
+ CallEventManager &CMgr = C.getStateManager().getCallEventManager();
+ CallEventRef<> Call = CMgr.getCaller(SFC, State);
+ if (!Call)
+ return;
+
+ const bool *RawExpectedValue = CDM.lookup(*Call);
+ if (!RawExpectedValue)
+ return;
+
+ SVal ReturnV = State->getSVal(RS->getRetValue(), C.getLocationContext());
+ bool ExpectedValue = *RawExpectedValue;
+ Optional<bool> IsInvariantBreak = isInvariantBreak(ExpectedValue, ReturnV, C);
+ if (!IsInvariantBreak)
+ return;
+
+  // If the invariant holds, it is reported by 'checkPostCall()'.
+ if (!*IsInvariantBreak)
+ return;
+
+ std::string Name = getName(*Call);
+ const NoteTag *CallTag = C.getNoteTag(
+ [Name, ExpectedValue](BugReport &BR) -> std::string {
+ SmallString<128> Msg;
+ llvm::raw_svector_ostream Out(Msg);
+
+          // The message is inverted because the invariant is broken.
+ Out << '\'' << Name << "' returns "
+ << (ExpectedValue ? "false" : "true");
+
+ return Out.str();
+ },
+ /*IsPrunable=*/false);
+
+ C.addTransition(State, CallTag);
+}
+
+void ento::registerReturnValueChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<ReturnValueChecker>();
+}
+
+bool ento::shouldRegisterReturnValueChecker(const LangOptions &LO) {
+ return true;
+}
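A minimal sketch of the convention the new checker encodes (not part of the patch; the caller and the helper below are hypothetical). After a call to one of the listed functions, the analyzer assumes the recorded return value, which lets it prune infeasible paths in callers that follow the convention:

    // Hypothetical caller, shown only to illustrate the modeled convention.
    bool LLParser::parseOptionalFlag(bool &Flag) {
      if (!tokenIsValid())                  // hypothetical helper
        return TokError("expected a flag"); // CDM: 'LLParser::TokError' returns true
      Flag = true;
      return false;                         // success path returns false
    }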
diff --git a/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp b/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp
index cf03b3c21132..5e305aa709b6 100644
--- a/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp
@@ -1,9 +1,8 @@
//=- RunLoopAutoreleaseLeakChecker.cpp --------------------------*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//
//===----------------------------------------------------------------------===//
@@ -116,7 +115,7 @@ static void emitDiagnostics(BoundNodes &Match,
BR.EmitBasicReport(ADC->getDecl(), Checker,
/*Name=*/"Memory leak inside autorelease pool",
- /*Category=*/"Memory",
+ /*BugCategory=*/"Memory",
/*Name=*/
(Twine("Temporary objects allocated in the") +
" autorelease pool " +
@@ -203,3 +202,7 @@ void RunLoopAutoreleaseLeakChecker::checkASTCodeBody(const Decl *D,
void ento::registerRunLoopAutoreleaseLeakChecker(CheckerManager &mgr) {
mgr.registerChecker<RunLoopAutoreleaseLeakChecker>();
}
+
+bool ento::shouldRegisterRunLoopAutoreleaseLeakChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp b/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
index 819d437e6883..ec5e9622c236 100644
--- a/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
@@ -1,9 +1,8 @@
//===-- SimpleStreamChecker.cpp -----------------------------------------*- C++ -*--//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -109,10 +108,10 @@ SimpleStreamChecker::SimpleStreamChecker()
DoubleCloseBugType.reset(
new BugType(this, "Double fclose", "Unix Stream API Error"));
- LeakBugType.reset(
- new BugType(this, "Resource Leak", "Unix Stream API Error"));
// Sinks are higher importance bugs as well as calls to assert() or exit(0).
- LeakBugType->setSuppressOnSink(true);
+ LeakBugType.reset(
+ new BugType(this, "Resource Leak", "Unix Stream API Error",
+ /*SuppressOnSink=*/true));
}
void SimpleStreamChecker::checkPostCall(const CallEvent &Call,
@@ -269,3 +268,8 @@ SimpleStreamChecker::checkPointerEscape(ProgramStateRef State,
void ento::registerSimpleStreamChecker(CheckerManager &mgr) {
mgr.registerChecker<SimpleStreamChecker>();
}
+
+// This checker should be enabled regardless of how language options are set.
+bool ento::shouldRegisterSimpleStreamChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp b/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
new file mode 100644
index 000000000000..fd372aafa50d
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
@@ -0,0 +1,72 @@
+//===-- SmartPtrModeling.cpp - Model behavior of C++ smart pointers -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a checker that models various aspects of
+// C++ smart pointer behavior.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Move.h"
+
+#include "clang/AST/ExprCXX.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class SmartPtrModeling : public Checker<eval::Call> {
+ bool isNullAfterMoveMethod(const CallEvent &Call) const;
+
+public:
+ bool evalCall(const CallEvent &Call, CheckerContext &C) const;
+};
+} // end of anonymous namespace
+
+bool SmartPtrModeling::isNullAfterMoveMethod(const CallEvent &Call) const {
+ // TODO: Update CallDescription to support anonymous calls?
+ // TODO: Handle other methods, such as .get() or .release().
+ // But once we do, we'd need a visitor to explain null dereferences
+ // that are found via such modeling.
+ const auto *CD = dyn_cast_or_null<CXXConversionDecl>(Call.getDecl());
+ return CD && CD->getConversionType()->isBooleanType();
+}
+
+bool SmartPtrModeling::evalCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ if (!isNullAfterMoveMethod(Call))
+ return false;
+
+ ProgramStateRef State = C.getState();
+ const MemRegion *ThisR =
+ cast<CXXInstanceCall>(&Call)->getCXXThisVal().getAsRegion();
+
+ if (!move::isMovedFrom(State, ThisR)) {
+ // TODO: Model this case as well. At least, avoid invalidation of globals.
+ return false;
+ }
+
+ // TODO: Add a note to bug reports describing this decision.
+ C.addTransition(
+ State->BindExpr(Call.getOriginExpr(), C.getLocationContext(),
+ C.getSValBuilder().makeZeroVal(Call.getResultType())));
+ return true;
+}
+
+void ento::registerSmartPtrModeling(CheckerManager &Mgr) {
+ Mgr.registerChecker<SmartPtrModeling>();
+}
+
+bool ento::shouldRegisterSmartPtrModeling(const LangOptions &LO) {
+ return LO.CPlusPlus;
+}
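A minimal sketch of the behaviour being modeled (not part of the patch; assumes move modeling has already marked 'P' as moved-from). The boolean conversion of a moved-from smart pointer is bound to false, so the dereferencing branch is treated as infeasible:

    #include <memory>

    void consume(std::unique_ptr<int> P) {
      std::unique_ptr<int> Q = std::move(P); // 'P' is now moved-from
      if (P)      // conversion to bool is modeled as false here
        *P = 42;  // this branch is not explored
    }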
diff --git a/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp b/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
index 0f53d826a5f6..b93bed5c3097 100644
--- a/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
@@ -1,9 +1,8 @@
//=== StackAddrEscapeChecker.cpp ----------------------------------*- C++ -*--//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -360,11 +359,23 @@ void StackAddrEscapeChecker::checkEndFunction(const ReturnStmt *RS,
}
}
-#define REGISTER_CHECKER(name) \
- void ento::register##name(CheckerManager &Mgr) { \
- StackAddrEscapeChecker *Chk = \
- Mgr.registerChecker<StackAddrEscapeChecker>(); \
- Chk->ChecksEnabled[StackAddrEscapeChecker::CK_##name] = true; \
+void ento::registerStackAddrEscapeBase(CheckerManager &mgr) {
+ mgr.registerChecker<StackAddrEscapeChecker>();
+}
+
+bool ento::shouldRegisterStackAddrEscapeBase(const LangOptions &LO) {
+ return true;
+}
+
+#define REGISTER_CHECKER(name) \
+ void ento::register##name(CheckerManager &Mgr) { \
+ StackAddrEscapeChecker *Chk = \
+ Mgr.getChecker<StackAddrEscapeChecker>(); \
+ Chk->ChecksEnabled[StackAddrEscapeChecker::CK_##name] = true; \
+ } \
+ \
+ bool ento::shouldRegister##name(const LangOptions &LO) { \
+ return true; \
}
REGISTER_CHECKER(StackAddrEscapeChecker)
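For reference, a rough expansion of the rewritten macro for its single user above (whitespace is illustrative; the body follows directly from the macro definition):

    void ento::registerStackAddrEscapeChecker(CheckerManager &Mgr) {
      StackAddrEscapeChecker *Chk = Mgr.getChecker<StackAddrEscapeChecker>();
      Chk->ChecksEnabled[StackAddrEscapeChecker::CK_StackAddrEscapeChecker] = true;
    }

    bool ento::shouldRegisterStackAddrEscapeChecker(const LangOptions &LO) {
      return true;
    }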
diff --git a/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp b/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
index 6478128ce954..2cdee8da375e 100644
--- a/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
@@ -1,9 +1,8 @@
//=== StdLibraryFunctionsChecker.cpp - Model standard functions -*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -225,7 +224,7 @@ class StdLibraryFunctionsChecker : public Checker<check::PostCall, eval::Call> {
public:
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
- bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+ bool evalCall(const CallEvent &Call, CheckerContext &C) const;
private:
Optional<FunctionSummaryTy> findFunctionSummary(const FunctionDecl *FD,
@@ -368,12 +367,16 @@ void StdLibraryFunctionsChecker::checkPostCall(const CallEvent &Call,
}
}
-bool StdLibraryFunctionsChecker::evalCall(const CallExpr *CE,
+bool StdLibraryFunctionsChecker::evalCall(const CallEvent &Call,
CheckerContext &C) const {
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl());
+ const auto *FD = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
if (!FD)
return false;
+ const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return false;
+
Optional<FunctionSummaryTy> FoundSummary = findFunctionSummary(FD, CE, C);
if (!FoundSummary)
return false;
@@ -1056,3 +1059,7 @@ void ento::registerStdCLibraryFunctionsChecker(CheckerManager &mgr) {
// class, turning on different function summaries.
mgr.registerChecker<StdLibraryFunctionsChecker>();
}
+
+bool ento::shouldRegisterStdCLibraryFunctionsChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index 92647f032730..1ea5e0769513 100644
--- a/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -1,9 +1,8 @@
//===-- StreamChecker.cpp -----------------------------------------*- C++ -*--//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -15,6 +14,7 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
@@ -72,7 +72,7 @@ public:
II_fsetpos(nullptr), II_clearerr(nullptr), II_feof(nullptr),
II_ferror(nullptr), II_fileno(nullptr) {}
- bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+ bool evalCall(const CallEvent &Call, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
private:
@@ -104,11 +104,15 @@ private:
REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState)
-bool StreamChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
- const FunctionDecl *FD = C.getCalleeDecl(CE);
+bool StreamChecker::evalCall(const CallEvent &Call, CheckerContext &C) const {
+ const auto *FD = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
if (!FD || FD->getKind() != Decl::Function)
return false;
+ const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return false;
+
ASTContext &Ctx = C.getASTContext();
if (!II_fopen)
II_fopen = &Ctx.Idents.get("fopen");
@@ -409,3 +413,7 @@ void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
void ento::registerStreamChecker(CheckerManager &mgr) {
mgr.registerChecker<StreamChecker>();
}
+
+bool ento::shouldRegisterStreamChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/Taint.cpp b/lib/StaticAnalyzer/Checkers/Taint.cpp
new file mode 100644
index 000000000000..bc120949ee4f
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/Taint.cpp
@@ -0,0 +1,227 @@
+//=== Taint.cpp - Taint tracking and basic propagation rules. ------*- C++ -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines basic, non-domain-specific mechanisms for tracking tainted values.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Taint.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+
+using namespace clang;
+using namespace ento;
+using namespace taint;
+
+// Fully tainted symbols.
+REGISTER_MAP_WITH_PROGRAMSTATE(TaintMap, SymbolRef, TaintTagType)
+
+// Partially tainted symbols.
+REGISTER_MAP_FACTORY_WITH_PROGRAMSTATE(TaintedSubRegions, const SubRegion *,
+ TaintTagType)
+REGISTER_MAP_WITH_PROGRAMSTATE(DerivedSymTaint, SymbolRef, TaintedSubRegions)
+
+void taint::printTaint(ProgramStateRef State, raw_ostream &Out, const char *NL,
+ const char *Sep) {
+ TaintMapTy TM = State->get<TaintMap>();
+
+ if (!TM.isEmpty())
+ Out << "Tainted symbols:" << NL;
+
+ for (const auto &I : TM)
+ Out << I.first << " : " << I.second << NL;
+}
+
+void dumpTaint(ProgramStateRef State) {
+ printTaint(State, llvm::errs());
+}
+
+ProgramStateRef taint::addTaint(ProgramStateRef State, const Stmt *S,
+ const LocationContext *LCtx,
+ TaintTagType Kind) {
+ return addTaint(State, State->getSVal(S, LCtx), Kind);
+}
+
+ProgramStateRef taint::addTaint(ProgramStateRef State, SVal V,
+ TaintTagType Kind) {
+ SymbolRef Sym = V.getAsSymbol();
+ if (Sym)
+ return addTaint(State, Sym, Kind);
+
+ // If the SVal represents a structure, try to mass-taint all values within the
+ // structure. For now it only works efficiently on lazy compound values that
+ // were conjured during a conservative evaluation of a function - either as
+ // return values of functions that return structures or arrays by value, or as
+ // values of structures or arrays passed into the function by reference,
+ // directly or through pointer aliasing. Such lazy compound values are
+ // characterized by having exactly one binding in their captured store within
+ // their parent region, which is a conjured symbol default-bound to the base
+ // region of the parent region.
+ if (auto LCV = V.getAs<nonloc::LazyCompoundVal>()) {
+ if (Optional<SVal> binding =
+ State->getStateManager().getStoreManager()
+ .getDefaultBinding(*LCV)) {
+ if (SymbolRef Sym = binding->getAsSymbol())
+ return addPartialTaint(State, Sym, LCV->getRegion(), Kind);
+ }
+ }
+
+ const MemRegion *R = V.getAsRegion();
+ return addTaint(State, R, Kind);
+}
+
+ProgramStateRef taint::addTaint(ProgramStateRef State, const MemRegion *R,
+ TaintTagType Kind) {
+ if (const SymbolicRegion *SR = dyn_cast_or_null<SymbolicRegion>(R))
+ return addTaint(State, SR->getSymbol(), Kind);
+ return State;
+}
+
+ProgramStateRef taint::addTaint(ProgramStateRef State, SymbolRef Sym,
+ TaintTagType Kind) {
+ // If this is a symbol cast, remove the cast before adding the taint. Taint
+ // is cast agnostic.
+ while (const SymbolCast *SC = dyn_cast<SymbolCast>(Sym))
+ Sym = SC->getOperand();
+
+ ProgramStateRef NewState = State->set<TaintMap>(Sym, Kind);
+ assert(NewState);
+ return NewState;
+}
+
+ProgramStateRef taint::addPartialTaint(ProgramStateRef State,
+ SymbolRef ParentSym,
+ const SubRegion *SubRegion,
+ TaintTagType Kind) {
+ // Ignore partial taint if the entire parent symbol is already tainted.
+ if (const TaintTagType *T = State->get<TaintMap>(ParentSym))
+ if (*T == Kind)
+ return State;
+
+ // Partial taint applies if only a portion of the symbol is tainted.
+ if (SubRegion == SubRegion->getBaseRegion())
+ return addTaint(State, ParentSym, Kind);
+
+ const TaintedSubRegions *SavedRegs = State->get<DerivedSymTaint>(ParentSym);
+ TaintedSubRegions::Factory &F = State->get_context<TaintedSubRegions>();
+ TaintedSubRegions Regs = SavedRegs ? *SavedRegs : F.getEmptyMap();
+
+ Regs = F.add(Regs, SubRegion, Kind);
+ ProgramStateRef NewState = State->set<DerivedSymTaint>(ParentSym, Regs);
+ assert(NewState);
+ return NewState;
+}
+
+bool taint::isTainted(ProgramStateRef State, const Stmt *S,
+ const LocationContext *LCtx, TaintTagType Kind) {
+ SVal val = State->getSVal(S, LCtx);
+ return isTainted(State, val, Kind);
+}
+
+bool taint::isTainted(ProgramStateRef State, SVal V, TaintTagType Kind) {
+ if (const SymExpr *Sym = V.getAsSymExpr())
+ return isTainted(State, Sym, Kind);
+ if (const MemRegion *Reg = V.getAsRegion())
+ return isTainted(State, Reg, Kind);
+ return false;
+}
+
+bool taint::isTainted(ProgramStateRef State, const MemRegion *Reg,
+ TaintTagType K) {
+ if (!Reg)
+ return false;
+
+  // An element region (array element) is tainted if either the base or the
+  // offset is tainted.
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(Reg))
+ return isTainted(State, ER->getSuperRegion(), K) ||
+ isTainted(State, ER->getIndex(), K);
+
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Reg))
+ return isTainted(State, SR->getSymbol(), K);
+
+ if (const SubRegion *ER = dyn_cast<SubRegion>(Reg))
+ return isTainted(State, ER->getSuperRegion(), K);
+
+ return false;
+}
+
+bool taint::isTainted(ProgramStateRef State, SymbolRef Sym, TaintTagType Kind) {
+ if (!Sym)
+ return false;
+
+ // Traverse all the symbols this symbol depends on to see if any are tainted.
+ for (SymExpr::symbol_iterator SI = Sym->symbol_begin(),
+ SE = Sym->symbol_end(); SI != SE; ++SI) {
+ if (!isa<SymbolData>(*SI))
+ continue;
+
+ if (const TaintTagType *Tag = State->get<TaintMap>(*SI)) {
+ if (*Tag == Kind)
+ return true;
+ }
+
+ if (const auto *SD = dyn_cast<SymbolDerived>(*SI)) {
+ // If this is a SymbolDerived with a tainted parent, it's also tainted.
+ if (isTainted(State, SD->getParentSymbol(), Kind))
+ return true;
+
+ // If this is a SymbolDerived with the same parent symbol as another
+ // tainted SymbolDerived and a region that's a sub-region of that tainted
+ // symbol, it's also tainted.
+ if (const TaintedSubRegions *Regs =
+ State->get<DerivedSymTaint>(SD->getParentSymbol())) {
+ const TypedValueRegion *R = SD->getRegion();
+ for (auto I : *Regs) {
+ // FIXME: The logic to identify tainted regions could be more
+ // complete. For example, this would not currently identify
+ // overlapping fields in a union as tainted. To identify this we can
+ // check for overlapping/nested byte offsets.
+ if (Kind == I.second && R->isSubRegionOf(I.first))
+ return true;
+ }
+ }
+ }
+
+ // If memory region is tainted, data is also tainted.
+ if (const auto *SRV = dyn_cast<SymbolRegionValue>(*SI)) {
+ if (isTainted(State, SRV->getRegion(), Kind))
+ return true;
+ }
+
+ // If this is a SymbolCast from a tainted value, it's also tainted.
+ if (const auto *SC = dyn_cast<SymbolCast>(*SI)) {
+ if (isTainted(State, SC->getOperand(), Kind))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+std::shared_ptr<PathDiagnosticPiece>
+TaintBugVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
+ BugReport &BR) {
+
+ // Find the ExplodedNode where the taint was first introduced
+ if (!isTainted(N->getState(), V) ||
+ isTainted(N->getFirstPred()->getState(), V))
+ return nullptr;
+
+ const Stmt *S = PathDiagnosticLocation::getStmt(N);
+ if (!S)
+ return nullptr;
+
+ const LocationContext *NCtx = N->getLocationContext();
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::createBegin(S, BRC.getSourceManager(), NCtx);
+ if (!L.isValid() || !L.asLocation().isValid())
+ return nullptr;
+
+ return std::make_shared<PathDiagnosticEventPiece>(L, "Taint originated here");
+}
diff --git a/lib/StaticAnalyzer/Checkers/Taint.h b/lib/StaticAnalyzer/Checkers/Taint.h
new file mode 100644
index 000000000000..72cf6a79d52c
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/Taint.h
@@ -0,0 +1,102 @@
+//=== Taint.h - Taint tracking and basic propagation rules. --------*- C++ -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines basic, non-domain-specific mechanisms for tracking tainted values.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_TAINT_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_TAINT_H
+
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+
+namespace clang {
+namespace ento {
+namespace taint {
+
+/// The type of a taint tag, used to differentiate between kinds of taint.
+using TaintTagType = unsigned;
+
+static constexpr TaintTagType TaintTagGeneric = 0;
+
+/// Create a new state in which the value of the statement is marked as tainted.
+LLVM_NODISCARD ProgramStateRef
+addTaint(ProgramStateRef State, const Stmt *S, const LocationContext *LCtx,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Create a new state in which the value is marked as tainted.
+LLVM_NODISCARD ProgramStateRef
+addTaint(ProgramStateRef State, SVal V,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Create a new state in which the symbol is marked as tainted.
+LLVM_NODISCARD ProgramStateRef
+addTaint(ProgramStateRef State, SymbolRef Sym,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Create a new state in which the pointer represented by the region
+/// is marked as tainted.
+LLVM_NODISCARD ProgramStateRef
+addTaint(ProgramStateRef State, const MemRegion *R,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Create a new state in which a sub-region of a given symbol is tainted.
+/// This might be necessary when referring to regions that cannot have an
+/// individual symbol, e.g. if they are represented by the default binding of
+/// a LazyCompoundVal.
+LLVM_NODISCARD ProgramStateRef
+addPartialTaint(ProgramStateRef State,
+ SymbolRef ParentSym, const SubRegion *SubRegion,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Check if the statement has a tainted value in the given state.
+bool isTainted(ProgramStateRef State, const Stmt *S,
+ const LocationContext *LCtx,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Check if the value is tainted in the given state.
+bool isTainted(ProgramStateRef State, SVal V,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Check if the symbol is tainted in the given state.
+bool isTainted(ProgramStateRef State, SymbolRef Sym,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Check if the pointer represented by the region is tainted in the given
+/// state.
+bool isTainted(ProgramStateRef State, const MemRegion *Reg,
+ TaintTagType Kind = TaintTagGeneric);
+
+void printTaint(ProgramStateRef State, raw_ostream &Out, const char *nl = "\n",
+ const char *sep = "");
+
+LLVM_DUMP_METHOD void dumpTaint(ProgramStateRef State);
+
+/// The bug visitor prints a diagnostic message at the location where a given
+/// variable was tainted.
+class TaintBugVisitor final : public BugReporterVisitor {
+private:
+ const SVal V;
+
+public:
+ TaintBugVisitor(const SVal V) : V(V) {}
+ void Profile(llvm::FoldingSetNodeID &ID) const override { ID.Add(V); }
+
+ std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
+};
+
+} // namespace taint
+} // namespace ento
+} // namespace clang
+
+#endif
+
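A minimal usage sketch of the relocated taint API (not part of the patch; 'MyChecker' and 'reportTaintedIndex' are hypothetical):

    void MyChecker::checkPostCall(const CallEvent &Call, CheckerContext &C) const {
      // Mark the return value of an untrusted call as tainted.
      ProgramStateRef State = taint::addTaint(C.getState(), Call.getReturnValue());
      C.addTransition(State);
    }

    void MyChecker::checkPreStmt(const ArraySubscriptExpr *E, CheckerContext &C) const {
      // Later, query the taint before deciding whether to warn.
      if (taint::isTainted(C.getState(), E->getIdx(), C.getLocationContext()))
        reportTaintedIndex(E, C); // hypothetical reporting helper
    }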
diff --git a/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp b/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
index 3aa8e95d0ad0..094762e2faf8 100644
--- a/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
@@ -1,15 +1,16 @@
//== TaintTesterChecker.cpp ----------------------------------- -*- C++ -*--=//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This checker can be used for testing how taint data is propagated.
//
//===----------------------------------------------------------------------===//
+
+#include "Taint.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -18,6 +19,7 @@
using namespace clang;
using namespace ento;
+using namespace taint;
namespace {
class TaintTesterChecker : public Checker< check::PostStmt<Expr> > {
@@ -47,7 +49,7 @@ void TaintTesterChecker::checkPostStmt(const Expr *E,
if (!State)
return;
- if (State->isTainted(E, C.getLocationContext())) {
+ if (isTainted(State, E, C.getLocationContext())) {
if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
initBugType();
auto report = llvm::make_unique<BugReport>(*BT, "tainted",N);
@@ -60,3 +62,7 @@ void TaintTesterChecker::checkPostStmt(const Expr *E,
void ento::registerTaintTesterChecker(CheckerManager &mgr) {
mgr.registerChecker<TaintTesterChecker>();
}
+
+bool ento::shouldRegisterTaintTesterChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp b/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
index 527e371571f1..7a33845a6a26 100644
--- a/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
@@ -1,9 +1,8 @@
//== TestAfterDivZeroChecker.cpp - Test after division by zero checker --*--==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -261,3 +260,7 @@ void TestAfterDivZeroChecker::checkBranchCondition(const Stmt *Condition,
void ento::registerTestAfterDivZeroChecker(CheckerManager &mgr) {
mgr.registerChecker<TestAfterDivZeroChecker>();
}
+
+bool ento::shouldRegisterTestAfterDivZeroChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp b/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
index 2f06469bb209..73183aa468f6 100644
--- a/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
@@ -1,9 +1,8 @@
//== TraversalChecker.cpp -------------------------------------- -*- C++ -*--=//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -65,6 +64,10 @@ void ento::registerTraversalDumper(CheckerManager &mgr) {
mgr.registerChecker<TraversalDumper>();
}
+bool ento::shouldRegisterTraversalDumper(const LangOptions &LO) {
+ return true;
+}
+
//------------------------------------------------------------------------------
namespace {
@@ -112,3 +115,7 @@ void CallDumper::checkPostCall(const CallEvent &Call, CheckerContext &C) const {
void ento::registerCallDumper(CheckerManager &mgr) {
mgr.registerChecker<CallDumper>();
}
+
+bool ento::shouldRegisterCallDumper(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp b/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
index 5e777803af00..62a4c2ab0209 100644
--- a/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
@@ -1,9 +1,8 @@
//== TrustNonnullChecker.cpp --------- API nullability modeling -*- C++ -*--==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -88,7 +87,7 @@ public:
if (isNonNullPtr(Call, C))
if (auto L = Call.getReturnValue().getAs<Loc>())
- State = State->assume(*L, /*Assumption=*/true);
+ State = State->assume(*L, /*assumption=*/true);
C.addTransition(State);
}
@@ -107,7 +106,7 @@ public:
(Msg.getSelector() == SetObjectForKeyedSubscriptSel ||
Msg.getSelector() == SetObjectForKeySel)) {
if (auto L = Msg.getArgSVal(1).getAs<Loc>())
- State = State->assume(*L, /*Assumption=*/true);
+ State = State->assume(*L, /*assumption=*/true);
}
// Record an implication: index is non-null if the output is non-null.
@@ -249,7 +248,10 @@ private:
} // end empty namespace
-
void ento::registerTrustNonnullChecker(CheckerManager &Mgr) {
Mgr.registerChecker<TrustNonnullChecker>(Mgr.getASTContext());
}
+
+bool ento::shouldRegisterTrustNonnullChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
index d7fad4e475ab..3a4a1dbf641b 100644
--- a/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
@@ -1,9 +1,8 @@
//=== UndefBranchChecker.cpp -----------------------------------*- C++ -*--===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -109,3 +108,7 @@ void UndefBranchChecker::checkBranchCondition(const Stmt *Condition,
void ento::registerUndefBranchChecker(CheckerManager &mgr) {
mgr.registerChecker<UndefBranchChecker>();
}
+
+bool ento::shouldRegisterUndefBranchChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
index 8a625227b81e..c787ef58660a 100644
--- a/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
@@ -1,9 +1,8 @@
// UndefCapturedBlockVarChecker.cpp - Uninitialized captured vars -*- C++ -*-=//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -100,3 +99,7 @@ UndefCapturedBlockVarChecker::checkPostStmt(const BlockExpr *BE,
void ento::registerUndefCapturedBlockVarChecker(CheckerManager &mgr) {
mgr.registerChecker<UndefCapturedBlockVarChecker>();
}
+
+bool ento::shouldRegisterUndefCapturedBlockVarChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
index 624cff6048fd..1ae287d39f11 100644
--- a/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
@@ -1,9 +1,8 @@
//=== UndefResultChecker.cpp ------------------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -186,3 +185,7 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
void ento::registerUndefResultChecker(CheckerManager &mgr) {
mgr.registerChecker<UndefResultChecker>();
}
+
+bool ento::shouldRegisterUndefResultChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
index 1d78d7cebd67..4c517d6f0562 100644
--- a/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
@@ -1,9 +1,8 @@
//===--- UndefinedArraySubscriptChecker.h ----------------------*- C++ -*--===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -62,3 +61,7 @@ UndefinedArraySubscriptChecker::checkPreStmt(const ArraySubscriptExpr *A,
void ento::registerUndefinedArraySubscriptChecker(CheckerManager &mgr) {
mgr.registerChecker<UndefinedArraySubscriptChecker>();
}
+
+bool ento::shouldRegisterUndefinedArraySubscriptChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
index 8e10bfdd2f3c..d32d2a4042de 100644
--- a/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
@@ -1,9 +1,8 @@
//===--- UndefinedAssignmentChecker.h ---------------------------*- C++ -*--==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -120,3 +119,7 @@ void UndefinedAssignmentChecker::checkBind(SVal location, SVal val,
void ento::registerUndefinedAssignmentChecker(CheckerManager &mgr) {
mgr.registerChecker<UndefinedAssignmentChecker>();
}
+
+bool ento::shouldRegisterUndefinedAssignmentChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObject.h b/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObject.h
index c3291a21c164..2fcdd6086309 100644
--- a/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObject.h
+++ b/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObject.h
@@ -1,9 +1,8 @@
//===----- UninitializedObject.h ---------------------------------*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -18,7 +17,7 @@
// won't emit warnings for objects that don't have at least one initialized
// field. This may be set with
//
-// `-analyzer-config alpha.cplusplus.UninitializedObject:Pedantic=true`.
+// `-analyzer-config optin.cplusplus.UninitializedObject:Pedantic=true`.
//
// - "NotesAsWarnings" (boolean). If set to true, the checker will emit a
// warning for each uninitialized field, as opposed to emitting one warning
@@ -26,27 +25,34 @@
// to it in notes. Defaults to false.
//
// `-analyzer-config \
-// alpha.cplusplus.UninitializedObject:NotesAsWarnings=true`.
+// optin.cplusplus.UninitializedObject:NotesAsWarnings=true`.
//
// - "CheckPointeeInitialization" (boolean). If set to false, the checker will
// not analyze the pointee of pointer/reference fields, and will only check
// whether the object itself is initialized. Defaults to false.
//
// `-analyzer-config \
-// alpha.cplusplus.UninitializedObject:CheckPointeeInitialization=true`.
+// optin.cplusplus.UninitializedObject:CheckPointeeInitialization=true`.
+//
+// TODO: With some clever heuristics, some pointers should be dereferenced
+// by default. For example, if the pointee is constructed within the
+// constructor call, it's reasonable to say that no external object
+// references it, and we wouldn't generate multiple reports on the same
+// pointee.
//
// - "IgnoreRecordsWithField" (string). If supplied, the checker will not
// analyze structures that have a field with a name or type name that
// matches the given pattern. Defaults to "".
//
// `-analyzer-config \
-// alpha.cplusplus.UninitializedObject:IgnoreRecordsWithField="[Tt]ag|[Kk]ind"`.
+// optin.cplusplus.UninitializedObject:IgnoreRecordsWithField="[Tt]ag|[Kk]ind"`.
//
-// TODO: With some clever heuristics, some pointers should be dereferenced
-// by default. For example, if the pointee is constructed within the
-// constructor call, it's reasonable to say that no external object
-// references it, and we wouldn't generate multiple report on the same
-// pointee.
+// - "IgnoreGuardedFields" (boolean). If set to true, the checker will analyze
+// _syntactically_ whether the found uninitialized object is used without a
+// preceding assert call. Defaults to false.
+//
+// `-analyzer-config \
+// optin.cplusplus.UninitializedObject:IgnoreGuardedFields=true`.
//
// Most of the following methods, as well as the checker itself, are defined in
// UninitializedObjectChecker.cpp.
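//
// For illustration only (not from the patch; the exact driver spelling may
// vary between clang versions), such an option is typically passed as:
//
//   clang --analyze -Xclang -analyzer-config \
//     -Xclang optin.cplusplus.UninitializedObject:Pedantic=true test.cpp
//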
@@ -69,6 +75,7 @@ struct UninitObjCheckerOptions {
bool ShouldConvertNotesToWarnings = false;
bool CheckPointeeInitialization = false;
std::string IgnoredRecordsWithFieldPattern;
+ bool IgnoreGuardedFields = false;
};
/// A lightweight polymorphic wrapper around FieldRegion *. We'll use this
@@ -316,8 +323,8 @@ private:
/// needs to be analyzed as much as checking whether their value is undefined.
inline bool isPrimitiveType(const QualType &T) {
return T->isBuiltinType() || T->isEnumeralType() ||
- T->isMemberPointerType() || T->isBlockPointerType() ||
- T->isFunctionType();
+ T->isFunctionType() || T->isAtomicType() ||
+ T->isVectorType() || T->isScalarType();
}
inline bool isDereferencableType(const QualType &T) {
diff --git a/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp b/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
index 208e303e8295..9d608c12d19b 100644
--- a/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
@@ -1,9 +1,8 @@
//===----- UninitializedObjectChecker.cpp ------------------------*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -20,6 +19,8 @@
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "UninitializedObject.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/Driver/DriverDiagnostic.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
@@ -27,6 +28,7 @@
using namespace clang;
using namespace clang::ento;
+using namespace clang::ast_matchers;
/// We'll mark fields (and pointee of fields) that are confirmed to be
/// uninitialized as already analyzed.
@@ -119,6 +121,16 @@ static bool willObjectBeAnalyzedLater(const CXXConstructorDecl *Ctor,
/// \p Pattern.
static bool shouldIgnoreRecord(const RecordDecl *RD, StringRef Pattern);
+/// Checks _syntactically_ whether it is possible to access FD from the record
+/// that contains it without a preceding assert (even if that access happens
+/// inside a method). This is mainly used for records that act like unions,
+/// such as those with multiple bit fields of which only a fraction is ever
+/// properly initialized.
+/// If these fields are properly guarded with asserts, this method returns
+/// false.
+///
+/// Since this check is done syntactically, this method could be inaccurate.
+static bool hasUnguardedAccess(const FieldDecl *FD, ProgramStateRef State);
+
//===----------------------------------------------------------------------===//
// Methods for UninitializedObjectChecker.
//===----------------------------------------------------------------------===//
@@ -235,6 +247,13 @@ bool FindUninitializedFields::addFieldToUninits(FieldChainInfo Chain,
"One must also pass the pointee region as a parameter for "
"dereferenceable fields!");
+ if (State->getStateManager().getContext().getSourceManager().isInSystemHeader(
+ FR->getDecl()->getLocation()))
+ return false;
+
+ if (Opts.IgnoreGuardedFields && !hasUnguardedAccess(FR->getDecl(), State))
+ return false;
+
if (State->contains<AnalyzedRegions>(FR))
return false;
@@ -247,13 +266,10 @@ bool FindUninitializedFields::addFieldToUninits(FieldChainInfo Chain,
State = State->add<AnalyzedRegions>(FR);
- if (State->getStateManager().getContext().getSourceManager().isInSystemHeader(
- FR->getDecl()->getLocation()))
- return false;
-
UninitFieldMap::mapped_type NoteMsgBuf;
llvm::raw_svector_ostream OS(NoteMsgBuf);
Chain.printNoteMsg(OS);
+
return UninitFields.insert({FR, std::move(NoteMsgBuf)}).second;
}
@@ -442,8 +458,8 @@ static const TypedValueRegion *
getConstructedRegion(const CXXConstructorDecl *CtorDecl,
CheckerContext &Context) {
- Loc ThisLoc = Context.getSValBuilder().getCXXThis(CtorDecl,
- Context.getStackFrame());
+ Loc ThisLoc =
+ Context.getSValBuilder().getCXXThis(CtorDecl, Context.getStackFrame());
SVal ObjectV = Context.getState()->getSVal(ThisLoc);
@@ -496,6 +512,75 @@ static bool shouldIgnoreRecord(const RecordDecl *RD, StringRef Pattern) {
return false;
}
+static const Stmt *getMethodBody(const CXXMethodDecl *M) {
+ if (isa<CXXConstructorDecl>(M))
+ return nullptr;
+
+ if (!M->isDefined())
+ return nullptr;
+
+ return M->getDefinition()->getBody();
+}
+
+static bool hasUnguardedAccess(const FieldDecl *FD, ProgramStateRef State) {
+
+ if (FD->getAccess() == AccessSpecifier::AS_public)
+ return true;
+
+ const auto *Parent = dyn_cast<CXXRecordDecl>(FD->getParent());
+
+ if (!Parent)
+ return true;
+
+ Parent = Parent->getDefinition();
+  assert(Parent && "The record's definition must be available if an uninitialized"
+ " field of it was found!");
+
+ ASTContext &AC = State->getStateManager().getContext();
+
+ auto FieldAccessM = memberExpr(hasDeclaration(equalsNode(FD))).bind("access");
+
+ auto AssertLikeM = callExpr(callee(functionDecl(
+ anyOf(hasName("exit"), hasName("panic"), hasName("error"),
+ hasName("Assert"), hasName("assert"), hasName("ziperr"),
+ hasName("assfail"), hasName("db_error"), hasName("__assert"),
+ hasName("__assert2"), hasName("_wassert"), hasName("__assert_rtn"),
+ hasName("__assert_fail"), hasName("dtrace_assfail"),
+ hasName("yy_fatal_error"), hasName("_XCAssertionFailureHandler"),
+ hasName("_DTAssertionFailureHandler"),
+ hasName("_TSAssertionFailureHandler")))));
+
+ auto NoReturnFuncM = callExpr(callee(functionDecl(isNoReturn())));
+
+ auto GuardM =
+ stmt(anyOf(ifStmt(), switchStmt(), conditionalOperator(), AssertLikeM,
+ NoReturnFuncM))
+ .bind("guard");
+
+ for (const CXXMethodDecl *M : Parent->methods()) {
+ const Stmt *MethodBody = getMethodBody(M);
+ if (!MethodBody)
+ continue;
+
+ auto Accesses = match(stmt(hasDescendant(FieldAccessM)), *MethodBody, AC);
+ if (Accesses.empty())
+ continue;
+ const auto *FirstAccess = Accesses[0].getNodeAs<MemberExpr>("access");
+ assert(FirstAccess);
+
+ auto Guards = match(stmt(hasDescendant(GuardM)), *MethodBody, AC);
+ if (Guards.empty())
+ return true;
+ const auto *FirstGuard = Guards[0].getNodeAs<Stmt>("guard");
+ assert(FirstGuard);
+
+ if (FirstAccess->getBeginLoc() < FirstGuard->getBeginLoc())
+ return true;
+ }
+
+ return false;
+}
+
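+// Illustrative only (not part of the patch): with IgnoreGuardedFields=true,
+// a non-public field whose only accesses are preceded by an assert-like guard
+// is skipped. A sketch of such a record (assuming asserts are compiled in):
+//
+//   class TaggedValue {
+//     int Kind = 0;
+//     int Val; // only meaningful when Kind != 0
+//   public:
+//     int get() const {
+//       assert(Kind != 0); // guard precedes the only access to 'Val'
+//       return Val;
+//     }
+//   };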
std::string clang::ento::getVariableName(const FieldDecl *Field) {
  // If Field is a captured lambda variable, Field->getName() will return an
  // empty string. We can however acquire its name from the lambda's
@@ -526,13 +611,23 @@ void ento::registerUninitializedObjectChecker(CheckerManager &Mgr) {
AnalyzerOptions &AnOpts = Mgr.getAnalyzerOptions();
UninitObjCheckerOptions &ChOpts = Chk->Opts;
- ChOpts.IsPedantic =
- AnOpts.getCheckerBooleanOption("Pedantic", /*DefaultVal*/ false, Chk);
- ChOpts.ShouldConvertNotesToWarnings =
- AnOpts.getCheckerBooleanOption("NotesAsWarnings", /*DefaultVal*/ false, Chk);
+ ChOpts.IsPedantic = AnOpts.getCheckerBooleanOption(Chk, "Pedantic");
+ ChOpts.ShouldConvertNotesToWarnings = AnOpts.getCheckerBooleanOption(
+ Chk, "NotesAsWarnings");
ChOpts.CheckPointeeInitialization = AnOpts.getCheckerBooleanOption(
- "CheckPointeeInitialization", /*DefaultVal*/ false, Chk);
+ Chk, "CheckPointeeInitialization");
ChOpts.IgnoredRecordsWithFieldPattern =
- AnOpts.getCheckerStringOption("IgnoreRecordsWithField",
- /*DefaultVal*/ "", Chk);
+ AnOpts.getCheckerStringOption(Chk, "IgnoreRecordsWithField");
+ ChOpts.IgnoreGuardedFields =
+ AnOpts.getCheckerBooleanOption(Chk, "IgnoreGuardedFields");
+
+ std::string ErrorMsg;
+ if (!llvm::Regex(ChOpts.IgnoredRecordsWithFieldPattern).isValid(ErrorMsg))
+ Mgr.reportInvalidCheckerOptionValue(Chk, "IgnoreRecordsWithField",
+ "a valid regex, building failed with error message "
+ "\"" + ErrorMsg + "\"");
+}
+
+bool ento::shouldRegisterUninitializedObjectChecker(const LangOptions &LO) {
+ return true;
}
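The hasUnguardedAccess() heuristic added above drives the new IgnoreGuardedFields option: a non-public field counts as guarded when every method that reads it does so only after an if/switch/?:, an assert-like call, or a noreturn call, with the check being purely syntactic (the first matched access must not precede the first matched guard in the method body). The following is an illustrative sketch of analyzed user code, not part of the diff; the class and member names are made up, and the comments only describe how this particular heuristic classifies the accesses.

#include <cassert>

class Guarded {
  bool HasValue;
  int Value;                         // left uninitialized by the constructor
public:
  Guarded() : HasValue(false) {}
  int get() const {
    assert(HasValue);                // an assert-like guard precedes the first
    return Value;                    // access, so the heuristic treats Value
  }                                  // as guarded and drops the note
};

class Unguarded {
  int Count;
  int Value;                         // left uninitialized as well
public:
  Unguarded() : Count(0) {}
  int get() const { return Value + Count; }  // Value is read with no guard
};                                            // before it, so it stays reported

int main() {
  Guarded G;
  Unguarded U;
  (void)G;
  (void)U;
  return 0;
}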
diff --git a/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp b/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp
index aead59c7bf87..a5dc250104f3 100644
--- a/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp
+++ b/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp
@@ -1,9 +1,8 @@
//===----- UninitializedPointee.cpp ------------------------------*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
index bab0c12704fa..2ccb519891f3 100644
--- a/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -1,9 +1,8 @@
//= UnixAPIChecker.h - Checks preconditions for various Unix APIs --*- C++ -*-//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -21,10 +20,7 @@
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/raw_ostream.h"
-#include <fcntl.h>
using namespace clang;
using namespace ento;
@@ -40,8 +36,9 @@ enum class OpenVariant {
};
namespace {
-class UnixAPIChecker : public Checker< check::PreStmt<CallExpr> > {
- mutable std::unique_ptr<BugType> BT_open, BT_pthreadOnce, BT_mallocZero;
+
+class UnixAPIMisuseChecker : public Checker< check::PreStmt<CallExpr> > {
+ mutable std::unique_ptr<BugType> BT_open, BT_pthreadOnce;
mutable Optional<uint64_t> Val_O_CREAT;
public:
@@ -51,8 +48,25 @@ public:
void CheckOpen(CheckerContext &C, const CallExpr *CE) const;
void CheckOpenAt(CheckerContext &C, const CallExpr *CE) const;
-
void CheckPthreadOnce(CheckerContext &C, const CallExpr *CE) const;
+
+ void CheckOpenVariant(CheckerContext &C,
+ const CallExpr *CE, OpenVariant Variant) const;
+
+ void ReportOpenBug(CheckerContext &C,
+ ProgramStateRef State,
+ const char *Msg,
+ SourceRange SR) const;
+
+};
+
+class UnixAPIPortabilityChecker : public Checker< check::PreStmt<CallExpr> > {
+public:
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+
+private:
+ mutable std::unique_ptr<BugType> BT_mallocZero;
+
void CheckCallocZero(CheckerContext &C, const CallExpr *CE) const;
void CheckMallocZero(CheckerContext &C, const CallExpr *CE) const;
void CheckReallocZero(CheckerContext &C, const CallExpr *CE) const;
@@ -61,13 +75,6 @@ public:
void CheckAllocaWithAlignZero(CheckerContext &C, const CallExpr *CE) const;
void CheckVallocZero(CheckerContext &C, const CallExpr *CE) const;
- typedef void (UnixAPIChecker::*SubChecker)(CheckerContext &,
- const CallExpr *) const;
-private:
-
- void CheckOpenVariant(CheckerContext &C,
- const CallExpr *CE, OpenVariant Variant) const;
-
bool ReportZeroByteAllocation(CheckerContext &C,
ProgramStateRef falseState,
const Expr *arg,
@@ -77,48 +84,75 @@ private:
const unsigned numArgs,
const unsigned sizeArg,
const char *fn) const;
- void LazyInitialize(std::unique_ptr<BugType> &BT, const char *name) const {
- if (BT)
- return;
- BT.reset(new BugType(this, name, categories::UnixAPI));
- }
- void ReportOpenBug(CheckerContext &C,
- ProgramStateRef State,
- const char *Msg,
- SourceRange SR) const;
};
+
} //end anonymous namespace
+static void LazyInitialize(const CheckerBase *Checker,
+ std::unique_ptr<BugType> &BT,
+ const char *name) {
+ if (BT)
+ return;
+ BT.reset(new BugType(Checker, name, categories::UnixAPI));
+}
+
//===----------------------------------------------------------------------===//
// "open" (man 2 open)
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===/
-void UnixAPIChecker::ReportOpenBug(CheckerContext &C,
- ProgramStateRef State,
- const char *Msg,
- SourceRange SR) const {
+void UnixAPIMisuseChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD || FD->getKind() != Decl::Function)
+ return;
+
+  // Don't treat functions in namespaces with the same name as a Unix function
+ // as a call to the Unix function.
+ const DeclContext *NamespaceCtx = FD->getEnclosingNamespaceContext();
+ if (NamespaceCtx && isa<NamespaceDecl>(NamespaceCtx))
+ return;
+
+ StringRef FName = C.getCalleeName(FD);
+ if (FName.empty())
+ return;
+
+ if (FName == "open")
+ CheckOpen(C, CE);
+
+ else if (FName == "openat")
+ CheckOpenAt(C, CE);
+
+ else if (FName == "pthread_once")
+ CheckPthreadOnce(C, CE);
+}
+void UnixAPIMisuseChecker::ReportOpenBug(CheckerContext &C,
+ ProgramStateRef State,
+ const char *Msg,
+ SourceRange SR) const {
ExplodedNode *N = C.generateErrorNode(State);
if (!N)
return;
- LazyInitialize(BT_open, "Improper use of 'open'");
+ LazyInitialize(this, BT_open, "Improper use of 'open'");
auto Report = llvm::make_unique<BugReport>(*BT_open, Msg, N);
Report->addRange(SR);
C.emitReport(std::move(Report));
}
-void UnixAPIChecker::CheckOpen(CheckerContext &C, const CallExpr *CE) const {
+void UnixAPIMisuseChecker::CheckOpen(CheckerContext &C,
+ const CallExpr *CE) const {
CheckOpenVariant(C, CE, OpenVariant::Open);
}
-void UnixAPIChecker::CheckOpenAt(CheckerContext &C, const CallExpr *CE) const {
+void UnixAPIMisuseChecker::CheckOpenAt(CheckerContext &C,
+ const CallExpr *CE) const {
CheckOpenVariant(C, CE, OpenVariant::OpenAt);
}
-void UnixAPIChecker::CheckOpenVariant(CheckerContext &C,
- const CallExpr *CE,
- OpenVariant Variant) const {
+void UnixAPIMisuseChecker::CheckOpenVariant(CheckerContext &C,
+ const CallExpr *CE,
+ OpenVariant Variant) const {
  // The index of the argument taking the open flags (O_RDONLY,
  // O_WRONLY, O_CREAT, etc.).
unsigned int FlagsArgIndex;
@@ -236,7 +270,7 @@ void UnixAPIChecker::CheckOpenVariant(CheckerContext &C,
// pthread_once
//===----------------------------------------------------------------------===//
-void UnixAPIChecker::CheckPthreadOnce(CheckerContext &C,
+void UnixAPIMisuseChecker::CheckPthreadOnce(CheckerContext &C,
const CallExpr *CE) const {
// This is similar to 'CheckDispatchOnce' in the MacOSXAPIChecker.
@@ -268,7 +302,7 @@ void UnixAPIChecker::CheckPthreadOnce(CheckerContext &C,
if (isa<VarRegion>(R) && isa<StackLocalsSpaceRegion>(R->getMemorySpace()))
os << " Perhaps you intended to declare the variable as 'static'?";
- LazyInitialize(BT_pthreadOnce, "Improper use of 'pthread_once'");
+ LazyInitialize(this, BT_pthreadOnce, "Improper use of 'pthread_once'");
auto report = llvm::make_unique<BugReport>(*BT_pthreadOnce, os.str(), N);
report->addRange(CE->getArg(0)->getSourceRange());
@@ -279,15 +313,16 @@ void UnixAPIChecker::CheckPthreadOnce(CheckerContext &C,
// "calloc", "malloc", "realloc", "reallocf", "alloca" and "valloc"
// with allocation size 0
//===----------------------------------------------------------------------===//
+
// FIXME: Eventually these should be rolled into the MallocChecker, but right now
// they're more basic and valuable for widespread use.
// Returns true if we try to do a zero byte allocation, false otherwise.
// Fills in trueState and falseState.
static bool IsZeroByteAllocation(ProgramStateRef state,
- const SVal argVal,
- ProgramStateRef *trueState,
- ProgramStateRef *falseState) {
+ const SVal argVal,
+ ProgramStateRef *trueState,
+ ProgramStateRef *falseState) {
std::tie(*trueState, *falseState) =
state->assume(argVal.castAs<DefinedSVal>());
@@ -297,15 +332,16 @@ static bool IsZeroByteAllocation(ProgramStateRef state,
// Generates an error report, indicating that the function whose name is given
// will perform a zero byte allocation.
// Returns false if an error occurred, true otherwise.
-bool UnixAPIChecker::ReportZeroByteAllocation(CheckerContext &C,
- ProgramStateRef falseState,
- const Expr *arg,
- const char *fn_name) const {
+bool UnixAPIPortabilityChecker::ReportZeroByteAllocation(
+ CheckerContext &C,
+ ProgramStateRef falseState,
+ const Expr *arg,
+ const char *fn_name) const {
ExplodedNode *N = C.generateErrorNode(falseState);
if (!N)
return false;
- LazyInitialize(BT_mallocZero,
+ LazyInitialize(this, BT_mallocZero,
"Undefined allocation of 0 bytes (CERT MEM04-C; CWE-131)");
SmallString<256> S;
@@ -322,11 +358,11 @@ bool UnixAPIChecker::ReportZeroByteAllocation(CheckerContext &C,
// Does a basic check for 0-sized allocations suitable for most of the below
// functions (modulo "calloc")
-void UnixAPIChecker::BasicAllocationCheck(CheckerContext &C,
- const CallExpr *CE,
- const unsigned numArgs,
- const unsigned sizeArg,
- const char *fn) const {
+void UnixAPIPortabilityChecker::BasicAllocationCheck(CheckerContext &C,
+ const CallExpr *CE,
+ const unsigned numArgs,
+ const unsigned sizeArg,
+ const char *fn) const {
// Sanity check for the correct number of arguments
if (CE->getNumArgs() != numArgs)
return;
@@ -351,8 +387,8 @@ void UnixAPIChecker::BasicAllocationCheck(CheckerContext &C,
C.addTransition(trueState);
}
-void UnixAPIChecker::CheckCallocZero(CheckerContext &C,
- const CallExpr *CE) const {
+void UnixAPIPortabilityChecker::CheckCallocZero(CheckerContext &C,
+ const CallExpr *CE) const {
unsigned int nArgs = CE->getNumArgs();
if (nArgs != 2)
return;
@@ -387,43 +423,39 @@ void UnixAPIChecker::CheckCallocZero(CheckerContext &C,
C.addTransition(trueState);
}
-void UnixAPIChecker::CheckMallocZero(CheckerContext &C,
- const CallExpr *CE) const {
+void UnixAPIPortabilityChecker::CheckMallocZero(CheckerContext &C,
+ const CallExpr *CE) const {
BasicAllocationCheck(C, CE, 1, 0, "malloc");
}
-void UnixAPIChecker::CheckReallocZero(CheckerContext &C,
- const CallExpr *CE) const {
+void UnixAPIPortabilityChecker::CheckReallocZero(CheckerContext &C,
+ const CallExpr *CE) const {
BasicAllocationCheck(C, CE, 2, 1, "realloc");
}
-void UnixAPIChecker::CheckReallocfZero(CheckerContext &C,
- const CallExpr *CE) const {
+void UnixAPIPortabilityChecker::CheckReallocfZero(CheckerContext &C,
+ const CallExpr *CE) const {
BasicAllocationCheck(C, CE, 2, 1, "reallocf");
}
-void UnixAPIChecker::CheckAllocaZero(CheckerContext &C,
- const CallExpr *CE) const {
+void UnixAPIPortabilityChecker::CheckAllocaZero(CheckerContext &C,
+ const CallExpr *CE) const {
BasicAllocationCheck(C, CE, 1, 0, "alloca");
}
-void UnixAPIChecker::CheckAllocaWithAlignZero(CheckerContext &C,
- const CallExpr *CE) const {
+void UnixAPIPortabilityChecker::CheckAllocaWithAlignZero(
+ CheckerContext &C,
+ const CallExpr *CE) const {
BasicAllocationCheck(C, CE, 2, 0, "__builtin_alloca_with_align");
}
-void UnixAPIChecker::CheckVallocZero(CheckerContext &C,
- const CallExpr *CE) const {
+void UnixAPIPortabilityChecker::CheckVallocZero(CheckerContext &C,
+ const CallExpr *CE) const {
BasicAllocationCheck(C, CE, 1, 0, "valloc");
}
-
-//===----------------------------------------------------------------------===//
-// Central dispatch function.
-//===----------------------------------------------------------------------===//
-
-void UnixAPIChecker::checkPreStmt(const CallExpr *CE,
- CheckerContext &C) const {
+void UnixAPIPortabilityChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD || FD->getKind() != Decl::Function)
return;
@@ -438,42 +470,40 @@ void UnixAPIChecker::checkPreStmt(const CallExpr *CE,
if (FName.empty())
return;
- if (CheckMisuse) {
- if (SubChecker SC =
- llvm::StringSwitch<SubChecker>(FName)
- .Case("open", &UnixAPIChecker::CheckOpen)
- .Case("openat", &UnixAPIChecker::CheckOpenAt)
- .Case("pthread_once", &UnixAPIChecker::CheckPthreadOnce)
- .Default(nullptr)) {
- (this->*SC)(C, CE);
- }
- }
- if (CheckPortability) {
- if (SubChecker SC =
- llvm::StringSwitch<SubChecker>(FName)
- .Case("calloc", &UnixAPIChecker::CheckCallocZero)
- .Case("malloc", &UnixAPIChecker::CheckMallocZero)
- .Case("realloc", &UnixAPIChecker::CheckReallocZero)
- .Case("reallocf", &UnixAPIChecker::CheckReallocfZero)
- .Cases("alloca", "__builtin_alloca",
- &UnixAPIChecker::CheckAllocaZero)
- .Case("__builtin_alloca_with_align",
- &UnixAPIChecker::CheckAllocaWithAlignZero)
- .Case("valloc", &UnixAPIChecker::CheckVallocZero)
- .Default(nullptr)) {
- (this->*SC)(C, CE);
- }
- }
+ if (FName == "calloc")
+ CheckCallocZero(C, CE);
+
+ else if (FName == "malloc")
+ CheckMallocZero(C, CE);
+
+ else if (FName == "realloc")
+ CheckReallocZero(C, CE);
+
+ else if (FName == "reallocf")
+ CheckReallocfZero(C, CE);
+
+ else if (FName == "alloca" || FName == "__builtin_alloca")
+ CheckAllocaZero(C, CE);
+
+ else if (FName == "__builtin_alloca_with_align")
+ CheckAllocaWithAlignZero(C, CE);
+
+ else if (FName == "valloc")
+ CheckVallocZero(C, CE);
}
//===----------------------------------------------------------------------===//
// Registration.
//===----------------------------------------------------------------------===//
-#define REGISTER_CHECKER(Name) \
- void ento::registerUnixAPI##Name##Checker(CheckerManager &mgr) { \
- mgr.registerChecker<UnixAPIChecker>()->Check##Name = true; \
+#define REGISTER_CHECKER(CHECKERNAME) \
+ void ento::register##CHECKERNAME(CheckerManager &mgr) { \
+ mgr.registerChecker<CHECKERNAME>(); \
+ } \
+ \
+ bool ento::shouldRegister##CHECKERNAME(const LangOptions &LO) { \
+ return true; \
}
-REGISTER_CHECKER(Misuse)
-REGISTER_CHECKER(Portability)
+REGISTER_CHECKER(UnixAPIMisuseChecker)
+REGISTER_CHECKER(UnixAPIPortabilityChecker)
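Where the old macro toggled Check##Name flags on a single UnixAPIChecker object, the new one registers each class as its own checker and also emits the shouldRegister predicate that the updated CheckerRegistry expects. Expanding REGISTER_CHECKER(UnixAPIMisuseChecker) above gives roughly the following; this is an illustrative expansion that relies on the declarations already present in the file, not standalone code.

void ento::registerUnixAPIMisuseChecker(CheckerManager &mgr) {
  mgr.registerChecker<UnixAPIMisuseChecker>();
}

bool ento::shouldRegisterUnixAPIMisuseChecker(const LangOptions &LO) {
  return true;
}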
diff --git a/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp b/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
index 16b4d5e925ba..0b0bf8465c9d 100644
--- a/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
@@ -1,9 +1,8 @@
//==- UnreachableCodeChecker.cpp - Generalized dead code checker -*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file implements a generalized unreachable code checker using a
@@ -205,7 +204,7 @@ const Stmt *UnreachableCodeChecker::getUnreachableStmt(const CFGBlock *CB) {
return S->getStmt();
}
}
- if (const Stmt *S = CB->getTerminator())
+ if (const Stmt *S = CB->getTerminatorStmt())
return S;
else
return nullptr;
@@ -251,9 +250,13 @@ bool UnreachableCodeChecker::isInvalidPath(const CFGBlock *CB,
bool UnreachableCodeChecker::isEmptyCFGBlock(const CFGBlock *CB) {
return CB->getLabel() == nullptr // No labels
&& CB->size() == 0 // No statements
- && !CB->getTerminator(); // No terminator
+ && !CB->getTerminatorStmt(); // No terminator
}
void ento::registerUnreachableCodeChecker(CheckerManager &mgr) {
mgr.registerChecker<UnreachableCodeChecker>();
}
+
+bool ento::shouldRegisterUnreachableCodeChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
index e458e0554ee2..1630896c3b60 100644
--- a/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -1,9 +1,8 @@
//=== VLASizeChecker.cpp - Undefined dereference checker --------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -14,6 +13,7 @@
//
//===----------------------------------------------------------------------===//
+#include "Taint.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/CharUnits.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
@@ -26,6 +26,7 @@
using namespace clang;
using namespace ento;
+using namespace taint;
namespace {
class VLASizeChecker : public Checker< check::PreStmt<DeclStmt> > {
@@ -107,7 +108,7 @@ void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
return;
// Check if the size is tainted.
- if (state->isTainted(sizeV)) {
+ if (isTainted(state, sizeV)) {
reportBug(VLA_Tainted, SE, nullptr, C,
llvm::make_unique<TaintBugVisitor>(sizeV));
return;
@@ -183,3 +184,7 @@ void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
void ento::registerVLASizeChecker(CheckerManager &mgr) {
mgr.registerChecker<VLASizeChecker>();
}
+
+bool ento::shouldRegisterVLASizeChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/ValistChecker.cpp b/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
index 748b226b7a1e..13ad3d98e8eb 100644
--- a/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
@@ -1,9 +1,8 @@
//== ValistChecker.cpp - stdarg.h macro usage checker -----------*- C++ -*--==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -276,8 +275,8 @@ void ValistChecker::reportLeakedVALists(const RegionVector &LeakedVALists,
new BugType(CheckNames[CK_Unterminated].getName().empty()
? CheckNames[CK_Uninitialized]
: CheckNames[CK_Unterminated],
- "Leaked va_list", categories::MemoryError));
- BT_leakedvalist->setSuppressOnSink(true);
+ "Leaked va_list", categories::MemoryError,
+ /*SuppressOnSink=*/true));
}
const ExplodedNode *StartNode = getStartCallSite(N, Reg);
@@ -400,11 +399,23 @@ std::shared_ptr<PathDiagnosticPiece> ValistChecker::ValistBugVisitor::VisitNode(
return std::make_shared<PathDiagnosticEventPiece>(Pos, Msg, true);
}
+void ento::registerValistBase(CheckerManager &mgr) {
+ mgr.registerChecker<ValistChecker>();
+}
+
+bool ento::shouldRegisterValistBase(const LangOptions &LO) {
+ return true;
+}
+
#define REGISTER_CHECKER(name) \
void ento::register##name##Checker(CheckerManager &mgr) { \
- ValistChecker *checker = mgr.registerChecker<ValistChecker>(); \
+ ValistChecker *checker = mgr.getChecker<ValistChecker>(); \
checker->ChecksEnabled[ValistChecker::CK_##name] = true; \
checker->CheckNames[ValistChecker::CK_##name] = mgr.getCurrentCheckName(); \
+ } \
+ \
+ bool ento::shouldRegister##name##Checker(const LangOptions &LO) { \
+ return true; \
}
REGISTER_CHECKER(Uninitialized)
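The ValistChecker variant of the macro now assumes the shared ValistChecker object was already created by registerValistBase() above, and each named sub-checker merely flips its flags on that object via getChecker<>. Expanding REGISTER_CHECKER(Uninitialized) gives roughly the following; again an illustrative expansion that depends on the surrounding file, not standalone code.

void ento::registerUninitializedChecker(CheckerManager &mgr) {
  ValistChecker *checker = mgr.getChecker<ValistChecker>();
  checker->ChecksEnabled[ValistChecker::CK_Uninitialized] = true;
  checker->CheckNames[ValistChecker::CK_Uninitialized] =
      mgr.getCurrentCheckName();
}

bool ento::shouldRegisterUninitializedChecker(const LangOptions &LO) {
  return true;
}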
diff --git a/lib/StaticAnalyzer/Checkers/VforkChecker.cpp b/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
index 3ee9f1a07fa2..40d14aa5c7d4 100644
--- a/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
@@ -1,9 +1,8 @@
//===- VforkChecker.cpp -------- Vfork usage checks --------------*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -216,3 +215,7 @@ void VforkChecker::checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const {
void ento::registerVforkChecker(CheckerManager &mgr) {
mgr.registerChecker<VforkChecker>();
}
+
+bool ento::shouldRegisterVforkChecker(const LangOptions &LO) {
+ return true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp b/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
index 567063197405..762c9c1c8d7a 100644
--- a/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
@@ -1,9 +1,8 @@
//=======- VirtualCallChecker.cpp --------------------------------*- C++ -*-==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -280,6 +279,9 @@ void ento::registerVirtualCallChecker(CheckerManager &mgr) {
VirtualCallChecker *checker = mgr.registerChecker<VirtualCallChecker>();
checker->IsPureOnly =
- mgr.getAnalyzerOptions().getCheckerBooleanOption("PureOnly", false,
- checker);
+ mgr.getAnalyzerOptions().getCheckerBooleanOption(checker, "PureOnly");
+}
+
+bool ento::shouldRegisterVirtualCallChecker(const LangOptions &LO) {
+ return true;
}
diff --git a/lib/StaticAnalyzer/Core/APSIntType.cpp b/lib/StaticAnalyzer/Core/APSIntType.cpp
index c7e95268213e..a1de10c89ed9 100644
--- a/lib/StaticAnalyzer/Core/APSIntType.cpp
+++ b/lib/StaticAnalyzer/Core/APSIntType.cpp
@@ -1,9 +1,8 @@
//===--- APSIntType.cpp - Simple record of the type of APSInts ------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/StaticAnalyzer/Core/AnalysisManager.cpp b/lib/StaticAnalyzer/Core/AnalysisManager.cpp
index 7fb1c09ca049..1b1ffff5ade8 100644
--- a/lib/StaticAnalyzer/Core/AnalysisManager.cpp
+++ b/lib/StaticAnalyzer/Core/AnalysisManager.cpp
@@ -1,9 +1,8 @@
//===-- AnalysisManager.cpp -------------------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -24,7 +23,7 @@ AnalysisManager::AnalysisManager(ASTContext &ASTCtx, DiagnosticsEngine &diags,
: AnaCtxMgr(
ASTCtx, Options.UnoptimizedCFG,
Options.ShouldIncludeImplicitDtorsInCFG,
- /*AddInitializers=*/true,
+ /*addInitializers=*/true,
Options.ShouldIncludeTemporaryDtorsInCFG,
Options.ShouldIncludeLifetimeInCFG,
// Adding LoopExit elements to the CFG is a requirement for loop
@@ -36,7 +35,9 @@ AnalysisManager::AnalysisManager(ASTContext &ASTCtx, DiagnosticsEngine &diags,
Options.ShouldConditionalizeStaticInitializers,
/*addCXXNewAllocator=*/true,
Options.ShouldIncludeRichConstructorsInCFG,
- Options.ShouldElideConstructors, injector),
+ Options.ShouldElideConstructors,
+ /*addVirtualBaseBranches=*/true,
+ injector),
Ctx(ASTCtx), Diags(diags), LangOpts(ASTCtx.getLangOpts()),
PathConsumers(PDC), CreateStoreMgr(storemgr),
CreateConstraintMgr(constraintmgr), CheckerMgr(checkerMgr),
diff --git a/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp b/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
index 0588c2bd3d35..71abe2ae6c0e 100644
--- a/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
+++ b/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
@@ -1,9 +1,8 @@
//===- AnalyzerOptions.cpp - Analysis Engine Options ----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -20,6 +19,7 @@
#include "llvm/ADT/Twine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstddef>
@@ -34,7 +34,7 @@ std::vector<StringRef>
AnalyzerOptions::getRegisteredCheckers(bool IncludeExperimental /* = false */) {
static const StringRef StaticAnalyzerChecks[] = {
#define GET_CHECKERS
-#define CHECKER(FULLNAME, CLASS, HELPTEXT, DOC_URI) \
+#define CHECKER(FULLNAME, CLASS, HELPTEXT, DOC_URI, IS_HIDDEN) \
FULLNAME,
#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
#undef CHECKER
@@ -49,6 +49,37 @@ AnalyzerOptions::getRegisteredCheckers(bool IncludeExperimental /* = false */) {
return Result;
}
+void AnalyzerOptions::printFormattedEntry(
+ llvm::raw_ostream &Out,
+ std::pair<StringRef, StringRef> EntryDescPair,
+ size_t InitialPad, size_t EntryWidth, size_t MinLineWidth) {
+
+ llvm::formatted_raw_ostream FOut(Out);
+
+ const size_t PadForDesc = InitialPad + EntryWidth;
+
+ FOut.PadToColumn(InitialPad) << EntryDescPair.first;
+  // If the current column is greater than PadForDesc, print a newline.
+ if (FOut.getColumn() > PadForDesc)
+ FOut << '\n';
+
+ FOut.PadToColumn(PadForDesc);
+
+ if (MinLineWidth == 0) {
+ FOut << EntryDescPair.second;
+ return;
+ }
+
+ for (char C : EntryDescPair.second) {
+ if (FOut.getColumn() > MinLineWidth && C == ' ') {
+ FOut << '\n';
+ FOut.PadToColumn(PadForDesc);
+ continue;
+ }
+ FOut << C;
+ }
+}
+
ExplorationStrategyKind
AnalyzerOptions::getExplorationStrategy() const {
auto K =
@@ -102,18 +133,13 @@ AnalyzerOptions::mayInlineCXXMemberFunction(
return *K >= Param;
}
-StringRef AnalyzerOptions::getCheckerStringOption(StringRef OptionName,
- StringRef DefaultVal,
- const CheckerBase *C,
+StringRef AnalyzerOptions::getCheckerStringOption(StringRef CheckerName,
+ StringRef OptionName,
bool SearchInParents) const {
- assert(C);
- // Search for a package option if the option for the checker is not specified
- // and search in parents is enabled.
- StringRef CheckerName = C->getTagDescription();
-
assert(!CheckerName.empty() &&
"Empty checker name! Make sure the checker object (including it's "
"bases!) if fully initialized before calling this function!");
+
ConfigTable::const_iterator E = Config.end();
do {
ConfigTable::const_iterator I =
@@ -122,35 +148,66 @@ StringRef AnalyzerOptions::getCheckerStringOption(StringRef OptionName,
return StringRef(I->getValue());
size_t Pos = CheckerName.rfind('.');
if (Pos == StringRef::npos)
- return DefaultVal;
+ break;
+
CheckerName = CheckerName.substr(0, Pos);
} while (!CheckerName.empty() && SearchInParents);
- return DefaultVal;
+
+ llvm_unreachable("Unknown checker option! Did you call getChecker*Option "
+ "with incorrect parameters? User input must've been "
+ "verified by CheckerRegistry.");
+
+ return "";
}
-bool AnalyzerOptions::getCheckerBooleanOption(StringRef Name, bool DefaultVal,
- const CheckerBase *C,
+StringRef AnalyzerOptions::getCheckerStringOption(const ento::CheckerBase *C,
+ StringRef OptionName,
+ bool SearchInParents) const {
+ return getCheckerStringOption(
+ C->getTagDescription(), OptionName, SearchInParents);
+}
+
+bool AnalyzerOptions::getCheckerBooleanOption(StringRef CheckerName,
+ StringRef OptionName,
bool SearchInParents) const {
- // FIXME: We should emit a warning here if the value is something other than
- // "true", "false", or the empty string (meaning the default value),
- // but the AnalyzerOptions doesn't have access to a diagnostic engine.
- assert(C);
- return llvm::StringSwitch<bool>(
- getCheckerStringOption(Name, DefaultVal ? "true" : "false", C,
+ auto Ret = llvm::StringSwitch<llvm::Optional<bool>>(
+ getCheckerStringOption(CheckerName, OptionName,
SearchInParents))
.Case("true", true)
.Case("false", false)
- .Default(DefaultVal);
+ .Default(None);
+
+ assert(Ret &&
+ "This option should be either 'true' or 'false', and should've been "
+ "validated by CheckerRegistry!");
+
+ return *Ret;
+}
+
+bool AnalyzerOptions::getCheckerBooleanOption(const ento::CheckerBase *C,
+ StringRef OptionName,
+ bool SearchInParents) const {
+ return getCheckerBooleanOption(
+ C->getTagDescription(), OptionName, SearchInParents);
}
-int AnalyzerOptions::getCheckerIntegerOption(StringRef Name, int DefaultVal,
- const CheckerBase *C,
- bool SearchInParents) const {
- int Ret = DefaultVal;
- bool HasFailed = getCheckerStringOption(Name, std::to_string(DefaultVal), C,
+int AnalyzerOptions::getCheckerIntegerOption(StringRef CheckerName,
+ StringRef OptionName,
+ bool SearchInParents) const {
+ int Ret = 0;
+ bool HasFailed = getCheckerStringOption(CheckerName, OptionName,
SearchInParents)
- .getAsInteger(10, Ret);
- assert(!HasFailed && "analyzer-config option should be numeric");
+ .getAsInteger(0, Ret);
+ assert(!HasFailed &&
+ "This option should be numeric, and should've been validated by "
+ "CheckerRegistry!");
(void)HasFailed;
return Ret;
}
+
+int AnalyzerOptions::getCheckerIntegerOption(const ento::CheckerBase *C,
+ StringRef OptionName,
+ bool SearchInParents) const {
+ return getCheckerIntegerOption(
+ C->getTagDescription(), OptionName, SearchInParents);
+}
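The option getters above no longer carry an inline default: defaults and validation now live in CheckerRegistry, so the lookup either finds a value, possibly on a parent package (for example optin.cplusplus when optin.cplusplus.UninitializedObject has no entry of its own), or hits llvm_unreachable. Below is a small self-contained sketch of that parent-package fallback using a plain std::map instead of the analyzer's ConfigTable; the key format, function name, and checker name here are only illustrative.

#include <cassert>
#include <cstddef>
#include <iostream>
#include <map>
#include <string>

static std::string getCheckerOption(
    const std::map<std::string, std::string> &Config,
    std::string CheckerName, const std::string &OptionName) {
  assert(!CheckerName.empty() && "empty checker name");
  do {
    auto I = Config.find(CheckerName + ":" + OptionName);
    if (I != Config.end())
      return I->second;
    std::size_t Pos = CheckerName.rfind('.');
    if (Pos == std::string::npos)
      break;                                // no parent package left to try
    CheckerName = CheckerName.substr(0, Pos);
  } while (!CheckerName.empty());
  // In the real code this point is unreachable: CheckerRegistry guarantees a
  // default exists for every registered option.
  return "";
}

int main() {
  std::map<std::string, std::string> Config = {
      {"optin.cplusplus:Pedantic", "true"}};
  // Resolved through the package-level fallback; prints "true".
  std::cout << getCheckerOption(Config, "optin.cplusplus.UninitializedObject",
                                "Pedantic")
            << '\n';
  return 0;
}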
diff --git a/lib/StaticAnalyzer/Core/BasicValueFactory.cpp b/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
index d8ed6942de81..7cd48bf44374 100644
--- a/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
+++ b/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
@@ -1,9 +1,8 @@
//===- BasicValueFactory.cpp - Basic values for Path Sens analysis --------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -232,9 +231,6 @@ BasicValueFactory::evalAPSInt(BinaryOperator::Opcode Op,
// FIXME: This logic should probably go higher up, where we can
// test these conditions symbolically.
- if (V1.isSigned() && V1.isNegative())
- return nullptr;
-
if (V2.isSigned() && V2.isNegative())
return nullptr;
@@ -243,8 +239,13 @@ BasicValueFactory::evalAPSInt(BinaryOperator::Opcode Op,
if (Amt >= V1.getBitWidth())
return nullptr;
- if (V1.isSigned() && Amt > V1.countLeadingZeros())
+ if (!Ctx.getLangOpts().CPlusPlus2a) {
+ if (V1.isSigned() && V1.isNegative())
+ return nullptr;
+
+ if (V1.isSigned() && Amt > V1.countLeadingZeros())
return nullptr;
+ }
return &getValue( V1.operator<<( (unsigned) Amt ));
}
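The change above makes the shift restrictions depend on the language mode: under C++2a the built-in left shift is defined for negative left operands and for results whose high bits wrap (two's-complement semantics), so evalAPSInt() now folds those constants instead of returning nullptr and losing the value. A short sketch of user code affected by it follows; it is illustrative only, and whether each shift is undefined depends solely on the -std= mode, as noted in the comments.

#include <climits>

bool shifts() {
  int a = -1 << 3;   // negative left operand: UB before C++20, defined
                     // (two's complement, yields -8) under -std=c++2a
  int b = 3 << 31;   // shifts past the leading zeros of 3: the analyzer used
                     // to drop this value, but folds it to INT_MIN when
                     // C++2a semantics apply
  return a == -8 && b == INT_MIN;
}

int main() { return shifts() ? 0 : 1; }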
diff --git a/lib/StaticAnalyzer/Core/BlockCounter.cpp b/lib/StaticAnalyzer/Core/BlockCounter.cpp
index 8c99bd808494..e7ac6f1cfa00 100644
--- a/lib/StaticAnalyzer/Core/BlockCounter.cpp
+++ b/lib/StaticAnalyzer/Core/BlockCounter.cpp
@@ -1,9 +1,8 @@
//==- BlockCounter.h - ADT for counting block visits -------------*- C++ -*-//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/StaticAnalyzer/Core/BugReporter.cpp b/lib/StaticAnalyzer/Core/BugReporter.cpp
index fd7f53210490..e5a0794f10e2 100644
--- a/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -1,9 +1,8 @@
//===- BugReporter.cpp - Generate PathDiagnostics for bugs ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -155,8 +154,6 @@ static void removeRedundantMsgs(PathPieces &path) {
case PathDiagnosticPiece::Macro:
removeRedundantMsgs(cast<PathDiagnosticMacroPiece>(*piece).subPieces);
break;
- case PathDiagnosticPiece::ControlFlow:
- break;
case PathDiagnosticPiece::Event: {
if (i == N-1)
break;
@@ -176,7 +173,9 @@ static void removeRedundantMsgs(PathPieces &path) {
}
break;
}
+ case PathDiagnosticPiece::ControlFlow:
case PathDiagnosticPiece::Note:
+ case PathDiagnosticPiece::PopUp:
break;
}
path.push_back(std::move(piece));
@@ -231,9 +230,8 @@ static bool removeUnneededCalls(PathPieces &pieces, BugReport *R,
break;
}
case PathDiagnosticPiece::ControlFlow:
- break;
-
case PathDiagnosticPiece::Note:
+ case PathDiagnosticPiece::PopUp:
break;
}
@@ -243,6 +241,16 @@ static bool removeUnneededCalls(PathPieces &pieces, BugReport *R,
return containsSomethingInteresting;
}
+/// Same logic as above to remove extra pieces.
+static void removePopUpNotes(PathPieces &Path) {
+ for (unsigned int i = 0; i < Path.size(); ++i) {
+ auto Piece = std::move(Path.front());
+ Path.pop_front();
+ if (!isa<PathDiagnosticPopUpPiece>(*Piece))
+ Path.push_back(std::move(Piece));
+ }
+}
+
/// Returns true if the given decl has been implicitly given a body, either by
/// the analyzer or by the compiler proper.
static bool hasImplicitBody(const Decl *D) {
@@ -679,7 +687,7 @@ void generateMinimalDiagForBlockEdge(const ExplodedNode *N, BlockEdge BE,
const LocationContext *LC = N->getLocationContext();
const CFGBlock *Src = BE.getSrc();
const CFGBlock *Dst = BE.getDst();
- const Stmt *T = Src->getTerminator();
+ const Stmt *T = Src->getTerminatorStmt();
if (!T)
return;
@@ -1204,7 +1212,7 @@ static void generatePathDiagnosticsForNode(const ExplodedNode *N,
const CFGBlock *BSrc = BE->getSrc();
ParentMap &PM = PDB.getParentMap();
- if (const Stmt *Term = BSrc->getTerminator()) {
+ if (const Stmt *Term = BSrc->getTerminatorStmt()) {
// Are we jumping past the loop body without ever executing the
// loop (because the condition was false)?
if (isLoop(Term)) {
@@ -1247,11 +1255,11 @@ static void generatePathDiagnosticsForNode(const ExplodedNode *N,
static std::unique_ptr<PathDiagnostic>
generateEmptyDiagnosticForReport(BugReport *R, SourceManager &SM) {
- BugType &BT = R->getBugType();
+ const BugType &BT = R->getBugType();
return llvm::make_unique<PathDiagnostic>(
R->getBugType().getCheckName(), R->getDeclWithIssue(),
R->getBugType().getName(), R->getDescription(),
- R->getShortDescription(/*Fallback=*/false), BT.getCategory(),
+ R->getShortDescription(/*UseFallback=*/false), BT.getCategory(),
R->getUniqueingLocation(), R->getUniqueingDecl(),
findExecutedLines(SM, R->getErrorNode()));
}
@@ -1370,8 +1378,7 @@ static void addContextEdges(PathPieces &pieces, SourceManager &SM,
break;
// If the source is in the same context, we're already good.
- if (std::find(SrcContexts.begin(), SrcContexts.end(), DstContext) !=
- SrcContexts.end())
+ if (llvm::find(SrcContexts, DstContext) != SrcContexts.end())
break;
// Update the subexpression node to point to the context edge.
@@ -1983,6 +1990,10 @@ static std::unique_ptr<PathDiagnostic> generatePathDiagnosticForConsumer(
(void)stillHasNotes;
}
+ // Remove pop-up notes if needed.
+ if (!Opts.ShouldAddPopUpNotes)
+ removePopUpNotes(PD->getMutablePieces());
+
// Redirect all call pieces to have valid locations.
adjustCallLocations(PD->getMutablePieces());
removePiecesWithInvalidLocations(PD->getMutablePieces());
@@ -2612,7 +2623,7 @@ std::pair<BugReport*, std::unique_ptr<VisitorsDiagnosticsTy>> findValidReport(
// Register additional node visitors.
R->addVisitor(llvm::make_unique<NilReceiverBRVisitor>());
R->addVisitor(llvm::make_unique<ConditionBRVisitor>());
- R->addVisitor(llvm::make_unique<CXXSelfAssignmentBRVisitor>());
+ R->addVisitor(llvm::make_unique<TagVisitor>());
BugReporterContext BRC(Reporter, ErrorGraph.BackMap);
@@ -2684,7 +2695,7 @@ GRBugReporter::generatePathDiagnostics(
return Out;
}
-void BugReporter::Register(BugType *BT) {
+void BugReporter::Register(const BugType *BT) {
BugTypes = F.add(BugTypes, BT);
}
@@ -2718,7 +2729,7 @@ void BugReporter::emitReport(std::unique_ptr<BugReport> R) {
R->Profile(ID);
  // Look up the equivalence class. If there isn't one, create it.
- BugType& BT = R->getBugType();
+ const BugType& BT = R->getBugType();
Register(&BT);
void *InsertPos;
BugReportEquivClass* EQ = EQClasses.FindNodeOrInsertPos(ID, InsertPos);
@@ -2836,7 +2847,7 @@ FindReportInEquivalenceClass(BugReportEquivClass& EQ,
SmallVectorImpl<BugReport*> &bugReports) {
BugReportEquivClass::iterator I = EQ.begin(), E = EQ.end();
assert(I != E);
- BugType& BT = I->getBugType();
+ const BugType& BT = I->getBugType();
// If we don't need to suppress any of the nodes because they are
// post-dominated by a sink, simply add all the nodes in the equivalence class
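The new removePopUpNotes() above filters PathDiagnosticPopUpPiece objects out of a path when the ShouldAddPopUpNotes option is off, rotating through the deque and re-appending only the pieces it keeps. Here is a self-contained sketch of that rotate-and-filter idiom on a plain std::deque; the element type and the keep-predicate are stand-ins, and the size is snapshotted up front so every element is visited exactly once.

#include <cstddef>
#include <deque>
#include <iostream>

int main() {
  // Negative numbers play the role of the pop-up pieces to be dropped.
  std::deque<int> Path = {1, -2, 3, -4, 5};

  for (std::size_t I = 0, N = Path.size(); I != N; ++I) {
    int Piece = Path.front();
    Path.pop_front();
    if (Piece >= 0)            // keep everything that is not a "pop-up"
      Path.push_back(Piece);
  }

  for (int P : Path)
    std::cout << P << ' ';     // prints: 1 3 5
  std::cout << '\n';
  return 0;
}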
diff --git a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index da94b6eb21e9..250793c4baff 100644
--- a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -1,9 +1,8 @@
//===- BugReporterVisitors.cpp - Helpers for reporting bugs ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -23,6 +22,7 @@
#include "clang/AST/Stmt.h"
#include "clang/AST/Type.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/Analysis/Analyses/Dominators.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
@@ -154,35 +154,47 @@ const Expr *bugreporter::getDerefExpr(const Stmt *S) {
return E;
}
-//===----------------------------------------------------------------------===//
-// Definitions for bug reporter visitors.
-//===----------------------------------------------------------------------===//
+/// Comparing internal representations of symbolic values (via
+/// SVal::operator==()) is a valid way to check if the value was updated,
+/// unless it's a LazyCompoundVal that may have a different internal
+/// representation every time it is loaded from the state. In this function we
+/// do an approximate comparison for lazy compound values, checking that they
+/// are the immediate snapshots of the tracked region's bindings within the
+/// node's respective states but not really checking that these snapshots
+/// actually contain the same set of bindings.
+static bool hasVisibleUpdate(const ExplodedNode *LeftNode, SVal LeftVal,
+ const ExplodedNode *RightNode, SVal RightVal) {
+ if (LeftVal == RightVal)
+ return true;
-std::shared_ptr<PathDiagnosticPiece>
-BugReporterVisitor::getEndPath(BugReporterContext &,
- const ExplodedNode *, BugReport &) {
- return nullptr;
-}
+ const auto LLCV = LeftVal.getAs<nonloc::LazyCompoundVal>();
+ if (!LLCV)
+ return false;
-void
-BugReporterVisitor::finalizeVisitor(BugReporterContext &,
- const ExplodedNode *, BugReport &) {}
+ const auto RLCV = RightVal.getAs<nonloc::LazyCompoundVal>();
+ if (!RLCV)
+ return false;
-std::shared_ptr<PathDiagnosticPiece> BugReporterVisitor::getDefaultEndPath(
- BugReporterContext &BRC, const ExplodedNode *EndPathNode, BugReport &BR) {
- PathDiagnosticLocation L =
- PathDiagnosticLocation::createEndOfPath(EndPathNode,BRC.getSourceManager());
+ return LLCV->getRegion() == RLCV->getRegion() &&
+ LLCV->getStore() == LeftNode->getState()->getStore() &&
+ RLCV->getStore() == RightNode->getState()->getStore();
+}
- const auto &Ranges = BR.getRanges();
+static Optional<const llvm::APSInt *>
+getConcreteIntegerValue(const Expr *CondVarExpr, const ExplodedNode *N) {
+ ProgramStateRef State = N->getState();
+ const LocationContext *LCtx = N->getLocationContext();
- // Only add the statement itself as a range if we didn't specify any
- // special ranges for this report.
- auto P = std::make_shared<PathDiagnosticEventPiece>(
- L, BR.getDescription(), Ranges.begin() == Ranges.end());
- for (SourceRange Range : Ranges)
- P->addRange(Range);
+ // The declaration of the value may rely on a pointer so take its l-value.
+ if (const auto *DRE = dyn_cast_or_null<DeclRefExpr>(CondVarExpr)) {
+ if (const auto *VD = dyn_cast_or_null<VarDecl>(DRE->getDecl())) {
+ SVal DeclSVal = State->getSVal(State->getLValue(VD, LCtx));
+ if (auto DeclCI = DeclSVal.getAs<nonloc::ConcreteInt>())
+ return &DeclCI->getValue();
+ }
+ }
- return P;
+ return {};
}
/// \return name of the macro inside the location \p Loc.
@@ -209,36 +221,70 @@ static bool isFunctionMacroExpansion(SourceLocation Loc,
}
/// \return Whether \c RegionOfInterest was modified at \p N,
-/// where \p ReturnState is a state associated with the return
-/// from the current frame.
-static bool wasRegionOfInterestModifiedAt(
- const SubRegion *RegionOfInterest,
- const ExplodedNode *N,
- SVal ValueAfter) {
+/// where \p ValueAfter is \c RegionOfInterest's value at the end of the
+/// stack frame.
+static bool wasRegionOfInterestModifiedAt(const SubRegion *RegionOfInterest,
+ const ExplodedNode *N,
+ SVal ValueAfter) {
ProgramStateRef State = N->getState();
ProgramStateManager &Mgr = N->getState()->getStateManager();
- if (!N->getLocationAs<PostStore>()
- && !N->getLocationAs<PostInitializer>()
- && !N->getLocationAs<PostStmt>())
+ if (!N->getLocationAs<PostStore>() && !N->getLocationAs<PostInitializer>() &&
+ !N->getLocationAs<PostStmt>())
return false;
// Writing into region of interest.
if (auto PS = N->getLocationAs<PostStmt>())
if (auto *BO = PS->getStmtAs<BinaryOperator>())
if (BO->isAssignmentOp() && RegionOfInterest->isSubRegionOf(
- N->getSVal(BO->getLHS()).getAsRegion()))
+ N->getSVal(BO->getLHS()).getAsRegion()))
return true;
// SVal after the state is possibly different.
SVal ValueAtN = N->getState()->getSVal(RegionOfInterest);
- if (!Mgr.getSValBuilder().areEqual(State, ValueAtN, ValueAfter).isConstrainedTrue() &&
+ if (!Mgr.getSValBuilder()
+ .areEqual(State, ValueAtN, ValueAfter)
+ .isConstrainedTrue() &&
(!ValueAtN.isUndef() || !ValueAfter.isUndef()))
return true;
return false;
}
+//===----------------------------------------------------------------------===//
+// Implementation of BugReporterVisitor.
+//===----------------------------------------------------------------------===//
+
+std::shared_ptr<PathDiagnosticPiece>
+BugReporterVisitor::getEndPath(BugReporterContext &,
+ const ExplodedNode *, BugReport &) {
+ return nullptr;
+}
+
+void
+BugReporterVisitor::finalizeVisitor(BugReporterContext &,
+ const ExplodedNode *, BugReport &) {}
+
+std::shared_ptr<PathDiagnosticPiece> BugReporterVisitor::getDefaultEndPath(
+ BugReporterContext &BRC, const ExplodedNode *EndPathNode, BugReport &BR) {
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::createEndOfPath(EndPathNode,BRC.getSourceManager());
+
+ const auto &Ranges = BR.getRanges();
+
+ // Only add the statement itself as a range if we didn't specify any
+ // special ranges for this report.
+ auto P = std::make_shared<PathDiagnosticEventPiece>(
+ L, BR.getDescription(), Ranges.begin() == Ranges.end());
+ for (SourceRange Range : Ranges)
+ P->addRange(Range);
+
+ return P;
+}
+
+//===----------------------------------------------------------------------===//
+// Implementation of NoStoreFuncVisitor.
+//===----------------------------------------------------------------------===//
namespace {
@@ -269,6 +315,7 @@ class NoStoreFuncVisitor final : public BugReporterVisitor {
llvm::SmallPtrSet<const StackFrameContext *, 32> FramesModifyingCalculated;
using RegionVector = SmallVector<const MemRegion *, 5>;
+
public:
NoStoreFuncVisitor(const SubRegion *R)
: RegionOfInterest(R), MmrMgr(*R->getMemRegionManager()),
@@ -281,354 +328,418 @@ public:
ID.AddPointer(RegionOfInterest);
}
+ void *getTag() const {
+ static int Tag = 0;
+ return static_cast<void *>(&Tag);
+ }
+
std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
BugReporterContext &BR,
- BugReport &) override {
+ BugReport &R) override;
+private:
+ /// Attempts to find the region of interest in a given record decl,
+ /// by either following the base classes or fields.
+ /// Dereferences fields up to a given recursion limit.
+ /// Note that \p Vec is passed by value, leading to quadratic copying cost,
+ /// but it's OK in practice since its length is limited to DEREFERENCE_LIMIT.
+  /// \return A chain of fields leading to the region of interest or None.
+ const Optional<RegionVector>
+ findRegionOfInterestInRecord(const RecordDecl *RD, ProgramStateRef State,
+ const MemRegion *R, const RegionVector &Vec = {},
+ int depth = 0);
+
+ /// Check and lazily calculate whether the region of interest is
+ /// modified in the stack frame to which \p N belongs.
+ /// The calculation is cached in FramesModifyingRegion.
+ bool isRegionOfInterestModifiedInFrame(const ExplodedNode *N) {
const LocationContext *Ctx = N->getLocationContext();
const StackFrameContext *SCtx = Ctx->getStackFrame();
- ProgramStateRef State = N->getState();
- auto CallExitLoc = N->getLocationAs<CallExitBegin>();
+ if (!FramesModifyingCalculated.count(SCtx))
+ findModifyingFrames(N);
+ return FramesModifyingRegion.count(SCtx);
+ }
- // No diagnostic if region was modified inside the frame.
- if (!CallExitLoc || isRegionOfInterestModifiedInFrame(N))
- return nullptr;
+ /// Write to \c FramesModifyingRegion all stack frames along
+ /// the path in the current stack frame which modify \c RegionOfInterest.
+ void findModifyingFrames(const ExplodedNode *N);
- CallEventRef<> Call =
- BR.getStateManager().getCallEventManager().getCaller(SCtx, State);
+ /// Consume the information on the no-store stack frame in order to
+  /// either emit a note or suppress the report entirely.
+ /// \return Diagnostics piece for region not modified in the current function,
+ /// if it decides to emit one.
+ std::shared_ptr<PathDiagnosticPiece>
+ maybeEmitNote(BugReport &R, const CallEvent &Call, const ExplodedNode *N,
+ const RegionVector &FieldChain, const MemRegion *MatchedRegion,
+ StringRef FirstElement, bool FirstIsReferenceType,
+ unsigned IndirectionLevel);
- if (SM.isInSystemHeader(Call->getDecl()->getSourceRange().getBegin()))
- return nullptr;
+ /// Pretty-print region \p MatchedRegion to \p os.
+ /// \return Whether printing succeeded.
+ bool prettyPrintRegionName(StringRef FirstElement, bool FirstIsReferenceType,
+ const MemRegion *MatchedRegion,
+ const RegionVector &FieldChain,
+ int IndirectionLevel,
+ llvm::raw_svector_ostream &os);
- // Region of interest corresponds to an IVar, exiting a method
- // which could have written into that IVar, but did not.
- if (const auto *MC = dyn_cast<ObjCMethodCall>(Call)) {
- if (const auto *IvarR = dyn_cast<ObjCIvarRegion>(RegionOfInterest)) {
- const MemRegion *SelfRegion = MC->getReceiverSVal().getAsRegion();
- if (RegionOfInterest->isSubRegionOf(SelfRegion) &&
- potentiallyWritesIntoIvar(Call->getRuntimeDefinition().getDecl(),
- IvarR->getDecl()))
- return notModifiedDiagnostics(Ctx, *CallExitLoc, Call, {}, SelfRegion,
- "self", /*FirstIsReferenceType=*/false,
- 1);
- }
- }
+ /// Print first item in the chain, return new separator.
+ static StringRef prettyPrintFirstElement(StringRef FirstElement,
+ bool MoreItemsExpected,
+ int IndirectionLevel,
+ llvm::raw_svector_ostream &os);
+};
- if (const auto *CCall = dyn_cast<CXXConstructorCall>(Call)) {
- const MemRegion *ThisR = CCall->getCXXThisVal().getAsRegion();
- if (RegionOfInterest->isSubRegionOf(ThisR)
- && !CCall->getDecl()->isImplicit())
- return notModifiedDiagnostics(Ctx, *CallExitLoc, Call, {}, ThisR,
- "this",
- /*FirstIsReferenceType=*/false, 1);
+} // end of anonymous namespace
- // Do not generate diagnostics for not modified parameters in
- // constructors.
- return nullptr;
- }
+/// \return Whether the method declaration \p Parent
+/// syntactically has a binary operation writing into the ivar \p Ivar.
+static bool potentiallyWritesIntoIvar(const Decl *Parent,
+ const ObjCIvarDecl *Ivar) {
+ using namespace ast_matchers;
+ const char *IvarBind = "Ivar";
+ if (!Parent || !Parent->hasBody())
+ return false;
+ StatementMatcher WriteIntoIvarM = binaryOperator(
+ hasOperatorName("="),
+ hasLHS(ignoringParenImpCasts(
+ objcIvarRefExpr(hasDeclaration(equalsNode(Ivar))).bind(IvarBind))));
+ StatementMatcher ParentM = stmt(hasDescendant(WriteIntoIvarM));
+ auto Matches = match(ParentM, *Parent->getBody(), Parent->getASTContext());
+ for (BoundNodes &Match : Matches) {
+ auto IvarRef = Match.getNodeAs<ObjCIvarRefExpr>(IvarBind);
+ if (IvarRef->isFreeIvar())
+ return true;
- ArrayRef<ParmVarDecl *> parameters = getCallParameters(Call);
- for (unsigned I = 0; I < Call->getNumArgs() && I < parameters.size(); ++I) {
- const ParmVarDecl *PVD = parameters[I];
- SVal S = Call->getArgSVal(I);
- bool ParamIsReferenceType = PVD->getType()->isReferenceType();
- std::string ParamName = PVD->getNameAsString();
-
- int IndirectionLevel = 1;
- QualType T = PVD->getType();
- while (const MemRegion *R = S.getAsRegion()) {
- if (RegionOfInterest->isSubRegionOf(R) && !isPointerToConst(T))
- return notModifiedDiagnostics(Ctx, *CallExitLoc, Call, {}, R,
- ParamName, ParamIsReferenceType,
- IndirectionLevel);
-
- QualType PT = T->getPointeeType();
- if (PT.isNull() || PT->isVoidType()) break;
-
- if (const RecordDecl *RD = PT->getAsRecordDecl())
- if (auto P = findRegionOfInterestInRecord(RD, State, R))
- return notModifiedDiagnostics(
- Ctx, *CallExitLoc, Call, *P, RegionOfInterest, ParamName,
- ParamIsReferenceType, IndirectionLevel);
-
- S = State->getSVal(R, PT);
- T = PT;
- IndirectionLevel++;
- }
- }
+ const Expr *Base = IvarRef->getBase();
+ if (const auto *ICE = dyn_cast<ImplicitCastExpr>(Base))
+ Base = ICE->getSubExpr();
- return nullptr;
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(Base))
+ if (const auto *ID = dyn_cast<ImplicitParamDecl>(DRE->getDecl()))
+ if (ID->getParameterKind() == ImplicitParamDecl::ObjCSelf)
+ return true;
+
+ return false;
}
+ return false;
+}
-private:
- /// Attempts to find the region of interest in a given CXX decl,
- /// by either following the base classes or fields.
- /// Dereferences fields up to a given recursion limit.
- /// Note that \p Vec is passed by value, leading to quadratic copying cost,
- /// but it's OK in practice since its length is limited to DEREFERENCE_LIMIT.
- /// \return A chain fields leading to the region of interest or None.
- const Optional<RegionVector>
- findRegionOfInterestInRecord(const RecordDecl *RD, ProgramStateRef State,
- const MemRegion *R,
- const RegionVector &Vec = {},
- int depth = 0) {
+/// Get parameters associated with runtime definition in order
+/// to get the correct parameter name.
+static ArrayRef<ParmVarDecl *> getCallParameters(CallEventRef<> Call) {
+ // Use runtime definition, if available.
+ RuntimeDefinition RD = Call->getRuntimeDefinition();
+ if (const auto *FD = dyn_cast_or_null<FunctionDecl>(RD.getDecl()))
+ return FD->parameters();
+ if (const auto *MD = dyn_cast_or_null<ObjCMethodDecl>(RD.getDecl()))
+ return MD->parameters();
+
+ return Call->parameters();
+}
- if (depth == DEREFERENCE_LIMIT) // Limit the recursion depth.
+/// \return whether \p Ty points to a const type, or is a const reference.
+static bool isPointerToConst(QualType Ty) {
+ return !Ty->getPointeeType().isNull() &&
+ Ty->getPointeeType().getCanonicalType().isConstQualified();
+}
+
+/// Attempts to find the region of interest in a given CXX decl,
+/// by either following the base classes or fields.
+/// Dereferences fields up to a given recursion limit.
+/// Note that \p Vec is passed by value, leading to quadratic copying cost,
+/// but it's OK in practice since its length is limited to DEREFERENCE_LIMIT.
+/// \return A chain of fields leading to the region of interest or None.
+const Optional<NoStoreFuncVisitor::RegionVector>
+NoStoreFuncVisitor::findRegionOfInterestInRecord(
+ const RecordDecl *RD, ProgramStateRef State, const MemRegion *R,
+ const NoStoreFuncVisitor::RegionVector &Vec /* = {} */,
+ int depth /* = 0 */) {
+
+ if (depth == DEREFERENCE_LIMIT) // Limit the recursion depth.
+ return None;
+
+ if (const auto *RDX = dyn_cast<CXXRecordDecl>(RD))
+ if (!RDX->hasDefinition())
return None;
- if (const auto *RDX = dyn_cast<CXXRecordDecl>(RD))
- if (!RDX->hasDefinition())
- return None;
-
- // Recursively examine the base classes.
- // Note that following base classes does not increase the recursion depth.
- if (const auto *RDX = dyn_cast<CXXRecordDecl>(RD))
- for (const auto II : RDX->bases())
- if (const RecordDecl *RRD = II.getType()->getAsRecordDecl())
- if (auto Out = findRegionOfInterestInRecord(RRD, State, R, Vec, depth))
- return Out;
-
- for (const FieldDecl *I : RD->fields()) {
- QualType FT = I->getType();
- const FieldRegion *FR = MmrMgr.getFieldRegion(I, cast<SubRegion>(R));
- const SVal V = State->getSVal(FR);
- const MemRegion *VR = V.getAsRegion();
-
- RegionVector VecF = Vec;
- VecF.push_back(FR);
-
- if (RegionOfInterest == VR)
- return VecF;
-
- if (const RecordDecl *RRD = FT->getAsRecordDecl())
- if (auto Out =
- findRegionOfInterestInRecord(RRD, State, FR, VecF, depth + 1))
+ // Recursively examine the base classes.
+ // Note that following base classes does not increase the recursion depth.
+ if (const auto *RDX = dyn_cast<CXXRecordDecl>(RD))
+ for (const auto II : RDX->bases())
+ if (const RecordDecl *RRD = II.getType()->getAsRecordDecl())
+ if (Optional<RegionVector> Out =
+ findRegionOfInterestInRecord(RRD, State, R, Vec, depth))
return Out;
- QualType PT = FT->getPointeeType();
- if (PT.isNull() || PT->isVoidType() || !VR) continue;
+ for (const FieldDecl *I : RD->fields()) {
+ QualType FT = I->getType();
+ const FieldRegion *FR = MmrMgr.getFieldRegion(I, cast<SubRegion>(R));
+ const SVal V = State->getSVal(FR);
+ const MemRegion *VR = V.getAsRegion();
- if (const RecordDecl *RRD = PT->getAsRecordDecl())
- if (auto Out =
- findRegionOfInterestInRecord(RRD, State, VR, VecF, depth + 1))
- return Out;
+ RegionVector VecF = Vec;
+ VecF.push_back(FR);
- }
+ if (RegionOfInterest == VR)
+ return VecF;
- return None;
+ if (const RecordDecl *RRD = FT->getAsRecordDecl())
+ if (auto Out =
+ findRegionOfInterestInRecord(RRD, State, FR, VecF, depth + 1))
+ return Out;
+
+ QualType PT = FT->getPointeeType();
+ if (PT.isNull() || PT->isVoidType() || !VR)
+ continue;
+
+ if (const RecordDecl *RRD = PT->getAsRecordDecl())
+ if (Optional<RegionVector> Out =
+ findRegionOfInterestInRecord(RRD, State, VR, VecF, depth + 1))
+ return Out;
}
- /// \return Whether the method declaration \p Parent
- /// syntactically has a binary operation writing into the ivar \p Ivar.
- bool potentiallyWritesIntoIvar(const Decl *Parent,
- const ObjCIvarDecl *Ivar) {
- using namespace ast_matchers;
- const char * IvarBind = "Ivar";
- if (!Parent || !Parent->hasBody())
- return false;
- StatementMatcher WriteIntoIvarM = binaryOperator(
- hasOperatorName("="),
- hasLHS(ignoringParenImpCasts(
- objcIvarRefExpr(hasDeclaration(equalsNode(Ivar))).bind(IvarBind))));
- StatementMatcher ParentM = stmt(hasDescendant(WriteIntoIvarM));
- auto Matches = match(ParentM, *Parent->getBody(), Parent->getASTContext());
- for (BoundNodes &Match : Matches) {
- auto IvarRef = Match.getNodeAs<ObjCIvarRefExpr>(IvarBind);
- if (IvarRef->isFreeIvar())
- return true;
+ return None;
+}
+
+std::shared_ptr<PathDiagnosticPiece>
+NoStoreFuncVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BR,
+ BugReport &R) {
- const Expr *Base = IvarRef->getBase();
- if (const auto *ICE = dyn_cast<ImplicitCastExpr>(Base))
- Base = ICE->getSubExpr();
+ const LocationContext *Ctx = N->getLocationContext();
+ const StackFrameContext *SCtx = Ctx->getStackFrame();
+ ProgramStateRef State = N->getState();
+ auto CallExitLoc = N->getLocationAs<CallExitBegin>();
- if (const auto *DRE = dyn_cast<DeclRefExpr>(Base))
- if (const auto *ID = dyn_cast<ImplicitParamDecl>(DRE->getDecl()))
- if (ID->getParameterKind() == ImplicitParamDecl::ObjCSelf)
- return true;
+ // No diagnostic if region was modified inside the frame.
+ if (!CallExitLoc || isRegionOfInterestModifiedInFrame(N))
+ return nullptr;
- return false;
+ CallEventRef<> Call =
+ BR.getStateManager().getCallEventManager().getCaller(SCtx, State);
+
+ // The region of interest corresponds to an IVar, and we are exiting a method
+ // which could have written into that IVar but did not.
+ if (const auto *MC = dyn_cast<ObjCMethodCall>(Call)) {
+ if (const auto *IvarR = dyn_cast<ObjCIvarRegion>(RegionOfInterest)) {
+ const MemRegion *SelfRegion = MC->getReceiverSVal().getAsRegion();
+ if (RegionOfInterest->isSubRegionOf(SelfRegion) &&
+ potentiallyWritesIntoIvar(Call->getRuntimeDefinition().getDecl(),
+ IvarR->getDecl()))
+ return maybeEmitNote(R, *Call, N, {}, SelfRegion, "self",
+ /*FirstIsReferenceType=*/false, 1);
}
- return false;
}
- /// Check and lazily calculate whether the region of interest is
- /// modified in the stack frame to which \p N belongs.
- /// The calculation is cached in FramesModifyingRegion.
- bool isRegionOfInterestModifiedInFrame(const ExplodedNode *N) {
- const LocationContext *Ctx = N->getLocationContext();
- const StackFrameContext *SCtx = Ctx->getStackFrame();
- if (!FramesModifyingCalculated.count(SCtx))
- findModifyingFrames(N);
- return FramesModifyingRegion.count(SCtx);
+ if (const auto *CCall = dyn_cast<CXXConstructorCall>(Call)) {
+ const MemRegion *ThisR = CCall->getCXXThisVal().getAsRegion();
+ if (RegionOfInterest->isSubRegionOf(ThisR) &&
+ !CCall->getDecl()->isImplicit())
+ return maybeEmitNote(R, *Call, N, {}, ThisR, "this",
+ /*FirstIsReferenceType=*/false, 1);
+
+ // Do not generate diagnostics for parameters that are not modified in
+ // constructors.
+ return nullptr;
}
+ ArrayRef<ParmVarDecl *> parameters = getCallParameters(Call);
+ for (unsigned I = 0; I < Call->getNumArgs() && I < parameters.size(); ++I) {
+ const ParmVarDecl *PVD = parameters[I];
+ SVal V = Call->getArgSVal(I);
+ bool ParamIsReferenceType = PVD->getType()->isReferenceType();
+ std::string ParamName = PVD->getNameAsString();
- /// Write to \c FramesModifyingRegion all stack frames along
- /// the path in the current stack frame which modify \c RegionOfInterest.
- void findModifyingFrames(const ExplodedNode *N) {
- assert(N->getLocationAs<CallExitBegin>());
- ProgramStateRef LastReturnState = N->getState();
- SVal ValueAtReturn = LastReturnState->getSVal(RegionOfInterest);
- const LocationContext *Ctx = N->getLocationContext();
- const StackFrameContext *OriginalSCtx = Ctx->getStackFrame();
+ int IndirectionLevel = 1;
+ QualType T = PVD->getType();
+ while (const MemRegion *MR = V.getAsRegion()) {
+ if (RegionOfInterest->isSubRegionOf(MR) && !isPointerToConst(T))
+ return maybeEmitNote(R, *Call, N, {}, MR, ParamName,
+ ParamIsReferenceType, IndirectionLevel);
- do {
- ProgramStateRef State = N->getState();
- auto CallExitLoc = N->getLocationAs<CallExitBegin>();
- if (CallExitLoc) {
- LastReturnState = State;
- ValueAtReturn = LastReturnState->getSVal(RegionOfInterest);
- }
-
- FramesModifyingCalculated.insert(
- N->getLocationContext()->getStackFrame());
-
- if (wasRegionOfInterestModifiedAt(RegionOfInterest, N, ValueAtReturn)) {
- const StackFrameContext *SCtx = N->getStackFrame();
- while (!SCtx->inTopFrame()) {
- auto p = FramesModifyingRegion.insert(SCtx);
- if (!p.second)
- break; // Frame and all its parents already inserted.
- SCtx = SCtx->getParent()->getStackFrame();
- }
- }
+ QualType PT = T->getPointeeType();
+ if (PT.isNull() || PT->isVoidType())
+ break;
- // Stop calculation at the call to the current function.
- if (auto CE = N->getLocationAs<CallEnter>())
- if (CE->getCalleeContext() == OriginalSCtx)
- break;
+ if (const RecordDecl *RD = PT->getAsRecordDecl())
+ if (Optional<RegionVector> P =
+ findRegionOfInterestInRecord(RD, State, MR))
+ return maybeEmitNote(R, *Call, N, *P, RegionOfInterest, ParamName,
+ ParamIsReferenceType, IndirectionLevel);
- N = N->getFirstPred();
- } while (N);
+ V = State->getSVal(MR, PT);
+ T = PT;
+ IndirectionLevel++;
+ }
}
- /// Get parameters associated with runtime definition in order
- /// to get the correct parameter name.
- ArrayRef<ParmVarDecl *> getCallParameters(CallEventRef<> Call) {
- // Use runtime definition, if available.
- RuntimeDefinition RD = Call->getRuntimeDefinition();
- if (const auto *FD = dyn_cast_or_null<FunctionDecl>(RD.getDecl()))
- return FD->parameters();
- if (const auto *MD = dyn_cast_or_null<ObjCMethodDecl>(RD.getDecl()))
- return MD->parameters();
+ return nullptr;
+}
- return Call->parameters();
- }
+void NoStoreFuncVisitor::findModifyingFrames(const ExplodedNode *N) {
+ assert(N->getLocationAs<CallExitBegin>());
+ ProgramStateRef LastReturnState = N->getState();
+ SVal ValueAtReturn = LastReturnState->getSVal(RegionOfInterest);
+ const LocationContext *Ctx = N->getLocationContext();
+ const StackFrameContext *OriginalSCtx = Ctx->getStackFrame();
- /// \return whether \p Ty points to a const type, or is a const reference.
- bool isPointerToConst(QualType Ty) {
- return !Ty->getPointeeType().isNull() &&
- Ty->getPointeeType().getCanonicalType().isConstQualified();
- }
+ do {
+ ProgramStateRef State = N->getState();
+ auto CallExitLoc = N->getLocationAs<CallExitBegin>();
+ if (CallExitLoc) {
+ LastReturnState = State;
+ ValueAtReturn = LastReturnState->getSVal(RegionOfInterest);
+ }
- /// \return Diagnostics piece for region not modified in the current function.
- std::shared_ptr<PathDiagnosticPiece>
- notModifiedDiagnostics(const LocationContext *Ctx, CallExitBegin &CallExitLoc,
- CallEventRef<> Call, const RegionVector &FieldChain,
- const MemRegion *MatchedRegion, StringRef FirstElement,
- bool FirstIsReferenceType, unsigned IndirectionLevel) {
-
- PathDiagnosticLocation L;
- if (const ReturnStmt *RS = CallExitLoc.getReturnStmt()) {
- L = PathDiagnosticLocation::createBegin(RS, SM, Ctx);
- } else {
- L = PathDiagnosticLocation(
- Call->getRuntimeDefinition().getDecl()->getSourceRange().getEnd(),
- SM);
+ FramesModifyingCalculated.insert(N->getLocationContext()->getStackFrame());
+
+ if (wasRegionOfInterestModifiedAt(RegionOfInterest, N, ValueAtReturn)) {
+ const StackFrameContext *SCtx = N->getStackFrame();
+ while (!SCtx->inTopFrame()) {
+ auto p = FramesModifyingRegion.insert(SCtx);
+ if (!p.second)
+ break; // Frame and all its parents already inserted.
+ SCtx = SCtx->getParent()->getStackFrame();
+ }
}
- SmallString<256> sbuf;
- llvm::raw_svector_ostream os(sbuf);
- os << "Returning without writing to '";
+ // Stop calculation at the call to the current function.
+ if (auto CE = N->getLocationAs<CallEnter>())
+ if (CE->getCalleeContext() == OriginalSCtx)
+ break;
- // Do not generate the note if failed to pretty-print.
- if (!prettyPrintRegionName(FirstElement, FirstIsReferenceType,
- MatchedRegion, FieldChain, IndirectionLevel, os))
- return nullptr;
+ N = N->getFirstPred();
+ } while (N);
+}
- os << "'";
- return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
+std::shared_ptr<PathDiagnosticPiece> NoStoreFuncVisitor::maybeEmitNote(
+ BugReport &R, const CallEvent &Call, const ExplodedNode *N,
+ const RegionVector &FieldChain, const MemRegion *MatchedRegion,
+ StringRef FirstElement, bool FirstIsReferenceType,
+ unsigned IndirectionLevel) {
+ // Optimistically suppress uninitialized value bugs that result
+ // from system headers having a chance to initialize the value
+ // but failing to do so. It is unlikely to be the system header's fault.
+ // It's much more likely a situation in which the function has a failure
+ // mode that the user decided not to check. If we want to hunt such
+ // omitted checks, we should provide an explicit function-specific note
+ // describing the precondition under which the function isn't supposed to
+ // initialize its out-parameter, and additionally check that such
+ // precondition can actually be fulfilled on the current path.
+ if (Call.isInSystemHeader()) {
+ // We make an exception for system header functions that have no branches.
+ // Such functions unconditionally fail to initialize the variable.
+ // If they call other functions that have more paths within them,
+ // this suppression would still apply when we visit these inner functions.
+ // One common example of a standard function that doesn't ever initialize
+ // its out parameter is operator placement new; it's up to the follow-up
+ // constructor (if any) to initialize the memory.
+ if (!N->getStackFrame()->getCFG()->isLinear())
+ R.markInvalid(getTag(), nullptr);
+ return nullptr;
}
- /// Pretty-print region \p MatchedRegion to \p os.
- /// \return Whether printing succeeded.
- bool prettyPrintRegionName(StringRef FirstElement, bool FirstIsReferenceType,
- const MemRegion *MatchedRegion,
- const RegionVector &FieldChain,
- int IndirectionLevel,
- llvm::raw_svector_ostream &os) {
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::create(N->getLocation(), SM);
+
+ // For now this shouldn't trigger, but once it does (as we add more
+ // functions to the body farm), we'll need to decide if these reports
+ // are worth suppressing as well.
+ if (!L.hasValidLocation())
+ return nullptr;
- if (FirstIsReferenceType)
- IndirectionLevel--;
+ SmallString<256> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+ os << "Returning without writing to '";
- RegionVector RegionSequence;
+ // Do not generate the note if failed to pretty-print.
+ if (!prettyPrintRegionName(FirstElement, FirstIsReferenceType, MatchedRegion,
+ FieldChain, IndirectionLevel, os))
+ return nullptr;
- // Add the regions in the reverse order, then reverse the resulting array.
- assert(RegionOfInterest->isSubRegionOf(MatchedRegion));
- const MemRegion *R = RegionOfInterest;
- while (R != MatchedRegion) {
- RegionSequence.push_back(R);
- R = cast<SubRegion>(R)->getSuperRegion();
- }
- std::reverse(RegionSequence.begin(), RegionSequence.end());
- RegionSequence.append(FieldChain.begin(), FieldChain.end());
+ os << "'";
+ return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
+}
- StringRef Sep;
- for (const MemRegion *R : RegionSequence) {
+bool NoStoreFuncVisitor::prettyPrintRegionName(StringRef FirstElement,
+ bool FirstIsReferenceType,
+ const MemRegion *MatchedRegion,
+ const RegionVector &FieldChain,
+ int IndirectionLevel,
+ llvm::raw_svector_ostream &os) {
- // Just keep going up to the base region.
- // Element regions may appear due to casts.
- if (isa<CXXBaseObjectRegion>(R) || isa<CXXTempObjectRegion>(R))
- continue;
+ if (FirstIsReferenceType)
+ IndirectionLevel--;
- if (Sep.empty())
- Sep = prettyPrintFirstElement(FirstElement,
- /*MoreItemsExpected=*/true,
- IndirectionLevel, os);
+ RegionVector RegionSequence;
- os << Sep;
+ // Add the regions in the reverse order, then reverse the resulting array.
+ assert(RegionOfInterest->isSubRegionOf(MatchedRegion));
+ const MemRegion *R = RegionOfInterest;
+ while (R != MatchedRegion) {
+ RegionSequence.push_back(R);
+ R = cast<SubRegion>(R)->getSuperRegion();
+ }
+ std::reverse(RegionSequence.begin(), RegionSequence.end());
+ RegionSequence.append(FieldChain.begin(), FieldChain.end());
- // Can only reasonably pretty-print DeclRegions.
- if (!isa<DeclRegion>(R))
- return false;
+ StringRef Sep;
+ for (const MemRegion *R : RegionSequence) {
- const auto *DR = cast<DeclRegion>(R);
- Sep = DR->getValueType()->isAnyPointerType() ? "->" : ".";
- DR->getDecl()->getDeclName().print(os, PP);
- }
+ // Just keep going up to the base region.
+ // Element regions may appear due to casts.
+ if (isa<CXXBaseObjectRegion>(R) || isa<CXXTempObjectRegion>(R))
+ continue;
if (Sep.empty())
- prettyPrintFirstElement(FirstElement,
- /*MoreItemsExpected=*/false, IndirectionLevel,
- os);
- return true;
- }
+ Sep = prettyPrintFirstElement(FirstElement,
+ /*MoreItemsExpected=*/true,
+ IndirectionLevel, os);
- /// Print first item in the chain, return new separator.
- StringRef prettyPrintFirstElement(StringRef FirstElement,
- bool MoreItemsExpected,
- int IndirectionLevel,
- llvm::raw_svector_ostream &os) {
- StringRef Out = ".";
-
- if (IndirectionLevel > 0 && MoreItemsExpected) {
- IndirectionLevel--;
- Out = "->";
- }
+ os << Sep;
- if (IndirectionLevel > 0 && MoreItemsExpected)
- os << "(";
+ // Can only reasonably pretty-print DeclRegions.
+ if (!isa<DeclRegion>(R))
+ return false;
- for (int i=0; i<IndirectionLevel; i++)
- os << "*";
- os << FirstElement;
+ const auto *DR = cast<DeclRegion>(R);
+ Sep = DR->getValueType()->isAnyPointerType() ? "->" : ".";
+ DR->getDecl()->getDeclName().print(os, PP);
+ }
- if (IndirectionLevel > 0 && MoreItemsExpected)
- os << ")";
+ if (Sep.empty())
+ prettyPrintFirstElement(FirstElement,
+ /*MoreItemsExpected=*/false, IndirectionLevel, os);
+ return true;
+}
+
+StringRef NoStoreFuncVisitor::prettyPrintFirstElement(
+ StringRef FirstElement, bool MoreItemsExpected, int IndirectionLevel,
+ llvm::raw_svector_ostream &os) {
+ StringRef Out = ".";
- return Out;
+ if (IndirectionLevel > 0 && MoreItemsExpected) {
+ IndirectionLevel--;
+ Out = "->";
}
-};
+
+ if (IndirectionLevel > 0 && MoreItemsExpected)
+ os << "(";
+
+ for (int i = 0; i < IndirectionLevel; i++)
+ os << "*";
+ os << FirstElement;
+
+ if (IndirectionLevel > 0 && MoreItemsExpected)
+ os << ")";
+
+ return Out;
+}
+
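(Illustrative sketch only, not part of the patch: the kind of analyzed code NoStoreFuncVisitor targets. The function names and the exact note wording are assumptions; the real text is produced by maybeEmitNote/prettyPrintRegionName above.)

bool maybeInit(int *out, bool ok) {
  if (!ok)
    return false;        // note here, roughly: "Returning without writing to '*out'"
  *out = 42;
  return true;
}

int use(bool ok) {
  int value;             // left uninitialized on the failing path
  (void)maybeInit(&value, ok);
  return value;          // uninitialized-value report, tracked back through maybeInit()
}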
+//===----------------------------------------------------------------------===//
+// Implementation of MacroNullReturnSuppressionVisitor.
+//===----------------------------------------------------------------------===//
+
+namespace {
/// Suppress null-pointer-dereference bugs where dereferenced null was returned
/// by the macro.
@@ -718,6 +829,10 @@ private:
}
};
+} // end of anonymous namespace
+
+namespace {
+
/// Emits an extra note at the return statement of an interesting stack frame.
///
/// The returned value is marked as an interesting value, and if it's null,
@@ -769,16 +884,38 @@ public:
return;
// First, find when we processed the statement.
+ // If we work with a 'CXXNewExpr' that is going to be purged away before
+ // its call takes place, we would catch that purge in the last condition
+ // as a 'StmtPoint', so we have to bypass it.
+ const bool BypassCXXNewExprEval = isa<CXXNewExpr>(S);
+
+ // This moves forward when we enter another context.
+ const StackFrameContext *CurrentSFC = Node->getStackFrame();
+
do {
- if (auto CEE = Node->getLocationAs<CallExitEnd>())
+ // If that is satisfied we found our statement as an inlined call.
+ if (Optional<CallExitEnd> CEE = Node->getLocationAs<CallExitEnd>())
if (CEE->getCalleeContext()->getCallSite() == S)
break;
- if (auto SP = Node->getLocationAs<StmtPoint>())
- if (SP->getStmt() == S)
- break;
+ // Try to move forward to the end of the call-chain.
Node = Node->getFirstPred();
- } while (Node);
+ if (!Node)
+ break;
+
+ const StackFrameContext *PredSFC = Node->getStackFrame();
+
+ // If that is satisfied we found our statement.
+ // FIXME: This code currently bypasses the call site for the
+ // conservatively evaluated allocator.
+ if (!BypassCXXNewExprEval)
+ if (Optional<StmtPoint> SP = Node->getLocationAs<StmtPoint>())
+ // Also check that we have not entered another context.
+ if (SP->getStmt() == S && CurrentSFC == PredSFC)
+ break;
+
+ CurrentSFC = PredSFC;
+ } while (Node->getStackFrame() == CurrentSFC);
// Next, step over any post-statement checks.
while (Node && Node->getLocation().getAs<PostStmt>())
@@ -993,7 +1130,11 @@ public:
}
};
-} // namespace
+} // end of anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Implementation of FindLastStoreBRVisitor.
+//===----------------------------------------------------------------------===//
void FindLastStoreBRVisitor::Profile(llvm::FoldingSetNodeID &ID) const {
static int tag = 0;
@@ -1188,7 +1329,7 @@ FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
if (Succ->getState()->getSVal(R) != V)
return nullptr;
- if (Pred->getState()->getSVal(R) == V) {
+ if (hasVisibleUpdate(Pred, Pred->getState()->getSVal(R), Succ, V)) {
Optional<PostStore> PS = Succ->getLocationAs<PostStore>();
if (!PS || PS->getLocationValue() != R)
return nullptr;
@@ -1209,6 +1350,7 @@ FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
// UndefinedVal.)
if (Optional<CallEnter> CE = Succ->getLocationAs<CallEnter>()) {
if (const auto *VR = dyn_cast<VarRegion>(R)) {
+
const auto *Param = cast<ParmVarDecl>(VR->getDecl());
ProgramStateManager &StateMgr = BRC.getStateManager();
@@ -1302,6 +1444,10 @@ FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
}
+//===----------------------------------------------------------------------===//
+// Implementation of TrackConstraintBRVisitor.
+//===----------------------------------------------------------------------===//
+
void TrackConstraintBRVisitor::Profile(llvm::FoldingSetNodeID &ID) const {
static int tag = 0;
ID.AddPointer(&tag);
@@ -1374,6 +1520,10 @@ TrackConstraintBRVisitor::VisitNode(const ExplodedNode *N,
return nullptr;
}
+//===----------------------------------------------------------------------===//
+// Implementation of SuppressInlineDefensiveChecksVisitor.
+//===----------------------------------------------------------------------===//
+
SuppressInlineDefensiveChecksVisitor::
SuppressInlineDefensiveChecksVisitor(DefinedSVal Value, const ExplodedNode *N)
: V(Value) {
@@ -1446,7 +1596,7 @@ SuppressInlineDefensiveChecksVisitor::VisitNode(const ExplodedNode *Succ,
return nullptr;
CFGStmtMap *Map = CurLC->getAnalysisDeclContext()->getCFGStmtMap();
- CurTerminatorStmt = Map->getBlock(CurStmt)->getTerminator();
+ CurTerminatorStmt = Map->getBlock(CurStmt)->getTerminatorStmt();
} else {
return nullptr;
}
@@ -1469,6 +1619,114 @@ SuppressInlineDefensiveChecksVisitor::VisitNode(const ExplodedNode *Succ,
return nullptr;
}
+//===----------------------------------------------------------------------===//
+// TrackControlDependencyCondBRVisitor.
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// Tracks the expressions that are a control dependency of the node that was
+/// supplied to the constructor.
+/// For example:
+///
+/// cond = 1;
+/// if (cond)
+/// 10 / 0;
+///
+/// An error is emitted at line 3. This visitor realizes that the branch
+/// on line 2 is a control dependency of line 3, and tracks its condition via
+/// trackExpressionValue().
+class TrackControlDependencyCondBRVisitor final : public BugReporterVisitor {
+ const ExplodedNode *Origin;
+ ControlDependencyCalculator ControlDeps;
+ llvm::SmallSet<const CFGBlock *, 32> VisitedBlocks;
+
+public:
+ TrackControlDependencyCondBRVisitor(const ExplodedNode *O)
+ : Origin(O), ControlDeps(&O->getCFG()) {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ static int x = 0;
+ ID.AddPointer(&x);
+ }
+
+ std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
+};
+} // end of anonymous namespace
+
+static CFGBlock *GetRelevantBlock(const ExplodedNode *Node) {
+ if (auto SP = Node->getLocationAs<StmtPoint>()) {
+ const Stmt *S = SP->getStmt();
+ assert(S);
+
+ return const_cast<CFGBlock *>(Node->getLocationContext()
+ ->getAnalysisDeclContext()->getCFGStmtMap()->getBlock(S));
+ }
+
+ return nullptr;
+}
+
+static std::shared_ptr<PathDiagnosticEventPiece>
+constructDebugPieceForTrackedCondition(const Expr *Cond,
+ const ExplodedNode *N,
+ BugReporterContext &BRC) {
+
+ if (BRC.getAnalyzerOptions().AnalysisDiagOpt == PD_NONE ||
+ !BRC.getAnalyzerOptions().ShouldTrackConditionsDebug)
+ return nullptr;
+
+ std::string ConditionText = Lexer::getSourceText(
+ CharSourceRange::getTokenRange(Cond->getSourceRange()),
+ BRC.getSourceManager(),
+ BRC.getASTContext().getLangOpts());
+
+ return std::make_shared<PathDiagnosticEventPiece>(
+ PathDiagnosticLocation::createBegin(
+ Cond, BRC.getSourceManager(), N->getLocationContext()),
+ (Twine() + "Tracking condition '" + ConditionText + "'").str());
+}
+
+std::shared_ptr<PathDiagnosticPiece>
+TrackControlDependencyCondBRVisitor::VisitNode(const ExplodedNode *N,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+ // We can only reason about control dependencies within the same stack frame.
+ if (Origin->getStackFrame() != N->getStackFrame())
+ return nullptr;
+
+ CFGBlock *NB = GetRelevantBlock(N);
+
+ // Skip if we already inspected this block.
+ if (!VisitedBlocks.insert(NB).second)
+ return nullptr;
+
+ CFGBlock *OriginB = GetRelevantBlock(Origin);
+
+ // TODO: Cache CFGBlocks for each ExplodedNode.
+ if (!OriginB || !NB)
+ return nullptr;
+
+ if (ControlDeps.isControlDependent(OriginB, NB)) {
+ if (const Expr *Condition = NB->getLastCondition()) {
+ // Keeping track of the already tracked conditions on a visitor level
+ // isn't sufficient, because a new visitor is created for each tracked
+ // expression, hence the BugReport level set.
+ if (BR.addTrackedCondition(N)) {
+ bugreporter::trackExpressionValue(
+ N, Condition, BR, /*EnableNullFPSuppression=*/false);
+ return constructDebugPieceForTrackedCondition(Condition, N, BRC);
+ }
+ }
+ }
+
+ return nullptr;
+}
+
+//===----------------------------------------------------------------------===//
+// Implementation of trackExpressionValue.
+//===----------------------------------------------------------------------===//
+
static const MemRegion *getLocationRegionIfReference(const Expr *E,
const ExplodedNode *N) {
if (const auto *DR = dyn_cast<DeclRefExpr>(E)) {
@@ -1518,7 +1776,7 @@ static const Expr *peelOffOuterExpr(const Expr *Ex,
ProgramPoint ProgPoint = NI->getLocation();
if (Optional<BlockEdge> BE = ProgPoint.getAs<BlockEdge>()) {
const CFGBlock *srcBlk = BE->getSrc();
- if (const Stmt *term = srcBlk->getTerminator()) {
+ if (const Stmt *term = srcBlk->getTerminatorStmt()) {
if (term == CO) {
bool TookTrueBranch = (*(srcBlk->succ_begin()) == BE->getDst());
if (TookTrueBranch)
@@ -1581,12 +1839,26 @@ bool bugreporter::trackExpressionValue(const ExplodedNode *InputNode,
ProgramStateRef LVState = LVNode->getState();
+ // We only track expressions if we believe that they are important. Chances
+ // are good that control dependencies to the tracking point are also important;
+ // because of this, let's explain why we believe control reached this point.
+ // TODO: Shouldn't we track control dependencies of every bug location, rather
+ // than only tracked expressions?
+ if (LVState->getAnalysisManager().getAnalyzerOptions().ShouldTrackConditions)
+ report.addVisitor(llvm::make_unique<TrackControlDependencyCondBRVisitor>(
+ InputNode));
+
// The message send could be nil due to the receiver being nil.
// At this point in the path, the receiver should be live since we are at the
// message send expr. If it is nil, start tracking it.
if (const Expr *Receiver = NilReceiverBRVisitor::getNilReceiver(Inner, LVNode))
trackExpressionValue(LVNode, Receiver, report, EnableNullFPSuppression);
+ // Track the index if this is an array subscript.
+ if (const auto *Arr = dyn_cast<ArraySubscriptExpr>(Inner))
+ trackExpressionValue(
+ LVNode, Arr->getIdx(), report, /*EnableNullFPSuppression*/ false);
+
// See if the expression we're interested refers to a variable.
// If so, we can track both its contents and constraints on its value.
if (ExplodedGraph::isInterestingLValueExpr(Inner)) {
@@ -1688,6 +1960,10 @@ bool bugreporter::trackExpressionValue(const ExplodedNode *InputNode,
return true;
}
+//===----------------------------------------------------------------------===//
+// Implementation of NilReceiverBRVisitor.
+//===----------------------------------------------------------------------===//
+
const Expr *NilReceiverBRVisitor::getNilReceiver(const Stmt *S,
const ExplodedNode *N) {
const auto *ME = dyn_cast<ObjCMessageExpr>(S);
@@ -1738,6 +2014,10 @@ NilReceiverBRVisitor::VisitNode(const ExplodedNode *N,
return std::make_shared<PathDiagnosticEventPiece>(L, OS.str());
}
+//===----------------------------------------------------------------------===//
+// Implementation of FindLastStoreBRVisitor.
+//===----------------------------------------------------------------------===//
+
// Registers every VarDecl inside a Stmt with a last store visitor.
void FindLastStoreBRVisitor::registerStatementVarDecls(BugReport &BR,
const Stmt *S,
@@ -1798,39 +2078,36 @@ ConditionBRVisitor::VisitNode(const ExplodedNode *N,
std::shared_ptr<PathDiagnosticPiece>
ConditionBRVisitor::VisitNodeImpl(const ExplodedNode *N,
BugReporterContext &BRC, BugReport &BR) {
- ProgramPoint progPoint = N->getLocation();
- ProgramStateRef CurrentState = N->getState();
- ProgramStateRef PrevState = N->getFirstPred()->getState();
-
- // Compare the GDMs of the state, because that is where constraints
- // are managed. Note that ensure that we only look at nodes that
- // were generated by the analyzer engine proper, not checkers.
- if (CurrentState->getGDM().getRoot() ==
- PrevState->getGDM().getRoot())
- return nullptr;
+ ProgramPoint ProgPoint = N->getLocation();
+ const std::pair<const ProgramPointTag *, const ProgramPointTag *> &Tags =
+ ExprEngine::geteagerlyAssumeBinOpBifurcationTags();
// If an assumption was made on a branch, it should be caught
// here by looking at the state transition.
- if (Optional<BlockEdge> BE = progPoint.getAs<BlockEdge>()) {
- const CFGBlock *srcBlk = BE->getSrc();
- if (const Stmt *term = srcBlk->getTerminator())
- return VisitTerminator(term, N, srcBlk, BE->getDst(), BR, BRC);
+ if (Optional<BlockEdge> BE = ProgPoint.getAs<BlockEdge>()) {
+ const CFGBlock *SrcBlock = BE->getSrc();
+ if (const Stmt *Term = SrcBlock->getTerminatorStmt()) {
+ // If the tag of the previous node is 'Eagerly Assume...' the current
+ // 'BlockEdge' has the same constraint information. We do not want to
+ // report the value as it is just an assumption on the predecessor node
+ // which will be caught in the next VisitNode() iteration as a 'PostStmt'.
+ const ProgramPointTag *PreviousNodeTag =
+ N->getFirstPred()->getLocation().getTag();
+ if (PreviousNodeTag == Tags.first || PreviousNodeTag == Tags.second)
+ return nullptr;
+
+ return VisitTerminator(Term, N, SrcBlock, BE->getDst(), BR, BRC);
+ }
return nullptr;
}
- if (Optional<PostStmt> PS = progPoint.getAs<PostStmt>()) {
- const std::pair<const ProgramPointTag *, const ProgramPointTag *> &tags =
- ExprEngine::geteagerlyAssumeBinOpBifurcationTags();
-
- const ProgramPointTag *tag = PS->getTag();
- if (tag == tags.first)
- return VisitTrueTest(cast<Expr>(PS->getStmt()), true,
- BRC, BR, N);
- if (tag == tags.second)
- return VisitTrueTest(cast<Expr>(PS->getStmt()), false,
- BRC, BR, N);
+ if (Optional<PostStmt> PS = ProgPoint.getAs<PostStmt>()) {
+ const ProgramPointTag *CurrentNodeTag = PS->getTag();
+ if (CurrentNodeTag != Tags.first && CurrentNodeTag != Tags.second)
+ return nullptr;
- return nullptr;
+ bool TookTrue = CurrentNodeTag == Tags.first;
+ return VisitTrueTest(cast<Expr>(PS->getStmt()), BRC, BR, N, TookTrue);
}
return nullptr;
@@ -1876,6 +2153,8 @@ std::shared_ptr<PathDiagnosticPiece> ConditionBRVisitor::VisitTerminator(
break;
}
+ Cond = Cond->IgnoreParens();
+
// However, when we encounter a logical operator as a branch condition,
// then the condition is actually its RHS, because LHS would be
// the condition for the logical operator terminator.
@@ -1887,18 +2166,30 @@ std::shared_ptr<PathDiagnosticPiece> ConditionBRVisitor::VisitTerminator(
assert(Cond);
assert(srcBlk->succ_size() == 2);
- const bool tookTrue = *(srcBlk->succ_begin()) == dstBlk;
- return VisitTrueTest(Cond, tookTrue, BRC, R, N);
+ const bool TookTrue = *(srcBlk->succ_begin()) == dstBlk;
+ return VisitTrueTest(Cond, BRC, R, N, TookTrue);
}
std::shared_ptr<PathDiagnosticPiece>
-ConditionBRVisitor::VisitTrueTest(const Expr *Cond, bool tookTrue,
- BugReporterContext &BRC, BugReport &R,
- const ExplodedNode *N) {
+ConditionBRVisitor::VisitTrueTest(const Expr *Cond, BugReporterContext &BRC,
+ BugReport &R, const ExplodedNode *N,
+ bool TookTrue) {
+ ProgramStateRef CurrentState = N->getState();
+ ProgramStateRef PrevState = N->getFirstPred()->getState();
+ const LocationContext *LCtx = N->getLocationContext();
+
+ // If the constraint information has changed between the current and the
+ // previous program state, we assume the newly seen constraint information.
+ // If we cannot evaluate the condition (and the constraints are the same),
+ // the analyzer has no information about the value and is just assuming it.
+ bool IsAssuming =
+ !BRC.getStateManager().haveEqualConstraints(CurrentState, PrevState) ||
+ CurrentState->getSVal(Cond, LCtx).isUnknownOrUndef();
+
// These will be modified in code below, but we need to preserve the original
// values in case we want to throw the generic message.
const Expr *CondTmp = Cond;
- bool tookTrueTmp = tookTrue;
+ bool TookTrueTmp = TookTrue;
while (true) {
CondTmp = CondTmp->IgnoreParenCasts();
@@ -1907,18 +2198,23 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond, bool tookTrue,
break;
case Stmt::BinaryOperatorClass:
if (auto P = VisitTrueTest(Cond, cast<BinaryOperator>(CondTmp),
- tookTrueTmp, BRC, R, N))
+ BRC, R, N, TookTrueTmp, IsAssuming))
return P;
break;
case Stmt::DeclRefExprClass:
if (auto P = VisitTrueTest(Cond, cast<DeclRefExpr>(CondTmp),
- tookTrueTmp, BRC, R, N))
+ BRC, R, N, TookTrueTmp, IsAssuming))
+ return P;
+ break;
+ case Stmt::MemberExprClass:
+ if (auto P = VisitTrueTest(Cond, cast<MemberExpr>(CondTmp),
+ BRC, R, N, TookTrueTmp, IsAssuming))
return P;
break;
case Stmt::UnaryOperatorClass: {
const auto *UO = cast<UnaryOperator>(CondTmp);
if (UO->getOpcode() == UO_LNot) {
- tookTrueTmp = !tookTrueTmp;
+ TookTrueTmp = !TookTrueTmp;
CondTmp = UO->getSubExpr();
continue;
}
@@ -1930,13 +2226,17 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond, bool tookTrue,
// Condition too complex to explain? Just say something so that the user
// knew we've made some path decision at this point.
- const LocationContext *LCtx = N->getLocationContext();
+ // If it is too complex and we know the evaluation of the condition, do not
+ // repeat the note from 'BugReporter.cpp'.
+ if (!IsAssuming)
+ return nullptr;
+
PathDiagnosticLocation Loc(Cond, BRC.getSourceManager(), LCtx);
if (!Loc.isValid() || !Loc.asLocation().isValid())
return nullptr;
return std::make_shared<PathDiagnosticEventPiece>(
- Loc, tookTrue ? GenericTrueMessage : GenericFalseMessage);
+ Loc, TookTrue ? GenericTrueMessage : GenericFalseMessage);
}
bool ConditionBRVisitor::patternMatch(const Expr *Ex,
@@ -1945,47 +2245,27 @@ bool ConditionBRVisitor::patternMatch(const Expr *Ex,
BugReporterContext &BRC,
BugReport &report,
const ExplodedNode *N,
- Optional<bool> &prunable) {
+ Optional<bool> &prunable,
+ bool IsSameFieldName) {
const Expr *OriginalExpr = Ex;
Ex = Ex->IgnoreParenCasts();
- // Use heuristics to determine if Ex is a macro expending to a literal and
- // if so, use the macro's name.
- SourceLocation LocStart = Ex->getBeginLoc();
- SourceLocation LocEnd = Ex->getEndLoc();
- if (LocStart.isMacroID() && LocEnd.isMacroID() &&
- (isa<GNUNullExpr>(Ex) ||
- isa<ObjCBoolLiteralExpr>(Ex) ||
- isa<CXXBoolLiteralExpr>(Ex) ||
- isa<IntegerLiteral>(Ex) ||
- isa<FloatingLiteral>(Ex))) {
- StringRef StartName = Lexer::getImmediateMacroNameForDiagnostics(LocStart,
- BRC.getSourceManager(), BRC.getASTContext().getLangOpts());
- StringRef EndName = Lexer::getImmediateMacroNameForDiagnostics(LocEnd,
- BRC.getSourceManager(), BRC.getASTContext().getLangOpts());
- bool beginAndEndAreTheSameMacro = StartName.equals(EndName);
-
- bool partOfParentMacro = false;
- if (ParentEx->getBeginLoc().isMacroID()) {
- StringRef PName = Lexer::getImmediateMacroNameForDiagnostics(
- ParentEx->getBeginLoc(), BRC.getSourceManager(),
- BRC.getASTContext().getLangOpts());
- partOfParentMacro = PName.equals(StartName);
- }
-
- if (beginAndEndAreTheSameMacro && !partOfParentMacro ) {
- // Get the location of the macro name as written by the caller.
- SourceLocation Loc = LocStart;
- while (LocStart.isMacroID()) {
- Loc = LocStart;
- LocStart = BRC.getSourceManager().getImmediateMacroCallerLoc(LocStart);
+ if (isa<GNUNullExpr>(Ex) || isa<ObjCBoolLiteralExpr>(Ex) ||
+ isa<CXXBoolLiteralExpr>(Ex) || isa<IntegerLiteral>(Ex) ||
+ isa<FloatingLiteral>(Ex)) {
+ // Use heuristics to determine if the expression is a macro
+ // expanding to a literal and if so, use the macro's name.
+ SourceLocation BeginLoc = OriginalExpr->getBeginLoc();
+ SourceLocation EndLoc = OriginalExpr->getEndLoc();
+ if (BeginLoc.isMacroID() && EndLoc.isMacroID()) {
+ SourceManager &SM = BRC.getSourceManager();
+ const LangOptions &LO = BRC.getASTContext().getLangOpts();
+ if (Lexer::isAtStartOfMacroExpansion(BeginLoc, SM, LO) &&
+ Lexer::isAtEndOfMacroExpansion(EndLoc, SM, LO)) {
+ CharSourceRange R = Lexer::getAsCharRange({BeginLoc, EndLoc}, SM, LO);
+ Out << Lexer::getSourceText(R, SM, LO);
+ return false;
}
- StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics(
- Loc, BRC.getSourceManager(), BRC.getASTContext().getLangOpts());
-
- // Return the macro name.
- Out << MacroName;
- return false;
}
}
@@ -2032,23 +2312,43 @@ bool ConditionBRVisitor::patternMatch(const Expr *Ex,
return false;
}
+ if (const auto *ME = dyn_cast<MemberExpr>(Ex)) {
+ if (!IsSameFieldName)
+ Out << "field '" << ME->getMemberDecl()->getName() << '\'';
+ else
+ Out << '\''
+ << Lexer::getSourceText(
+ CharSourceRange::getTokenRange(Ex->getSourceRange()),
+ BRC.getSourceManager(), BRC.getASTContext().getLangOpts(), 0)
+ << '\'';
+ }
+
return false;
}
-std::shared_ptr<PathDiagnosticPiece>
-ConditionBRVisitor::VisitTrueTest(const Expr *Cond, const BinaryOperator *BExpr,
- const bool tookTrue, BugReporterContext &BRC,
- BugReport &R, const ExplodedNode *N) {
+std::shared_ptr<PathDiagnosticPiece> ConditionBRVisitor::VisitTrueTest(
+ const Expr *Cond, const BinaryOperator *BExpr, BugReporterContext &BRC,
+ BugReport &R, const ExplodedNode *N, bool TookTrue, bool IsAssuming) {
bool shouldInvert = false;
Optional<bool> shouldPrune;
+ // Check if the field name of the MemberExprs is ambiguous. Example:
+ // " 'a.d' is equal to 'h.d' " in 'test/Analysis/null-deref-path-notes.cpp'.
+ bool IsSameFieldName = false;
+ if (const auto *LhsME =
+ dyn_cast<MemberExpr>(BExpr->getLHS()->IgnoreParenCasts()))
+ if (const auto *RhsME =
+ dyn_cast<MemberExpr>(BExpr->getRHS()->IgnoreParenCasts()))
+ IsSameFieldName = LhsME->getMemberDecl()->getName() ==
+ RhsME->getMemberDecl()->getName();
+
SmallString<128> LhsString, RhsString;
{
llvm::raw_svector_ostream OutLHS(LhsString), OutRHS(RhsString);
- const bool isVarLHS = patternMatch(BExpr->getLHS(), BExpr, OutLHS,
- BRC, R, N, shouldPrune);
- const bool isVarRHS = patternMatch(BExpr->getRHS(), BExpr, OutRHS,
- BRC, R, N, shouldPrune);
+ const bool isVarLHS = patternMatch(BExpr->getLHS(), BExpr, OutLHS, BRC, R,
+ N, shouldPrune, IsSameFieldName);
+ const bool isVarRHS = patternMatch(BExpr->getRHS(), BExpr, OutRHS, BRC, R,
+ N, shouldPrune, IsSameFieldName);
shouldInvert = !isVarLHS && isVarRHS;
}
@@ -2058,8 +2358,8 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond, const BinaryOperator *BExpr,
if (BinaryOperator::isAssignmentOp(Op)) {
// For assignment operators, all that we care about is that the LHS
// evaluates to "true" or "false".
- return VisitConditionVariable(LhsString, BExpr->getLHS(), tookTrue,
- BRC, R, N);
+ return VisitConditionVariable(LhsString, BExpr->getLHS(), BRC, R, N,
+ TookTrue);
}
// For non-assignment operations, we require that we can understand
@@ -2071,7 +2371,8 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond, const BinaryOperator *BExpr,
// Should we invert the strings if the LHS is not a variable name?
SmallString<256> buf;
llvm::raw_svector_ostream Out(buf);
- Out << "Assuming " << (shouldInvert ? RhsString : LhsString) << " is ";
+ Out << (IsAssuming ? "Assuming " : "")
+ << (shouldInvert ? RhsString : LhsString) << " is ";
// Do we need to invert the opcode?
if (shouldInvert)
@@ -2083,7 +2384,7 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond, const BinaryOperator *BExpr,
case BO_GE: Op = BO_LE; break;
}
- if (!tookTrue)
+ if (!TookTrue)
switch (Op) {
case BO_EQ: Op = BO_NE; break;
case BO_NE: Op = BO_EQ; break;
@@ -2110,15 +2411,24 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond, const BinaryOperator *BExpr,
Out << (shouldInvert ? LhsString : RhsString);
const LocationContext *LCtx = N->getLocationContext();
PathDiagnosticLocation Loc(Cond, BRC.getSourceManager(), LCtx);
- auto event = std::make_shared<PathDiagnosticEventPiece>(Loc, Out.str());
+
+ // Convert 'field ...' to 'Field ...' if it is a MemberExpr.
+ std::string Message = Out.str();
+ Message[0] = toupper(Message[0]);
+
+ // If we know the value create a pop-up note.
+ if (!IsAssuming)
+ return std::make_shared<PathDiagnosticPopUpPiece>(Loc, Message);
+
+ auto event = std::make_shared<PathDiagnosticEventPiece>(Loc, Message);
if (shouldPrune.hasValue())
event->setPrunable(shouldPrune.getValue());
return event;
}
std::shared_ptr<PathDiagnosticPiece> ConditionBRVisitor::VisitConditionVariable(
- StringRef LhsString, const Expr *CondVarExpr, const bool tookTrue,
- BugReporterContext &BRC, BugReport &report, const ExplodedNode *N) {
+ StringRef LhsString, const Expr *CondVarExpr, BugReporterContext &BRC,
+ BugReport &report, const ExplodedNode *N, bool TookTrue) {
// FIXME: If there's already a constraint tracker for this variable,
// we shouldn't emit anything here (c.f. the double note in
// test/Analysis/inlining/path-notes.c)
@@ -2126,17 +2436,7 @@ std::shared_ptr<PathDiagnosticPiece> ConditionBRVisitor::VisitConditionVariable(
llvm::raw_svector_ostream Out(buf);
Out << "Assuming " << LhsString << " is ";
- QualType Ty = CondVarExpr->getType();
-
- if (Ty->isPointerType())
- Out << (tookTrue ? "not null" : "null");
- else if (Ty->isObjCObjectPointerType())
- Out << (tookTrue ? "not nil" : "nil");
- else if (Ty->isBooleanType())
- Out << (tookTrue ? "true" : "false");
- else if (Ty->isIntegralOrEnumerationType())
- Out << (tookTrue ? "non-zero" : "zero");
- else
+ if (!printValue(CondVarExpr, Out, N, TookTrue, /*IsAssuming=*/true))
return nullptr;
const LocationContext *LCtx = N->getLocationContext();
@@ -2156,34 +2456,29 @@ std::shared_ptr<PathDiagnosticPiece> ConditionBRVisitor::VisitConditionVariable(
return event;
}
-std::shared_ptr<PathDiagnosticPiece>
-ConditionBRVisitor::VisitTrueTest(const Expr *Cond, const DeclRefExpr *DR,
- const bool tookTrue, BugReporterContext &BRC,
- BugReport &report, const ExplodedNode *N) {
- const auto *VD = dyn_cast<VarDecl>(DR->getDecl());
+std::shared_ptr<PathDiagnosticPiece> ConditionBRVisitor::VisitTrueTest(
+ const Expr *Cond, const DeclRefExpr *DRE, BugReporterContext &BRC,
+ BugReport &report, const ExplodedNode *N, bool TookTrue, bool IsAssuming) {
+ const auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
if (!VD)
return nullptr;
SmallString<256> Buf;
llvm::raw_svector_ostream Out(Buf);
- Out << "Assuming '" << VD->getDeclName() << "' is ";
+ Out << (IsAssuming ? "Assuming '" : "'") << VD->getDeclName() << "' is ";
- QualType VDTy = VD->getType();
-
- if (VDTy->isPointerType())
- Out << (tookTrue ? "non-null" : "null");
- else if (VDTy->isObjCObjectPointerType())
- Out << (tookTrue ? "non-nil" : "nil");
- else if (VDTy->isScalarType())
- Out << (tookTrue ? "not equal to 0" : "0");
- else
+ if (!printValue(DRE, Out, N, TookTrue, IsAssuming))
return nullptr;
const LocationContext *LCtx = N->getLocationContext();
PathDiagnosticLocation Loc(Cond, BRC.getSourceManager(), LCtx);
- auto event = std::make_shared<PathDiagnosticEventPiece>(Loc, Out.str());
+ // If we know the value create a pop-up note.
+ if (!IsAssuming)
+ return std::make_shared<PathDiagnosticPopUpPiece>(Loc, Out.str());
+
+ auto event = std::make_shared<PathDiagnosticEventPiece>(Loc, Out.str());
const ProgramState *state = N->getState().get();
if (const MemRegion *R = state->getLValue(VD, LCtx).getAsRegion()) {
if (report.isInteresting(R))
@@ -2197,6 +2492,67 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond, const DeclRefExpr *DR,
return std::move(event);
}
+std::shared_ptr<PathDiagnosticPiece> ConditionBRVisitor::VisitTrueTest(
+ const Expr *Cond, const MemberExpr *ME, BugReporterContext &BRC,
+ BugReport &report, const ExplodedNode *N, bool TookTrue, bool IsAssuming) {
+ SmallString<256> Buf;
+ llvm::raw_svector_ostream Out(Buf);
+
+ Out << (IsAssuming ? "Assuming field '" : "Field '")
+ << ME->getMemberDecl()->getName() << "' is ";
+
+ if (!printValue(ME, Out, N, TookTrue, IsAssuming))
+ return nullptr;
+
+ const LocationContext *LCtx = N->getLocationContext();
+ PathDiagnosticLocation Loc(Cond, BRC.getSourceManager(), LCtx);
+ if (!Loc.isValid() || !Loc.asLocation().isValid())
+ return nullptr;
+
+ // If we know the value create a pop-up note.
+ if (!IsAssuming)
+ return std::make_shared<PathDiagnosticPopUpPiece>(Loc, Out.str());
+
+ return std::make_shared<PathDiagnosticEventPiece>(Loc, Out.str());
+}
+
+bool ConditionBRVisitor::printValue(const Expr *CondVarExpr, raw_ostream &Out,
+ const ExplodedNode *N, bool TookTrue,
+ bool IsAssuming) {
+ QualType Ty = CondVarExpr->getType();
+
+ if (Ty->isPointerType()) {
+ Out << (TookTrue ? "non-null" : "null");
+ return true;
+ }
+
+ if (Ty->isObjCObjectPointerType()) {
+ Out << (TookTrue ? "non-nil" : "nil");
+ return true;
+ }
+
+ if (!Ty->isIntegralOrEnumerationType())
+ return false;
+
+ Optional<const llvm::APSInt *> IntValue;
+ if (!IsAssuming)
+ IntValue = getConcreteIntegerValue(CondVarExpr, N);
+
+ if (IsAssuming || !IntValue.hasValue()) {
+ if (Ty->isBooleanType())
+ Out << (TookTrue ? "true" : "false");
+ else
+ Out << (TookTrue ? "not equal to 0" : "0");
+ } else {
+ if (Ty->isBooleanType())
+ Out << (IntValue.getValue()->getBoolValue() ? "true" : "false");
+ else
+ Out << *IntValue.getValue();
+ }
+
+ return true;
+}
+
const char *const ConditionBRVisitor::GenericTrueMessage =
"Assuming the condition is true";
const char *const ConditionBRVisitor::GenericFalseMessage =
@@ -2208,6 +2564,10 @@ bool ConditionBRVisitor::isPieceMessageGeneric(
Piece->getString() == GenericFalseMessage;
}
+//===----------------------------------------------------------------------===//
+// Implementation of LikelyFalsePositiveSuppressionBRVisitor.
+//===----------------------------------------------------------------------===//
+
void LikelyFalsePositiveSuppressionBRVisitor::finalizeVisitor(
BugReporterContext &BRC, const ExplodedNode *N, BugReport &BR) {
// Here we suppress false positives coming from system headers. This list is
@@ -2290,6 +2650,10 @@ void LikelyFalsePositiveSuppressionBRVisitor::finalizeVisitor(
}
}
+//===----------------------------------------------------------------------===//
+// Implementation of UndefOrNullArgVisitor.
+//===----------------------------------------------------------------------===//
+
std::shared_ptr<PathDiagnosticPiece>
UndefOrNullArgVisitor::VisitNode(const ExplodedNode *N,
BugReporterContext &BRC, BugReport &BR) {
@@ -2340,78 +2704,9 @@ UndefOrNullArgVisitor::VisitNode(const ExplodedNode *N,
return nullptr;
}
-std::shared_ptr<PathDiagnosticPiece>
-CXXSelfAssignmentBRVisitor::VisitNode(const ExplodedNode *Succ,
- BugReporterContext &BRC, BugReport &) {
- if (Satisfied)
- return nullptr;
-
- const auto Edge = Succ->getLocation().getAs<BlockEdge>();
- if (!Edge.hasValue())
- return nullptr;
-
- auto Tag = Edge->getTag();
- if (!Tag)
- return nullptr;
-
- if (Tag->getTagDescription() != "cplusplus.SelfAssignment")
- return nullptr;
-
- Satisfied = true;
-
- const auto *Met =
- dyn_cast<CXXMethodDecl>(Succ->getCodeDecl().getAsFunction());
- assert(Met && "Not a C++ method.");
- assert((Met->isCopyAssignmentOperator() || Met->isMoveAssignmentOperator()) &&
- "Not a copy/move assignment operator.");
-
- const auto *LCtx = Edge->getLocationContext();
-
- const auto &State = Succ->getState();
- auto &SVB = State->getStateManager().getSValBuilder();
-
- const auto Param =
- State->getSVal(State->getRegion(Met->getParamDecl(0), LCtx));
- const auto This =
- State->getSVal(SVB.getCXXThis(Met, LCtx->getStackFrame()));
-
- auto L = PathDiagnosticLocation::create(Met, BRC.getSourceManager());
-
- if (!L.isValid() || !L.asLocation().isValid())
- return nullptr;
-
- SmallString<256> Buf;
- llvm::raw_svector_ostream Out(Buf);
-
- Out << "Assuming " << Met->getParamDecl(0)->getName() <<
- ((Param == This) ? " == " : " != ") << "*this";
-
- auto Piece = std::make_shared<PathDiagnosticEventPiece>(L, Out.str());
- Piece->addRange(Met->getSourceRange());
-
- return std::move(Piece);
-}
-
-std::shared_ptr<PathDiagnosticPiece>
-TaintBugVisitor::VisitNode(const ExplodedNode *N,
- BugReporterContext &BRC, BugReport &) {
-
- // Find the ExplodedNode where the taint was first introduced
- if (!N->getState()->isTainted(V) || N->getFirstPred()->getState()->isTainted(V))
- return nullptr;
-
- const Stmt *S = PathDiagnosticLocation::getStmt(N);
- if (!S)
- return nullptr;
-
- const LocationContext *NCtx = N->getLocationContext();
- PathDiagnosticLocation L =
- PathDiagnosticLocation::createBegin(S, BRC.getSourceManager(), NCtx);
- if (!L.isValid() || !L.asLocation().isValid())
- return nullptr;
-
- return std::make_shared<PathDiagnosticEventPiece>(L, "Taint originated here");
-}
+//===----------------------------------------------------------------------===//
+// Implementation of FalsePositiveRefutationBRVisitor.
+//===----------------------------------------------------------------------===//
FalsePositiveRefutationBRVisitor::FalsePositiveRefutationBRVisitor()
: Constraints(ConstraintRangeTy::Factory().getEmptyMap()) {}
@@ -2422,7 +2717,7 @@ void FalsePositiveRefutationBRVisitor::finalizeVisitor(
VisitNode(EndPathNode, BRC, BR);
// Create a refutation manager
- SMTSolverRef RefutationSolver = CreateZ3Solver();
+ llvm::SMTSolverRef RefutationSolver = llvm::CreateZ3Solver();
ASTContext &Ctx = BRC.getASTContext();
// Add constraints to the solver
@@ -2430,7 +2725,7 @@ void FalsePositiveRefutationBRVisitor::finalizeVisitor(
const SymbolRef Sym = I.first;
auto RangeIt = I.second.begin();
- SMTExprRef Constraints = SMTConv::getRangeExpr(
+ llvm::SMTExprRef Constraints = SMTConv::getRangeExpr(
RefutationSolver, Ctx, Sym, RangeIt->From(), RangeIt->To(),
/*InRange=*/true);
while ((++RangeIt) != I.second.end()) {
@@ -2477,3 +2772,33 @@ void FalsePositiveRefutationBRVisitor::Profile(
static int Tag = 0;
ID.AddPointer(&Tag);
}
+
+//===----------------------------------------------------------------------===//
+// Implementation of TagVisitor.
+//===----------------------------------------------------------------------===//
+
+int NoteTag::Kind = 0;
+
+void TagVisitor::Profile(llvm::FoldingSetNodeID &ID) const {
+ static int Tag = 0;
+ ID.AddPointer(&Tag);
+}
+
+std::shared_ptr<PathDiagnosticPiece>
+TagVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
+ BugReport &R) {
+ ProgramPoint PP = N->getLocation();
+ const NoteTag *T = dyn_cast_or_null<NoteTag>(PP.getTag());
+ if (!T)
+ return nullptr;
+
+ if (Optional<std::string> Msg = T->generateMessage(BRC, R)) {
+ PathDiagnosticLocation Loc =
+ PathDiagnosticLocation::create(PP, BRC.getSourceManager());
+ auto Piece = std::make_shared<PathDiagnosticEventPiece>(Loc, *Msg);
+ Piece->setPrunable(T->isPrunable());
+ return Piece;
+ }
+
+ return nullptr;
+}
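(Illustrative sketch only, not part of the patch: analyzed code showing what the new condition-tracking and pop-up notes are about. The names are made up; the note wording is approximated from VisitTrueTest/printValue above.)

int divide(int x) {
  bool flag = true;
  int denom = 0;
  if (flag)              // control dependency of the bug location; with
                         // ShouldTrackConditions the branch condition is tracked,
                         // and since the value of 'flag' is known rather than
                         // assumed, a pop-up note along the lines of "'flag' is true"
                         // is emitted instead of an "Assuming..." event piece
    return x / denom;    // warning: division by zero
  return 0;
}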
diff --git a/lib/StaticAnalyzer/Core/CallEvent.cpp b/lib/StaticAnalyzer/Core/CallEvent.cpp
index 0e7f31502e81..a5f7500e6307 100644
--- a/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -1,9 +1,8 @@
//===- CallEvent.cpp - Wrapper for all function and method calls ----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -357,20 +356,32 @@ bool CallEvent::isCalled(const CallDescription &CD) const {
// FIXME: Add ObjC Message support.
if (getKind() == CE_ObjCMessage)
return false;
+
+ const IdentifierInfo *II = getCalleeIdentifier();
+ if (!II)
+ return false;
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(getDecl());
+ if (!FD)
+ return false;
+
+ if (CD.Flags & CDF_MaybeBuiltin) {
+ return CheckerContext::isCLibraryFunction(FD, CD.getFunctionName()) &&
+ (!CD.RequiredArgs || CD.RequiredArgs <= getNumArgs());
+ }
+
if (!CD.IsLookupDone) {
CD.IsLookupDone = true;
CD.II = &getState()->getStateManager().getContext().Idents.get(
CD.getFunctionName());
}
- const IdentifierInfo *II = getCalleeIdentifier();
- if (!II || II != CD.II)
+
+ if (II != CD.II)
return false;
- const Decl *D = getDecl();
// If CallDescription provides prefix names, use them to improve matching
// accuracy.
- if (CD.QualifiedName.size() > 1 && D) {
- const DeclContext *Ctx = D->getDeclContext();
+ if (CD.QualifiedName.size() > 1 && FD) {
+ const DeclContext *Ctx = FD->getDeclContext();
// See if we'll be able to match them all.
size_t NumUnmatched = CD.QualifiedName.size() - 1;
for (; Ctx && isa<NamedDecl>(Ctx); Ctx = Ctx->getParent()) {
@@ -394,8 +405,7 @@ bool CallEvent::isCalled(const CallDescription &CD) const {
return false;
}
- return (CD.RequiredArgs == CallDescription::NoArgRequirement ||
- CD.RequiredArgs == getNumArgs());
+ return (!CD.RequiredArgs || CD.RequiredArgs == getNumArgs());
}
SVal CallEvent::getArgSVal(unsigned Index) const {
@@ -756,8 +766,11 @@ RuntimeDefinition CXXInstanceCall::getRuntimeDefinition() const {
// Does the decl that we found have an implementation?
const FunctionDecl *Definition;
- if (!Result->hasBody(Definition))
+ if (!Result->hasBody(Definition)) {
+ if (!DynType.canBeASubClass())
+ return AnyFunctionCall::getRuntimeDefinition();
return {};
+ }
// We found a definition. If we're not sure that this devirtualization is
// actually what will happen at runtime, make sure to provide the region so
diff --git a/lib/StaticAnalyzer/Core/Checker.cpp b/lib/StaticAnalyzer/Core/Checker.cpp
index 72bfd84b40a3..f4e6f909d764 100644
--- a/lib/StaticAnalyzer/Core/Checker.cpp
+++ b/lib/StaticAnalyzer/Core/Checker.cpp
@@ -1,9 +1,8 @@
//== Checker.cpp - Registration mechanism for checkers -----------*- C++ -*--=//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/StaticAnalyzer/Core/CheckerContext.cpp b/lib/StaticAnalyzer/Core/CheckerContext.cpp
index 6cf931abbddd..725ff1002e29 100644
--- a/lib/StaticAnalyzer/Core/CheckerContext.cpp
+++ b/lib/StaticAnalyzer/Core/CheckerContext.cpp
@@ -1,9 +1,8 @@
//== CheckerContext.cpp - Context info for path-sensitive checkers-----------=//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/StaticAnalyzer/Core/CheckerHelpers.cpp b/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
index e73a22ae3981..34cdc9db699d 100644
--- a/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
+++ b/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
@@ -1,9 +1,8 @@
//===---- CheckerHelpers.cpp - Helper functions for checkers ----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/StaticAnalyzer/Core/CheckerManager.cpp b/lib/StaticAnalyzer/Core/CheckerManager.cpp
index 688c47e984cc..27d5797b4cbc 100644
--- a/lib/StaticAnalyzer/Core/CheckerManager.cpp
+++ b/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -1,9 +1,8 @@
//===- CheckerManager.cpp - Static Analyzer Checker Manager ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -15,7 +14,9 @@
#include "clang/AST/DeclBase.h"
#include "clang/AST/Stmt.h"
#include "clang/Analysis/ProgramPoint.h"
+#include "clang/Basic/JsonSupport.h"
#include "clang/Basic/LLVM.h"
+#include "clang/Driver/DriverDiagnostic.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
@@ -59,6 +60,15 @@ void CheckerManager::finishedCheckerRegistration() {
#endif
}
+void CheckerManager::reportInvalidCheckerOptionValue(
+ const CheckerBase *C, StringRef OptionName, StringRef ExpectedValueDesc) {
+
+ Context.getDiagnostics()
+ .Report(diag::err_analyzer_checker_option_invalid_input)
+ << (llvm::Twine() + C->getTagDescription() + ":" + OptionName).str()
+ << ExpectedValueDesc;
+}
+
//===----------------------------------------------------------------------===//
// Functions for running checkers for AST traversing..
//===----------------------------------------------------------------------===//
@@ -641,7 +651,6 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
const CallEvent &Call,
ExprEngine &Eng) {
- const CallExpr *CE = cast<CallExpr>(Call.getOriginExpr());
for (const auto Pred : Src) {
bool anyEvaluated = false;
@@ -650,16 +659,19 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
// Check if any of the EvalCall callbacks can evaluate the call.
for (const auto EvalCallChecker : EvalCallCheckers) {
- ProgramPoint::Kind K = ProgramPoint::PostStmtKind;
- const ProgramPoint &L =
- ProgramPoint::getProgramPoint(CE, K, Pred->getLocationContext(),
- EvalCallChecker.Checker);
+ // TODO: Support the situation when the call doesn't correspond
+ // to any Expr.
+ ProgramPoint L = ProgramPoint::getProgramPoint(
+ cast<CallExpr>(Call.getOriginExpr()),
+ ProgramPoint::PostStmtKind,
+ Pred->getLocationContext(),
+ EvalCallChecker.Checker);
bool evaluated = false;
{ // CheckerContext generates transitions(populates checkDest) on
// destruction, so introduce the scope to make sure it gets properly
// populated.
CheckerContext C(B, Eng, Pred, L);
- evaluated = EvalCallChecker(CE, C);
+ evaluated = EvalCallChecker(Call, C);
}
assert(!(evaluated && anyEvaluated)
&& "There are more than one checkers evaluating the call");
@@ -689,11 +701,73 @@ void CheckerManager::runCheckersOnEndOfTranslationUnit(
EndOfTranslationUnitChecker(TU, mgr, BR);
}
-void CheckerManager::runCheckersForPrintState(raw_ostream &Out,
- ProgramStateRef State,
- const char *NL, const char *Sep) {
- for (const auto &CheckerTag : CheckerTags)
- CheckerTag.second->printState(Out, State, NL, Sep);
+void CheckerManager::runCheckersForPrintStateJson(raw_ostream &Out,
+ ProgramStateRef State,
+ const char *NL,
+ unsigned int Space,
+ bool IsDot) const {
+ Indent(Out, Space, IsDot) << "\"checker_messages\": ";
+
+ // Create a temporary stream to see whether we have any message.
+ SmallString<1024> TempBuf;
+ llvm::raw_svector_ostream TempOut(TempBuf);
+ unsigned int InnerSpace = Space + 2;
+
+ // Create the new-line in JSON with enough space.
+ SmallString<128> NewLine;
+ llvm::raw_svector_ostream NLOut(NewLine);
+ NLOut << "\", " << NL; // Inject the ending and a new line
+ Indent(NLOut, InnerSpace, IsDot) << "\""; // then begin the next message.
+
+ ++Space;
+ bool HasMessage = false;
+
+  // First pass: find the last checker tag that prints a message, so that the
+  // second pass knows where to omit the trailing comma.
+ const void *LastCT = nullptr;
+ for (const auto &CT : CheckerTags) {
+ // See whether the current checker has a message.
+ CT.second->printState(TempOut, State, /*NL=*/NewLine.c_str(), /*Sep=*/"");
+
+ if (TempBuf.empty())
+ continue;
+
+ if (!HasMessage) {
+ Out << '[' << NL;
+ HasMessage = true;
+ }
+
+ LastCT = &CT;
+ TempBuf.clear();
+ }
+
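+  // Second pass: emit a JSON entry for each checker that printed something;
+  // LastCT marks the final entry so its trailing comma can be omitted.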
+ for (const auto &CT : CheckerTags) {
+ // See whether the current checker has a message.
+ CT.second->printState(TempOut, State, /*NL=*/NewLine.c_str(), /*Sep=*/"");
+
+ if (TempBuf.empty())
+ continue;
+
+ Indent(Out, Space, IsDot)
+ << "{ \"checker\": \"" << CT.second->getCheckName().getName()
+ << "\", \"messages\": [" << NL;
+ Indent(Out, InnerSpace, IsDot)
+ << '\"' << TempBuf.str().trim() << '\"' << NL;
+ Indent(Out, Space, IsDot) << "]}";
+
+ if (&CT != LastCT)
+ Out << ',';
+ Out << NL;
+
+ TempBuf.clear();
+ }
+
+  // It is the last element of 'program_state', so do not add a comma.
+ if (HasMessage)
+ Indent(Out, --Space, IsDot) << "]";
+ else
+ Out << "null";
+
+ Out << NL;
}
//===----------------------------------------------------------------------===//
diff --git a/lib/StaticAnalyzer/Core/CommonBugCategories.cpp b/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
index cdae3ef0116a..54501314386a 100644
--- a/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
+++ b/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
@@ -1,9 +1,8 @@
//=--- CommonBugCategories.cpp - Provides common issue categories -*- C++ -*-=//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/StaticAnalyzer/Core/ConstraintManager.cpp b/lib/StaticAnalyzer/Core/ConstraintManager.cpp
index ef9c44c51be4..d642c3530268 100644
--- a/lib/StaticAnalyzer/Core/ConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/ConstraintManager.cpp
@@ -1,9 +1,8 @@
//===- ConstraintManager.cpp - Constraints on symbolic values. ------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/StaticAnalyzer/Core/CoreEngine.cpp b/lib/StaticAnalyzer/Core/CoreEngine.cpp
index 196854cb09da..94cf74de8293 100644
--- a/lib/StaticAnalyzer/Core/CoreEngine.cpp
+++ b/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -1,9 +1,8 @@
//===- CoreEngine.cpp - Path-Sensitive Dataflow Engine --------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -217,6 +216,25 @@ void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {
LC->getDecl(),
LC->getCFG()->getNumBlockIDs());
+ // Display a prunable path note to the user if it's a virtual bases branch
+ // and we're taking the path that skips virtual base constructors.
+ if (L.getSrc()->getTerminator().isVirtualBaseBranch() &&
+ L.getDst() == *L.getSrc()->succ_begin()) {
+ ProgramPoint P = L.withTag(getNoteTags().makeNoteTag(
+ [](BugReporterContext &, BugReport &) -> std::string {
+ // TODO: Just call out the name of the most derived class
+ // when we know it.
+ return "Virtual base initialization skipped because "
+ "it has already been handled by the most derived class";
+ }, /*IsPrunable=*/true));
+ // Perform the transition.
+ ExplodedNodeSet Dst;
+ NodeBuilder Bldr(Pred, Dst, BuilderCtx);
+ Pred = Bldr.generateNode(P, Pred->getState(), Pred);
+ if (!Pred)
+ return;
+ }
+
// Check if we are entering the EXIT block.
if (Blk == &(L.getLocationContext()->getCFG()->getExit())) {
assert(L.getLocationContext()->getCFG()->getExit().empty() &&
@@ -276,14 +294,14 @@ void CoreEngine::HandleBlockEntrance(const BlockEntrance &L,
}
void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
- if (const Stmt *Term = B->getTerminator()) {
+ if (const Stmt *Term = B->getTerminatorStmt()) {
switch (Term->getStmtClass()) {
default:
llvm_unreachable("Analysis for this terminator not implemented.");
case Stmt::CXXBindTemporaryExprClass:
HandleCleanupTemporaryBranch(
- cast<CXXBindTemporaryExpr>(B->getTerminator().getStmt()), B, Pred);
+ cast<CXXBindTemporaryExpr>(Term), B, Pred);
return;
// Model static initializers.
@@ -378,9 +396,19 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
case Stmt::WhileStmtClass:
HandleBranch(cast<WhileStmt>(Term)->getCond(), Term, B, Pred);
return;
+
+ case Stmt::GCCAsmStmtClass:
+ assert(cast<GCCAsmStmt>(Term)->isAsmGoto() && "Encountered GCCAsmStmt without labels");
+ // TODO: Handle jumping to labels
+ return;
}
}
+ if (B->getTerminator().isVirtualBaseBranch()) {
+ HandleVirtualBaseBranch(B, Pred);
+ return;
+ }
+
assert(B->succ_size() == 1 &&
"Blocks with no terminator should have at most 1 successor.");
@@ -440,6 +468,29 @@ void CoreEngine::HandlePostStmt(const CFGBlock *B, unsigned StmtIdx,
}
}
+void CoreEngine::HandleVirtualBaseBranch(const CFGBlock *B,
+ ExplodedNode *Pred) {
+ const LocationContext *LCtx = Pred->getLocationContext();
+ if (const auto *CallerCtor = dyn_cast_or_null<CXXConstructExpr>(
+ LCtx->getStackFrame()->getCallSite())) {
+ switch (CallerCtor->getConstructionKind()) {
+ case CXXConstructExpr::CK_NonVirtualBase:
+ case CXXConstructExpr::CK_VirtualBase: {
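+        // The caller is itself a base-class constructor, so the virtual bases
+        // have already been initialized by the most derived class; take the
+        // branch that skips the virtual base initializers.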
+ BlockEdge Loc(B, *B->succ_begin(), LCtx);
+ HandleBlockEdge(Loc, Pred);
+ return;
+ }
+ default:
+ break;
+ }
+ }
+
+ // We either don't see a parent stack frame because we're in the top frame,
+ // or the parent stack frame doesn't initialize our virtual bases.
+ BlockEdge Loc(B, *(B->succ_begin() + 1), LCtx);
+ HandleBlockEdge(Loc, Pred);
+}
+
/// generateNode - Utility method to generate nodes, hook up successors,
/// and add nodes to the worklist.
void CoreEngine::generateNode(const ProgramPoint &Loc,
diff --git a/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp b/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp
index da7854df1def..79424452240d 100644
--- a/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp
+++ b/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp
@@ -1,9 +1,8 @@
//===- DynamicTypeMap.cpp - Dynamic Type Info related APIs ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -14,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeMap.h"
+#include "clang/Basic/JsonSupport.h"
#include "clang/Basic/LLVM.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
@@ -36,7 +36,7 @@ DynamicTypeInfo getDynamicTypeInfo(ProgramStateRef State,
// Otherwise, fall back to what we know about the region.
if (const auto *TR = dyn_cast<TypedRegion>(Reg))
- return DynamicTypeInfo(TR->getLocationType(), /*CanBeSubclass=*/false);
+ return DynamicTypeInfo(TR->getLocationType(), /*CanBeSub=*/false);
if (const auto *SR = dyn_cast<SymbolicRegion>(Reg)) {
SymbolRef Sym = SR->getSymbol();
@@ -54,27 +54,38 @@ ProgramStateRef setDynamicTypeInfo(ProgramStateRef State, const MemRegion *Reg,
return NewState;
}
-void printDynamicTypeInfo(ProgramStateRef State, raw_ostream &Out,
- const char *NL, const char *Sep) {
- bool First = true;
- for (const auto &I : State->get<DynamicTypeMap>()) {
- if (First) {
- Out << NL << "Dynamic types of regions:" << NL;
- First = false;
- }
- const MemRegion *MR = I.first;
- const DynamicTypeInfo &DTI = I.second;
- Out << MR << " : ";
+void printDynamicTypeInfoJson(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, unsigned int Space, bool IsDot) {
+ Indent(Out, Space, IsDot) << "\"dynamic_types\": ";
+
+ const DynamicTypeMapTy &DTM = State->get<DynamicTypeMap>();
+ if (DTM.isEmpty()) {
+ Out << "null," << NL;
+ return;
+ }
+
+ ++Space;
+ Out << '[' << NL;
+ for (DynamicTypeMapTy::iterator I = DTM.begin(); I != DTM.end(); ++I) {
+ const MemRegion *MR = I->first;
+ const DynamicTypeInfo &DTI = I->second;
+ Out << "{ \"region\": \"" << MR << "\", \"dyn_type\": ";
if (DTI.isValid()) {
- Out << DTI.getType()->getPointeeType().getAsString();
- if (DTI.canBeASubClass()) {
- Out << " (or its subclass)";
- }
+ Out << '\"' << DTI.getType()->getPointeeType().getAsString()
+ << "\", \"sub_classable\": "
+ << (DTI.canBeASubClass() ? "true" : "false");
} else {
- Out << "Invalid type info";
+ Out << "null"; // Invalid type info
}
+ Out << "}";
+
+ if (std::next(I) != DTM.end())
+ Out << ',';
Out << NL;
}
+
+ --Space;
+ Indent(Out, Space, IsDot) << "]," << NL;
}
void *ProgramStateTrait<DynamicTypeMap>::GDMIndex() {
diff --git a/lib/StaticAnalyzer/Core/Environment.cpp b/lib/StaticAnalyzer/Core/Environment.cpp
index b45f93b6dde8..551c89b04db4 100644
--- a/lib/StaticAnalyzer/Core/Environment.cpp
+++ b/lib/StaticAnalyzer/Core/Environment.cpp
@@ -1,9 +1,8 @@
//===- Environment.cpp - Map from Stmt* to Locations/Values ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -19,6 +18,7 @@
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/JsonSupport.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
@@ -200,43 +200,86 @@ EnvironmentManager::removeDeadBindings(Environment Env,
return NewEnv;
}
-void Environment::print(raw_ostream &Out, const char *NL,
- const char *Sep,
- const ASTContext &Context,
- const LocationContext *WithLC) const {
- if (ExprBindings.isEmpty())
+void Environment::printJson(raw_ostream &Out, const ASTContext &Ctx,
+ const LocationContext *LCtx, const char *NL,
+ unsigned int Space, bool IsDot) const {
+ Indent(Out, Space, IsDot) << "\"environment\": ";
+
+ if (ExprBindings.isEmpty()) {
+ Out << "null," << NL;
return;
+ }
- if (!WithLC) {
+ ++Space;
+ if (!LCtx) {
// Find the freshest location context.
llvm::SmallPtrSet<const LocationContext *, 16> FoundContexts;
- for (auto I : *this) {
+ for (const auto &I : *this) {
const LocationContext *LC = I.first.getLocationContext();
if (FoundContexts.count(LC) == 0) {
// This context is fresher than all other contexts so far.
- WithLC = LC;
+ LCtx = LC;
for (const LocationContext *LCI = LC; LCI; LCI = LCI->getParent())
FoundContexts.insert(LCI);
}
}
}
- assert(WithLC);
+ assert(LCtx);
+
+ Out << "{ \"pointer\": \"" << (const void *)LCtx->getStackFrame()
+ << "\", \"items\": [" << NL;
+ PrintingPolicy PP = Ctx.getPrintingPolicy();
- PrintingPolicy PP = Context.getPrintingPolicy();
+ LCtx->printJson(Out, NL, Space, IsDot, [&](const LocationContext *LC) {
+ // LCtx items begin
+ bool HasItem = false;
+ unsigned int InnerSpace = Space + 1;
- Out << NL << "Expressions by stack frame:" << NL;
- WithLC->dumpStack(Out, "", NL, Sep, [&](const LocationContext *LC) {
- for (auto I : ExprBindings) {
- if (I.first.getLocationContext() != LC)
+ // Store the last ExprBinding which we will print.
+ BindingsTy::iterator LastI = ExprBindings.end();
+ for (BindingsTy::iterator I = ExprBindings.begin(); I != ExprBindings.end();
+ ++I) {
+ if (I->first.getLocationContext() != LC)
continue;
- const Stmt *S = I.first.getStmt();
+ if (!HasItem) {
+ HasItem = true;
+ Out << '[' << NL;
+ }
+
+ const Stmt *S = I->first.getStmt();
+ (void)S;
assert(S != nullptr && "Expected non-null Stmt");
- Out << "(LC" << LC->getID() << ", S" << S->getID(Context) << ") ";
- S->printPretty(Out, /*Helper=*/nullptr, PP);
- Out << " : " << I.second << NL;
+ LastI = I;
+ }
+
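+    // Second pass: print the bindings that belong to this LocationContext;
+    // LastI marks the final one so its trailing comma can be omitted.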
+ for (BindingsTy::iterator I = ExprBindings.begin(); I != ExprBindings.end();
+ ++I) {
+ if (I->first.getLocationContext() != LC)
+ continue;
+
+ const Stmt *S = I->first.getStmt();
+ Indent(Out, InnerSpace, IsDot)
+ << "{ \"stmt_id\": " << S->getID(Ctx) << ", \"pretty\": ";
+ S->printJson(Out, nullptr, PP, /*AddQuotes=*/true);
+
+ Out << ", \"value\": ";
+ I->second.printJson(Out, /*AddQuotes=*/true);
+
+ Out << " }";
+
+ if (I != LastI)
+ Out << ',';
+ Out << NL;
}
+
+ if (HasItem)
+ Indent(Out, --InnerSpace, IsDot) << ']';
+ else
+ Out << "null ";
});
+
+ Indent(Out, --Space, IsDot) << "]}," << NL;
}
diff --git a/lib/StaticAnalyzer/Core/ExplodedGraph.cpp b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
index d6bcbb96b55f..c86b1436baab 100644
--- a/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
+++ b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
@@ -1,9 +1,8 @@
//===- ExplodedGraph.cpp - Local, Path-Sens. "Exploded Graph" -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/StaticAnalyzer/Core/ExprEngine.cpp b/lib/StaticAnalyzer/Core/ExprEngine.cpp
index 151eef56fece..1fef5b3c1edd 100644
--- a/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -1,9 +1,8 @@
//===- ExprEngine.cpp - Path-Sensitive Expression-Level Dataflow ----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -34,6 +33,7 @@
#include "clang/Analysis/ConstructionContext.h"
#include "clang/Analysis/ProgramPoint.h"
#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/JsonSupport.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/PrettyStackTrace.h"
@@ -142,21 +142,34 @@ public:
return getLocationContext()->getDecl()->getASTContext();
}
- void print(llvm::raw_ostream &OS, PrinterHelper *Helper, PrintingPolicy &PP) {
- OS << "(LC" << getLocationContext()->getID() << ',';
- if (const Stmt *S = getItem().getStmtOrNull())
- OS << 'S' << S->getID(getASTContext());
+ void printJson(llvm::raw_ostream &Out, PrinterHelper *Helper,
+ PrintingPolicy &PP) const {
+ const Stmt *S = getItem().getStmtOrNull();
+ const CXXCtorInitializer *I = nullptr;
+ if (!S)
+ I = getItem().getCXXCtorInitializer();
+
+ if (S)
+ Out << "\"stmt_id\": " << S->getID(getASTContext());
else
- OS << 'I' << getItem().getCXXCtorInitializer()->getID(getASTContext());
- OS << ',' << getItem().getKindAsString();
+ Out << "\"init_id\": " << I->getID(getASTContext());
+
+ // Kind
+ Out << ", \"kind\": \"" << getItem().getKindAsString()
+ << "\", \"argument_index\": ";
+
if (getItem().getKind() == ConstructionContextItem::ArgumentKind)
- OS << " #" << getItem().getIndex();
- OS << ") ";
- if (const Stmt *S = getItem().getStmtOrNull()) {
- S->printPretty(OS, Helper, PP);
+ Out << getItem().getIndex();
+ else
+ Out << "null";
+
+ // Pretty-print
+ Out << ", \"pretty\": ";
+
+ if (S) {
+ S->printJson(Out, Helper, PP, /*AddQuotes=*/true);
} else {
- const CXXCtorInitializer *I = getItem().getCXXCtorInitializer();
- OS << I->getAnyMember()->getNameAsString();
+ Out << '\"' << I->getAnyMember()->getNameAsString() << '\"';
}
}
@@ -198,9 +211,13 @@ ExprEngine::ExprEngine(cross_tu::CrossTranslationUnitContext &CTU,
mgr.getConstraintManagerCreator(), G.getAllocator(),
this),
SymMgr(StateMgr.getSymbolManager()),
- svalBuilder(StateMgr.getSValBuilder()), ObjCNoRet(mgr.getASTContext()),
+ MRMgr(StateMgr.getRegionManager()),
+ svalBuilder(StateMgr.getSValBuilder()),
+ ObjCNoRet(mgr.getASTContext()),
BR(mgr, *this),
- VisitedCallees(VisitedCalleesIn), HowToInline(HowToInlineIn) {
+ VisitedCallees(VisitedCalleesIn),
+ HowToInline(HowToInlineIn)
+ {
unsigned TrimInterval = mgr.options.GraphTrimInterval;
if (TrimInterval != 0) {
// Enable eager node reclamation when constructing the ExplodedGraph.
@@ -208,10 +225,6 @@ ExprEngine::ExprEngine(cross_tu::CrossTranslationUnitContext &CTU,
}
}
-ExprEngine::~ExprEngine() {
- BR.FlushReports();
-}
-
//===----------------------------------------------------------------------===//
// Utility methods.
//===----------------------------------------------------------------------===//
@@ -538,36 +551,73 @@ ExprEngine::processRegionChanges(ProgramStateRef state,
LCtx, Call);
}
-static void printObjectsUnderConstructionForContext(raw_ostream &Out,
- ProgramStateRef State,
- const char *NL,
- const LocationContext *LC) {
+static void
+printObjectsUnderConstructionJson(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const LocationContext *LCtx,
+ unsigned int Space = 0, bool IsDot = false) {
PrintingPolicy PP =
- LC->getAnalysisDeclContext()->getASTContext().getPrintingPolicy();
- for (auto I : State->get<ObjectsUnderConstruction>()) {
- ConstructedObjectKey Key = I.first;
+ LCtx->getAnalysisDeclContext()->getASTContext().getPrintingPolicy();
+
+ ++Space;
+ bool HasItem = false;
+
+ // Store the last key.
+ const ConstructedObjectKey *LastKey = nullptr;
+ for (const auto &I : State->get<ObjectsUnderConstruction>()) {
+ const ConstructedObjectKey &Key = I.first;
+ if (Key.getLocationContext() != LCtx)
+ continue;
+
+ if (!HasItem) {
+ Out << "[" << NL;
+ HasItem = true;
+ }
+
+ LastKey = &Key;
+ }
+
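+  // Second pass: print each object under construction in this context;
+  // LastKey marks the final entry so its trailing comma can be omitted.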
+ for (const auto &I : State->get<ObjectsUnderConstruction>()) {
+ const ConstructedObjectKey &Key = I.first;
SVal Value = I.second;
- if (Key.getLocationContext() != LC)
+ if (Key.getLocationContext() != LCtx)
continue;
- Key.print(Out, nullptr, PP);
- Out << " : " << Value << NL;
+
+ Indent(Out, Space, IsDot) << "{ ";
+ Key.printJson(Out, nullptr, PP);
+ Out << ", \"value\": \"" << Value << "\" }";
+
+ if (&Key != LastKey)
+ Out << ',';
+ Out << NL;
+ }
+
+ if (HasItem)
+ Indent(Out, --Space, IsDot) << ']'; // End of "location_context".
+ else {
+ Out << "null ";
}
}
-void ExprEngine::printState(raw_ostream &Out, ProgramStateRef State,
- const char *NL, const char *Sep,
- const LocationContext *LCtx) {
- if (LCtx) {
- if (!State->get<ObjectsUnderConstruction>().isEmpty()) {
- Out << Sep << "Objects under construction:" << NL;
+void ExprEngine::printJson(raw_ostream &Out, ProgramStateRef State,
+ const LocationContext *LCtx, const char *NL,
+ unsigned int Space, bool IsDot) const {
+ Indent(Out, Space, IsDot) << "\"constructing_objects\": ";
- LCtx->dumpStack(Out, "", NL, Sep, [&](const LocationContext *LC) {
- printObjectsUnderConstructionForContext(Out, State, NL, LC);
- });
- }
+ if (LCtx && !State->get<ObjectsUnderConstruction>().isEmpty()) {
+ ++Space;
+ Out << '[' << NL;
+ LCtx->printJson(Out, NL, Space, IsDot, [&](const LocationContext *LC) {
+ printObjectsUnderConstructionJson(Out, State, NL, LC, Space, IsDot);
+ });
+
+ --Space;
+ Indent(Out, Space, IsDot) << "]," << NL; // End of "constructing_objects".
+ } else {
+ Out << "null," << NL;
}
- getCheckerManager().runCheckersForPrintState(Out, State, NL, Sep);
+ getCheckerManager().runCheckersForPrintStateJson(Out, State, NL, Space,
+ IsDot);
}
void ExprEngine::processEndWorklist() {
@@ -1338,6 +1388,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::NoInitExprClass:
case Stmt::SizeOfPackExprClass:
case Stmt::StringLiteralClass:
+ case Stmt::SourceLocExprClass:
case Stmt::ObjCStringLiteralClass:
case Stmt::CXXPseudoDestructorExprClass:
case Stmt::SubstNonTypeTemplateParmExprClass:
@@ -1517,7 +1568,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
ProgramStateRef NewState =
createTemporaryRegionIfNeeded(State, LCtx, OCE->getArg(0));
if (NewState != State) {
- Pred = Bldr.generateNode(OCE, Pred, NewState, /*Tag=*/nullptr,
+ Pred = Bldr.generateNode(OCE, Pred, NewState, /*tag=*/nullptr,
ProgramPoint::PreStmtKind);
// Did we cache out?
if (!Pred)
@@ -1636,6 +1687,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::CXXReinterpretCastExprClass:
case Stmt::CXXConstCastExprClass:
case Stmt::CXXFunctionalCastExprClass:
+ case Stmt::BuiltinBitCastExprClass:
case Stmt::ObjCBridgedCastExprClass: {
Bldr.takeNodes(Pred);
const auto *C = cast<CastExpr>(S);
@@ -1858,7 +1910,7 @@ void ExprEngine::processCFGBlockEntrance(const BlockEdge &L,
// other constraints) then consider completely unrolling it.
if(AMgr.options.ShouldUnrollLoops) {
unsigned maxBlockVisitOnPath = AMgr.options.maxBlockVisitOnPath;
- const Stmt *Term = nodeBuilder.getContext().getBlock()->getTerminator();
+ const Stmt *Term = nodeBuilder.getContext().getBlock()->getTerminatorStmt();
if (Term) {
ProgramStateRef NewState = updateLoopStack(Term, AMgr.getASTContext(),
Pred, maxBlockVisitOnPath);
@@ -1879,7 +1931,7 @@ void ExprEngine::processCFGBlockEntrance(const BlockEdge &L,
unsigned int BlockCount = nodeBuilder.getContext().blockCount();
if (BlockCount == AMgr.options.maxBlockVisitOnPath - 1 &&
AMgr.options.ShouldWidenLoops) {
- const Stmt *Term = nodeBuilder.getContext().getBlock()->getTerminator();
+ const Stmt *Term = nodeBuilder.getContext().getBlock()->getTerminatorStmt();
if (!(Term &&
(isa<ForStmt>(Term) || isa<WhileStmt>(Term) || isa<DoStmt>(Term))))
return;
@@ -2004,8 +2056,8 @@ static const Stmt *ResolveCondition(const Stmt *Condition,
if (!BO || !BO->isLogicalOp())
return Condition;
- assert(!B->getTerminator().isTemporaryDtorsBranch() &&
- "Temporary destructor branches handled by processBindTemporary.");
+ assert(B->getTerminator().isStmtBranch() &&
+ "Other kinds of branches are handled separately!");
// For logical operations, we still have the case where some branches
// use the traditional "merge" approach and others sink the branch
@@ -2258,7 +2310,6 @@ void ExprEngine::processEndOfFunction(NodeBuilderContext& BC,
Pred->getStackFrame()->getParent()));
PrettyStackTraceLocationContext CrashInfo(Pred->getLocationContext());
- StateMgr.EndPath(Pred->getState());
ExplodedNodeSet Dst;
if (Pred->getLocationContext()->inTopFrame()) {
@@ -2620,43 +2671,39 @@ void ExprEngine::VisitAtomicExpr(const AtomicExpr *AE, ExplodedNode *Pred,
getCheckerManager().runCheckersForPostStmt(Dst, AfterInvalidateSet, AE, *this);
}
-// A value escapes in three possible cases:
+// A value escapes in four possible cases:
// (1) We are binding to something that is not a memory region.
-// (2) We are binding to a MemrRegion that does not have stack storage.
-// (3) We are binding to a MemRegion with stack storage that the store
+// (2) We are binding to a MemRegion that does not have stack storage.
+// (3) We are binding to a top-level parameter region with a non-trivial
+// destructor. We won't see the destructor during analysis, but it's there.
+// (4) We are binding to a MemRegion with stack storage that the store
// does not understand.
-ProgramStateRef ExprEngine::processPointerEscapedOnBind(ProgramStateRef State,
- SVal Loc,
- SVal Val,
- const LocationContext *LCtx) {
- // Are we storing to something that causes the value to "escape"?
- bool escapes = true;
-
- // TODO: Move to StoreManager.
- if (Optional<loc::MemRegionVal> regionLoc = Loc.getAs<loc::MemRegionVal>()) {
- escapes = !regionLoc->getRegion()->hasStackStorage();
-
- if (!escapes) {
- // To test (3), generate a new state with the binding added. If it is
- // the same state, then it escapes (since the store cannot represent
- // the binding).
- // Do this only if we know that the store is not supposed to generate the
- // same state.
- SVal StoredVal = State->getSVal(regionLoc->getRegion());
- if (StoredVal != Val)
- escapes = (State == (State->bindLoc(*regionLoc, Val, LCtx)));
- }
- }
-
- // If our store can represent the binding and we aren't storing to something
- // that doesn't have local storage then just return and have the simulation
- // state continue as is.
- if (!escapes)
- return State;
+ProgramStateRef
+ExprEngine::processPointerEscapedOnBind(ProgramStateRef State, SVal Loc,
+ SVal Val, const LocationContext *LCtx) {
+
+ // Cases (1) and (2).
+ const MemRegion *MR = Loc.getAsRegion();
+ if (!MR || !MR->hasStackStorage())
+ return escapeValue(State, Val, PSK_EscapeOnBind);
+
+ // Case (3).
+ if (const auto *VR = dyn_cast<VarRegion>(MR->getBaseRegion()))
+ if (VR->hasStackParametersStorage() && VR->getStackFrame()->inTopFrame())
+ if (const auto *RD = VR->getValueType()->getAsCXXRecordDecl())
+ if (!RD->hasTrivialDestructor())
+ return escapeValue(State, Val, PSK_EscapeOnBind);
+
+ // Case (4): in order to test that, generate a new state with the binding
+ // added. If it is the same state, then it escapes (since the store cannot
+ // represent the binding).
+ // Do this only if we know that the store is not supposed to generate the
+ // same state.
+ SVal StoredVal = State->getSVal(MR);
+ if (StoredVal != Val)
+ if (State == (State->bindLoc(loc::MemRegionVal(MR), Val, LCtx)))
+ return escapeValue(State, Val, PSK_EscapeOnBind);
- // Otherwise, find all symbols referenced by 'val' that we are tracking
- // and stop tracking them.
- State = escapeValue(State, Val, PSK_EscapeOnBind);
return State;
}
@@ -2959,7 +3006,8 @@ struct DOTGraphTraits<ExplodedGraph*> : public DefaultDOTGraphTraits {
for (const auto &EQ : EQClasses) {
for (const BugReport &Report : EQ) {
- if (Report.getErrorNode() == N)
+ if (Report.getErrorNode()->getState() == N->getState() &&
+ Report.getErrorNode()->getLocation() == N->getLocation())
return true;
}
}
@@ -2995,57 +3043,63 @@ struct DOTGraphTraits<ExplodedGraph*> : public DefaultDOTGraphTraits {
return false;
}
- static std::string getNodeAttributes(const ExplodedNode *N,
- ExplodedGraph *) {
- SmallVector<StringRef, 10> Out;
- auto Noop = [](const ExplodedNode*){};
- if (traverseHiddenNodes(N, Noop, Noop, &nodeHasBugReport)) {
- Out.push_back("style=filled");
- Out.push_back("fillcolor=red");
- }
-
- if (traverseHiddenNodes(N, Noop, Noop,
- [](const ExplodedNode *C) { return C->isSink(); }))
- Out.push_back("color=blue");
- return llvm::join(Out, ",");
- }
-
static bool isNodeHidden(const ExplodedNode *N) {
return N->isTrivial();
}
static std::string getNodeLabel(const ExplodedNode *N, ExplodedGraph *G){
- std::string sbuf;
- llvm::raw_string_ostream Out(sbuf);
+ std::string Buf;
+ llvm::raw_string_ostream Out(Buf);
+ const bool IsDot = true;
+ const unsigned int Space = 1;
ProgramStateRef State = N->getState();
+ auto Noop = [](const ExplodedNode*){};
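+    // Also take the hidden (trivial) nodes collapsed into this one into
+    // account when deciding whether to flag a bug report or a sink.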
+ bool HasReport = traverseHiddenNodes(
+ N, Noop, Noop, &nodeHasBugReport);
+ bool IsSink = traverseHiddenNodes(
+ N, Noop, Noop, [](const ExplodedNode *N) { return N->isSink(); });
+
+ Out << "{ \"node_id\": " << N->getID(G) << ", \"pointer\": \""
+ << (const void *)N << "\", \"state_id\": " << State->getID()
+ << ", \"has_report\": " << (HasReport ? "true" : "false")
+ << ", \"is_sink\": " << (IsSink ? "true" : "false")
+ << ",\\l";
+
+ Indent(Out, Space, IsDot) << "\"program_points\": [\\l";
+
// Dump program point for all the previously skipped nodes.
traverseHiddenNodes(
N,
[&](const ExplodedNode *OtherNode) {
- OtherNode->getLocation().print(/*CR=*/"\\l", Out);
+ Indent(Out, Space + 1, IsDot) << "{ ";
+ OtherNode->getLocation().printJson(Out, /*NL=*/"\\l");
+ Out << ", \"tag\": ";
if (const ProgramPointTag *Tag = OtherNode->getLocation().getTag())
- Out << "\\lTag:" << Tag->getTagDescription();
- if (N->isSink())
- Out << "\\lNode is sink\\l";
- if (nodeHasBugReport(N))
- Out << "\\lBug report attached\\l";
+ Out << '\"' << Tag->getTagDescription() << "\" }";
+ else
+ Out << "null }";
},
- [&](const ExplodedNode *) { Out << "\\l--------\\l"; },
+ // Adds a comma and a new-line between each program point.
+ [&](const ExplodedNode *) { Out << ",\\l"; },
[&](const ExplodedNode *) { return false; });
- Out << "\\l\\|";
-
- Out << "StateID: ST" << State->getID() << ", NodeID: N" << N->getID(G)
- << " <" << (const void *)N << ">\\|";
+ Out << "\\l"; // Adds a new-line to the last program point.
+ Indent(Out, Space, IsDot) << "],\\l";
bool SameAsAllPredecessors =
std::all_of(N->pred_begin(), N->pred_end(), [&](const ExplodedNode *P) {
return P->getState() == State;
});
- if (!SameAsAllPredecessors)
- State->printDOT(Out, N->getLocationContext());
+
+ if (!SameAsAllPredecessors) {
+ State->printDOT(Out, N->getLocationContext(), Space);
+ } else {
+ Indent(Out, Space, IsDot) << "\"program_state\": null";
+ }
+
+ Out << "\\l}\\l";
return Out.str();
}
};
diff --git a/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index b980628878e9..f436650fbdd9 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -1,9 +1,8 @@
//=-- ExprEngineC.cpp - ExprEngine support for C expressions ----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -101,6 +100,10 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
SVal Result = evalBinOp(state, Op, LeftV, RightV, B->getType());
if (!Result.isUnknown()) {
state = state->BindExpr(B, LCtx, Result);
+ } else {
+    // If we cannot evaluate the operation, escape the operands.
+ state = escapeValue(state, LeftV, PSK_EscapeOther);
+ state = escapeValue(state, RightV, PSK_EscapeOther);
}
Bldr.generateNode(B, *it, state);
@@ -377,9 +380,9 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_Dependent:
case CK_ArrayToPointerDecay:
case CK_BitCast:
+ case CK_LValueToRValueBitCast:
case CK_AddressSpaceConversion:
case CK_BooleanToSignedIntegral:
- case CK_NullToPointer:
case CK_IntegralToPointer:
case CK_PointerToIntegral: {
SVal V = state->getSVal(Ex, LCtx);
@@ -416,7 +419,9 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_IntToOCLSampler:
case CK_LValueBitCast:
case CK_FixedPointCast:
- case CK_FixedPointToBoolean: {
+ case CK_FixedPointToBoolean:
+ case CK_FixedPointToIntegral:
+ case CK_IntegralToFixedPoint: {
state =
handleLValueBitCast(state, Ex, LCtx, T, ExTy, CastE, Bldr, Pred);
continue;
@@ -502,6 +507,12 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
Bldr.generateNode(CastE, Pred, state);
continue;
}
+ case CK_NullToPointer: {
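+      // A null-to-pointer cast always produces a concrete null pointer,
+      // regardless of how the operand was modeled.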
+ SVal V = svalBuilder.makeNull();
+ state = state->BindExpr(CastE, LCtx, V);
+ Bldr.generateNode(CastE, Pred, state);
+ continue;
+ }
case CK_NullToMemberPointer: {
SVal V = svalBuilder.getMemberPointer(nullptr);
state = state->BindExpr(CastE, LCtx, V);
@@ -626,6 +637,21 @@ void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
void ExprEngine::VisitLogicalExpr(const BinaryOperator* B, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
+ // This method acts upon CFG elements for logical operators && and ||
+ // and attaches the value (true or false) to them as expressions.
+ // It doesn't produce any state splits.
+ // If we made it that far, we're past the point when we modeled the short
+ // circuit. It means that we should have precise knowledge about whether
+ // we've short-circuited. If we did, we already know the value we need to
+ // bind. If we didn't, the value of the RHS (casted to the boolean type)
+ // is the answer.
+ // Currently this method tries to figure out whether we've short-circuited
+ // by looking at the ExplodedGraph. This method is imperfect because there
+  // could have been merges that would have resulted in multiple
+ // potential path traversal histories. We bail out when we fail.
+ // Due to this ambiguity, a more reliable solution would have been to
+ // track the short circuit operation history path-sensitively until
+ // we evaluate the respective logical operator.
assert(B->getOpcode() == BO_LAnd ||
B->getOpcode() == BO_LOr);
@@ -647,10 +673,20 @@ void ExprEngine::VisitLogicalExpr(const BinaryOperator* B, ExplodedNode *Pred,
ProgramPoint P = N->getLocation();
assert(P.getAs<PreStmt>()|| P.getAs<PreStmtPurgeDeadSymbols>());
(void) P;
- assert(N->pred_size() == 1);
+ if (N->pred_size() != 1) {
+ // We failed to track back where we came from.
+ Bldr.generateNode(B, Pred, state);
+ return;
+ }
N = *N->pred_begin();
}
- assert(N->pred_size() == 1);
+
+ if (N->pred_size() != 1) {
+ // We failed to track back where we came from.
+ Bldr.generateNode(B, Pred, state);
+ return;
+ }
+
N = *N->pred_begin();
BlockEdge BE = N->getLocation().castAs<BlockEdge>();
SVal X;
@@ -703,7 +739,7 @@ void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
QualType T = getContext().getCanonicalType(IE->getType());
unsigned NumInitElements = IE->getNumInits();
- if (!IE->isGLValue() &&
+ if (!IE->isGLValue() && !IE->isTransparent() &&
(T->isArrayType() || T->isRecordType() || T->isVectorType() ||
T->isAnyComplexType())) {
llvm::ImmutableList<SVal> vals = getBasicVals().getEmptySValList();
diff --git a/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index 6445b9df5a58..1cbd09ea5793 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -1,9 +1,8 @@
//===- ExprEngineCXX.cpp - ExprEngine support for C++ -----------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -197,6 +196,12 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
// able to find construction context at all.
break;
}
+ if (isa<BlockInvocationContext>(CallerLCtx)) {
+ // Unwrap block invocation contexts. They're mostly part of
+ // the current stack frame.
+ CallerLCtx = CallerLCtx->getParent();
+ assert(!isa<BlockInvocationContext>(CallerLCtx));
+ }
return prepareForObjectConstruction(
cast<Expr>(SFC->getCallSite()), State, CallerLCtx,
RTC->getConstructionContext(), CallOpts);
@@ -423,25 +428,20 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
prepareForObjectConstruction(CE, State, LCtx, CC, CallOpts);
break;
}
- case CXXConstructExpr::CK_VirtualBase:
+ case CXXConstructExpr::CK_VirtualBase: {
// Make sure we are not calling virtual base class initializers twice.
// Only the most-derived object should initialize virtual base classes.
- if (const Stmt *Outer = LCtx->getStackFrame()->getCallSite()) {
- const CXXConstructExpr *OuterCtor = dyn_cast<CXXConstructExpr>(Outer);
- if (OuterCtor) {
- switch (OuterCtor->getConstructionKind()) {
- case CXXConstructExpr::CK_NonVirtualBase:
- case CXXConstructExpr::CK_VirtualBase:
- // Bail out!
- destNodes.Add(Pred);
- return;
- case CXXConstructExpr::CK_Complete:
- case CXXConstructExpr::CK_Delegating:
- break;
- }
- }
- }
+ const auto *OuterCtor = dyn_cast_or_null<CXXConstructExpr>(
+ LCtx->getStackFrame()->getCallSite());
+ assert(
+ (!OuterCtor ||
+ OuterCtor->getConstructionKind() == CXXConstructExpr::CK_Complete ||
+ OuterCtor->getConstructionKind() == CXXConstructExpr::CK_Delegating) &&
+ ("This virtual base should have already been initialized by "
+ "the most derived class!"));
+ (void)OuterCtor;
LLVM_FALLTHROUGH;
+ }
case CXXConstructExpr::CK_NonVirtualBase:
// In C++17, classes with non-virtual bases may be aggregates, so they would
// be initialized as aggregates without a constructor call, so we may have
@@ -604,6 +604,7 @@ void ExprEngine::VisitCXXDestructor(QualType ObjectType,
ExplodedNode *Pred,
ExplodedNodeSet &Dst,
const EvalCallOptions &CallOpts) {
+ assert(S && "A destructor without a trigger!");
const LocationContext *LCtx = Pred->getLocationContext();
ProgramStateRef State = Pred->getState();
@@ -611,6 +612,19 @@ void ExprEngine::VisitCXXDestructor(QualType ObjectType,
assert(RecordDecl && "Only CXXRecordDecls should have destructors");
const CXXDestructorDecl *DtorDecl = RecordDecl->getDestructor();
+ // FIXME: There should always be a Decl, otherwise the destructor call
+ // shouldn't have been added to the CFG in the first place.
+ if (!DtorDecl) {
+ // Skip the invalid destructor. We cannot simply return because
+ // it would interrupt the analysis instead.
+ static SimpleProgramPointTag T("ExprEngine", "SkipInvalidDestructor");
+ // FIXME: PostImplicitCall with a null decl may crash elsewhere anyway.
+ PostImplicitCall PP(/*Decl=*/nullptr, S->getEndLoc(), LCtx, &T);
+ NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+ Bldr.generateNode(PP, Pred->getState(), Pred);
+ return;
+ }
+
CallEventManager &CEMgr = getStateManager().getCallEventManager();
CallEventRef<CXXDestructorCall> Call =
CEMgr.getCXXDestructorCall(DtorDecl, S, Dest, IsBaseDtor, State, LCtx);
@@ -629,7 +643,6 @@ void ExprEngine::VisitCXXDestructor(QualType ObjectType,
I != E; ++I)
defaultEvalCall(Bldr, *I, *Call, CallOpts);
- ExplodedNodeSet DstPostCall;
getCheckerManager().runCheckersForPostCall(Dst, DstInvalidated,
*Call, *this);
}
diff --git a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index 758195d8d911..b935e3afe34b 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -1,9 +1,8 @@
//=-- ExprEngineCallAndReturn.cpp - Support for call/return -----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -328,30 +327,30 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
ExplodedNodeSet DstPostPostCallCallback;
getCheckerManager().runCheckersForPostCall(DstPostPostCallCallback,
CEENode, *UpdatedCall, *this,
- /*WasInlined=*/true);
+ /*wasInlined=*/true);
for (auto I : DstPostPostCallCallback) {
getCheckerManager().runCheckersForNewAllocator(
CNE,
*getObjectUnderConstruction(I->getState(), CNE,
calleeCtx->getParent()),
DstPostCall, I, *this,
- /*WasInlined=*/true);
+ /*wasInlined=*/true);
}
} else {
getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
*UpdatedCall, *this,
- /*WasInlined=*/true);
+ /*wasInlined=*/true);
}
ExplodedNodeSet Dst;
if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
*this,
- /*WasInlined=*/true);
+ /*wasInlined=*/true);
} else if (CE &&
!(isa<CXXNewExpr>(CE) && // Called when visiting CXXNewExpr.
AMgr.getAnalyzerOptions().MayInlineCXXAllocator)) {
getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
- *this, /*WasInlined=*/true);
+ *this, /*wasInlined=*/true);
} else {
Dst.insert(DstPostCall);
}
@@ -365,6 +364,26 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
}
}
+bool ExprEngine::isSmall(AnalysisDeclContext *ADC) const {
+ // When there are no branches in the function, it means that there's no
+  // exponential complexity introduced by inlining such a function.
+ // Such functions also don't trigger various fundamental problems
+ // with our inlining mechanism, such as the problem of
+ // inlined defensive checks. Hence isLinear().
+ const CFG *Cfg = ADC->getCFG();
+ return Cfg->isLinear() || Cfg->size() <= AMgr.options.AlwaysInlineSize;
+}
+
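+// "Large" functions have a CFG at or above the configurable threshold and are
+// only inlined a limited number of times.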
+bool ExprEngine::isLarge(AnalysisDeclContext *ADC) const {
+ const CFG *Cfg = ADC->getCFG();
+ return Cfg->size() >= AMgr.options.MinCFGSizeTreatFunctionsAsLarge;
+}
+
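+// "Huge" functions exceed the maximum inlinable CFG size and are never
+// inlined.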
+bool ExprEngine::isHuge(AnalysisDeclContext *ADC) const {
+ const CFG *Cfg = ADC->getCFG();
+ return Cfg->getNumBlockIDs() > AMgr.options.MaxInlinableSize;
+}
+
void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
bool &IsRecursive, unsigned &StackDepth) {
IsRecursive = false;
@@ -385,8 +404,7 @@ void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
// Do not count the small functions when determining the stack depth.
AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
- const CFG *CalleeCFG = CalleeADC->getCFG();
- if (CalleeCFG->getNumBlockIDs() > AMgr.options.AlwaysInlineSize)
+ if (!isSmall(CalleeADC))
++StackDepth;
}
LCtx = LCtx->getParent();
@@ -616,12 +634,19 @@ ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
std::tie(State, Target) =
prepareForObjectConstruction(Call.getOriginExpr(), State, LCtx,
RTC->getConstructionContext(), CallOpts);
- assert(Target.getAsRegion());
- // Invalidate the region so that it didn't look uninitialized. Don't notify
- // the checkers.
- State = State->invalidateRegions(Target.getAsRegion(), E, Count, LCtx,
- /* CausedByPointerEscape=*/false, nullptr,
- &Call, nullptr);
+ const MemRegion *TargetR = Target.getAsRegion();
+ assert(TargetR);
+  // Invalidate the region so that it doesn't look uninitialized. If this is
+ // a field or element constructor, we do not want to invalidate
+ // the whole structure. Pointer escape is meaningless because
+ // the structure is a product of conservative evaluation
+ // and therefore contains nothing interesting at this point.
+ RegionAndSymbolInvalidationTraits ITraits;
+ ITraits.setTrait(TargetR,
+ RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
+ State = State->invalidateRegions(TargetR, E, Count, LCtx,
+ /* CausesPointerEscape=*/false, nullptr,
+ &Call, &ITraits);
R = State->getSVal(Target.castAs<Loc>(), E->getType());
} else {
@@ -833,8 +858,7 @@ static bool isCXXSharedPtrDtor(const FunctionDecl *FD) {
/// This checks static properties of the function, such as its signature and
/// CFG, to determine whether the analyzer should ever consider inlining it,
/// in any context.
-static bool mayInlineDecl(AnalysisManager &AMgr,
- AnalysisDeclContext *CalleeADC) {
+bool ExprEngine::mayInlineDecl(AnalysisDeclContext *CalleeADC) const {
AnalyzerOptions &Opts = AMgr.getAnalyzerOptions();
// FIXME: Do not inline variadic calls.
if (CallEvent::isVariadic(CalleeADC->getDecl()))
@@ -879,7 +903,7 @@ static bool mayInlineDecl(AnalysisManager &AMgr,
return false;
// Do not inline large functions.
- if (CalleeCFG->getNumBlockIDs() > Opts.MaxInlinableSize)
+ if (isHuge(CalleeADC))
return false;
// It is possible that the live variables analysis cannot be
@@ -919,7 +943,7 @@ bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
} else {
// We haven't actually checked the static properties of this function yet.
// Do that now, and record our decision in the function summaries.
- if (mayInlineDecl(getAnalysisManager(), CalleeADC)) {
+ if (mayInlineDecl(CalleeADC)) {
Engine.FunctionSummaries->markMayInline(D);
} else {
Engine.FunctionSummaries->markShouldNotInline(D);
@@ -940,29 +964,23 @@ bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
return false;
}
- const CFG *CalleeCFG = CalleeADC->getCFG();
-
// Do not inline if recursive or we've reached max stack frame count.
bool IsRecursive = false;
unsigned StackDepth = 0;
examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
if ((StackDepth >= Opts.InlineMaxStackDepth) &&
- ((CalleeCFG->getNumBlockIDs() > Opts.AlwaysInlineSize)
- || IsRecursive))
+ (!isSmall(CalleeADC) || IsRecursive))
return false;
// Do not inline large functions too many times.
if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
Opts.MaxTimesInlineLarge) &&
- CalleeCFG->getNumBlockIDs() >=
- Opts.MinCFGSizeTreatFunctionsAsLarge) {
+ isLarge(CalleeADC)) {
NumReachedInlineCountMax++;
return false;
}
- if (HowToInline == Inline_Minimal &&
- (CalleeCFG->getNumBlockIDs() > Opts.AlwaysInlineSize
- || IsRecursive))
+ if (HowToInline == Inline_Minimal && (!isSmall(CalleeADC) || IsRecursive))
return false;
return true;
diff --git a/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp b/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
index 6b8402f621e0..eb9a0be2e5d6 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
@@ -1,9 +1,8 @@
//=-- ExprEngineObjC.cpp - ExprEngine support for Objective-C ---*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/StaticAnalyzer/Core/FunctionSummary.cpp b/lib/StaticAnalyzer/Core/FunctionSummary.cpp
index 94edd84d15d2..2b9a45133bba 100644
--- a/lib/StaticAnalyzer/Core/FunctionSummary.cpp
+++ b/lib/StaticAnalyzer/Core/FunctionSummary.cpp
@@ -1,9 +1,8 @@
//===- FunctionSummary.cpp - Stores summaries of functions. ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
index fc82f1176942..64c42699fcf3 100644
--- a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
+++ b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -1,9 +1,8 @@
//===- HTMLDiagnostics.cpp - HTML Diagnostics for Paths -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -91,8 +90,9 @@ public:
const PathDiagnosticMacroPiece& P,
unsigned num);
- void HandlePiece(Rewriter& R, FileID BugFileID,
- const PathDiagnosticPiece& P, unsigned num, unsigned max);
+ void HandlePiece(Rewriter &R, FileID BugFileID, const PathDiagnosticPiece &P,
+ const std::vector<SourceRange> &PopUpRanges, unsigned num,
+ unsigned max);
void HighlightRange(Rewriter& R, FileID BugFileID, SourceRange Range,
const char *HighlightStart = "<span class=\"mrange\">",
@@ -274,7 +274,7 @@ std::string HTMLDiagnostics::GenerateHTML(const PathDiagnostic& D, Rewriter &R,
std::vector<FileID> FileIDs;
for (auto I : path) {
FileID FID = I->getLocation().asLocation().getExpansionLoc().getFileID();
- if (std::find(FileIDs.begin(), FileIDs.end(), FID) != FileIDs.end())
+ if (llvm::is_contained(FileIDs, FID))
continue;
FileIDs.push_back(FID);
@@ -606,6 +606,53 @@ window.addEventListener("keydown", function (event) {
)<<<";
}
+static void
+HandlePopUpPieceStartTag(Rewriter &R,
+ const std::vector<SourceRange> &PopUpRanges) {
+ for (const auto &Range : PopUpRanges) {
+ html::HighlightRange(R, Range.getBegin(), Range.getEnd(), "",
+ "<table class='variable_popup'><tbody>",
+ /*IsTokenRange=*/false);
+ }
+}
+
+static void HandlePopUpPieceEndTag(Rewriter &R,
+ const PathDiagnosticPopUpPiece &Piece,
+ std::vector<SourceRange> &PopUpRanges,
+ unsigned int LastReportedPieceIndex,
+ unsigned int PopUpPieceIndex) {
+ SmallString<256> Buf;
+ llvm::raw_svector_ostream Out(Buf);
+
+ SourceRange Range(Piece.getLocation().asRange());
+
+ // Write out the path indices with a right arrow and the message as a row.
+ Out << "<tr><td valign='top'><div class='PathIndex PathIndexPopUp'>"
+ << LastReportedPieceIndex;
+
+ // Also annotate the state transition with extra indices.
+ Out << '.' << PopUpPieceIndex;
+
+ Out << "</div></td><td>" << Piece.getString() << "</td></tr>";
+
+  // If no report has been made at this range yet, mark the variable and add
+  // the end tags.
+ if (std::find(PopUpRanges.begin(), PopUpRanges.end(), Range) ==
+ PopUpRanges.end()) {
+    // Store that we created a report at this range.
+ PopUpRanges.push_back(Range);
+
+ Out << "</tbody></table></span>";
+ html::HighlightRange(R, Range.getBegin(), Range.getEnd(),
+ "<span class='variable'>", Buf.c_str(),
+ /*IsTokenRange=*/false);
+
+ // Otherwise inject just the new row at the end of the range.
+ } else {
+ html::HighlightRange(R, Range.getBegin(), Range.getEnd(), "", Buf.c_str(),
+ /*IsTokenRange=*/false);
+ }
+}
+
void HTMLDiagnostics::RewriteFile(Rewriter &R,
const PathPieces& path, FileID FID) {
// Process the path.
@@ -616,39 +663,80 @@ void HTMLDiagnostics::RewriteFile(Rewriter &R,
[](const std::shared_ptr<PathDiagnosticPiece> &p) {
return isa<PathDiagnosticNotePiece>(*p);
});
+ unsigned PopUpPieceCount =
+ std::count_if(path.begin(), path.end(),
+ [](const std::shared_ptr<PathDiagnosticPiece> &p) {
+ return isa<PathDiagnosticPopUpPiece>(*p);
+ });
- unsigned TotalRegularPieces = TotalPieces - TotalNotePieces;
+ unsigned TotalRegularPieces = TotalPieces - TotalNotePieces - PopUpPieceCount;
unsigned NumRegularPieces = TotalRegularPieces;
unsigned NumNotePieces = TotalNotePieces;
+  // Maps each regular piece index to the number of pop-up pieces attached
+  // to it.
+ std::map<int, int> IndexMap;
+ // Stores the different ranges where we have reported something.
+ std::vector<SourceRange> PopUpRanges;
for (auto I = path.rbegin(), E = path.rend(); I != E; ++I) {
- if (isa<PathDiagnosticNotePiece>(I->get())) {
+ const auto &Piece = *I->get();
+
+ if (isa<PathDiagnosticPopUpPiece>(Piece)) {
+ ++IndexMap[NumRegularPieces];
+ } else if (isa<PathDiagnosticNotePiece>(Piece)) {
// This adds diagnostic bubbles, but not navigation.
// Navigation through note pieces would be added later,
// as a separate pass through the piece list.
- HandlePiece(R, FID, **I, NumNotePieces, TotalNotePieces);
+ HandlePiece(R, FID, Piece, PopUpRanges, NumNotePieces, TotalNotePieces);
--NumNotePieces;
} else {
- HandlePiece(R, FID, **I, NumRegularPieces, TotalRegularPieces);
+ HandlePiece(R, FID, Piece, PopUpRanges, NumRegularPieces,
+ TotalRegularPieces);
--NumRegularPieces;
}
}
- // Add line numbers, header, footer, etc.
+  // Secondary indexing when there are multiple pop-ups between two notes.
+ // (e.g. [(13) 'a' is 'true']; [(13.1) 'b' is 'false']; [(13.2) 'c' is...)
+ NumRegularPieces = TotalRegularPieces;
+ for (auto I = path.rbegin(), E = path.rend(); I != E; ++I) {
+ const auto &Piece = *I->get();
+
+ if (const auto *PopUpP = dyn_cast<PathDiagnosticPopUpPiece>(&Piece)) {
+ int PopUpPieceIndex = IndexMap[NumRegularPieces];
+
+      // Pop-up pieces need the index of the last reported piece and a count
+      // of how many times we have reported, so that multiple reports on the
+      // same range are handled. This marks the variable, adds the </table>
+      // end tag and the message (list element) as a row. The <table> start
+      // tag will be added after the rows have been written out.
+      // Note: It stores every different range.
+ HandlePopUpPieceEndTag(R, *PopUpP, PopUpRanges, NumRegularPieces,
+ PopUpPieceIndex);
+
+ if (PopUpPieceIndex > 0)
+ --IndexMap[NumRegularPieces];
+
+ } else if (!isa<PathDiagnosticNotePiece>(Piece)) {
+ --NumRegularPieces;
+ }
+ }
+
+ // Add the <table> start tag of pop-up pieces based on the stored ranges.
+ HandlePopUpPieceStartTag(R, PopUpRanges);
+ // Add line numbers, header, footer, etc.
html::EscapeText(R, FID);
html::AddLineNumbers(R, FID);
// If we have a preprocessor, relex the file and syntax highlight.
// We might not have a preprocessor if we come from a deserialized AST file,
// for example.
-
html::SyntaxHighlight(R, FID, PP);
html::HighlightMacros(R, FID, PP);
}
-void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
- const PathDiagnosticPiece& P,
+void HTMLDiagnostics::HandlePiece(Rewriter &R, FileID BugFileID,
+ const PathDiagnosticPiece &P,
+ const std::vector<SourceRange> &PopUpRanges,
unsigned num, unsigned max) {
// For now, just draw a box above the line in question, and emit the
// warning.
@@ -690,9 +778,7 @@ void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
bool IsNote = false;
bool SuppressIndex = (max == 1);
switch (P.getKind()) {
- case PathDiagnosticPiece::Call:
- llvm_unreachable("Calls and extra notes should already be handled");
- case PathDiagnosticPiece::Event: Kind = "Event"; break;
+ case PathDiagnosticPiece::Event: Kind = "Event"; break;
case PathDiagnosticPiece::ControlFlow: Kind = "Control"; break;
// Setting Kind to "Control" is intentional.
case PathDiagnosticPiece::Macro: Kind = "Control"; break;
@@ -701,6 +787,9 @@ void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
IsNote = true;
SuppressIndex = true;
break;
+ case PathDiagnosticPiece::Call:
+ case PathDiagnosticPiece::PopUp:
+ llvm_unreachable("Calls and extra notes should already be handled");
}
std::string sbuf;
@@ -860,8 +949,14 @@ void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
// Now highlight the ranges.
ArrayRef<SourceRange> Ranges = P.getRanges();
- for (const auto &Range : Ranges)
+ for (const auto &Range : Ranges) {
+ // If we have already highlighted the range as a pop-up, there is nothing
+ // left to do.
+ if (std::find(PopUpRanges.begin(), PopUpRanges.end(), Range) !=
+ PopUpRanges.end())
+ continue;
+
HighlightRange(R, LPosInfo.first, Range);
+ }
}
static void EmitAlphaCounter(raw_ostream &os, unsigned n) {
diff --git a/lib/StaticAnalyzer/Core/IssueHash.cpp b/lib/StaticAnalyzer/Core/IssueHash.cpp
index 6c55c61dd399..e7497f3fbdaa 100644
--- a/lib/StaticAnalyzer/Core/IssueHash.cpp
+++ b/lib/StaticAnalyzer/Core/IssueHash.cpp
@@ -1,9 +1,8 @@
//===---------- IssueHash.cpp - Generate identification hashes --*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/IssueHash.h"
@@ -121,7 +120,7 @@ static std::string GetEnclosingDeclContextSignature(const Decl *D) {
return "";
}
-static StringRef GetNthLineOfFile(llvm::MemoryBuffer *Buffer, int Line) {
+static StringRef GetNthLineOfFile(const llvm::MemoryBuffer *Buffer, int Line) {
if (!Buffer)
return "";
@@ -145,7 +144,7 @@ static std::string NormalizeLine(const SourceManager &SM, FullSourceLoc &L,
col++;
SourceLocation StartOfLine =
SM.translateLineCol(SM.getFileID(L), L.getExpansionLineNumber(), col);
- llvm::MemoryBuffer *Buffer =
+ const llvm::MemoryBuffer *Buffer =
SM.getBuffer(SM.getFileID(StartOfLine), StartOfLine);
if (!Buffer)
return {};
diff --git a/lib/StaticAnalyzer/Core/LoopUnrolling.cpp b/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
index da4574c61515..9838249ae82c 100644
--- a/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
+++ b/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
@@ -1,9 +1,8 @@
//===--- LoopUnrolling.cpp - Unroll loops -----------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -235,7 +234,7 @@ bool madeNewBranch(ExplodedNode *N, const Stmt *LoopStmt) {
ProgramPoint P = N->getLocation();
if (Optional<BlockEntrance> BE = P.getAs<BlockEntrance>())
- S = BE->getBlock()->getTerminator();
+ S = BE->getBlock()->getTerminatorStmt();
if (S == LoopStmt)
return false;
diff --git a/lib/StaticAnalyzer/Core/LoopWidening.cpp b/lib/StaticAnalyzer/Core/LoopWidening.cpp
index 8f6cb9a6b09e..9a7b1a24b819 100644
--- a/lib/StaticAnalyzer/Core/LoopWidening.cpp
+++ b/lib/StaticAnalyzer/Core/LoopWidening.cpp
@@ -1,9 +1,8 @@
//===--- LoopWidening.cpp - Widen loops -------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/StaticAnalyzer/Core/MemRegion.cpp b/lib/StaticAnalyzer/Core/MemRegion.cpp
index 9a1d4d73c20b..f763701af7fb 100644
--- a/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -1,9 +1,8 @@
//===- MemRegion.cpp - Abstract memory regions for static analysis --------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -845,6 +844,7 @@ getStackOrCaptureRegionForDeclContext(const LocationContext *LC,
const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
const LocationContext *LC) {
+ D = D->getCanonicalDecl();
const MemRegion *sReg = nullptr;
if (D->hasGlobalStorage() && !D->isStaticLocal()) {
@@ -931,6 +931,7 @@ const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
const VarRegion *MemRegionManager::getVarRegion(const VarDecl *D,
const MemRegion *superR) {
+ D = D->getCanonicalDecl();
return getSubRegion<VarRegion>(D, superR);
}
@@ -1008,6 +1009,7 @@ MemRegionManager::getElementRegion(QualType elementType, NonLoc Idx,
const FunctionCodeRegion *
MemRegionManager::getFunctionCodeRegion(const NamedDecl *FD) {
+ // To think: should we canonicalize the declaration here?
return getSubRegion<FunctionCodeRegion>(FD, getCodeRegion());
}
diff --git a/lib/StaticAnalyzer/Core/PathDiagnostic.cpp b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
index 3e93bb6a7c4f..54fbd6a5bc49 100644
--- a/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
+++ b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
@@ -1,9 +1,8 @@
//===- PathDiagnostic.cpp - Path-Specific Diagnostic Handling -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -91,6 +90,8 @@ PathDiagnosticMacroPiece::~PathDiagnosticMacroPiece() = default;
PathDiagnosticNotePiece::~PathDiagnosticNotePiece() = default;
+PathDiagnosticPopUpPiece::~PathDiagnosticPopUpPiece() = default;
+
void PathPieces::flattenTo(PathPieces &Primary, PathPieces &Current,
bool ShouldFlattenMacros) const {
for (auto &Piece : *this) {
@@ -120,6 +121,7 @@ void PathPieces::flattenTo(PathPieces &Primary, PathPieces &Current,
case PathDiagnosticPiece::Event:
case PathDiagnosticPiece::ControlFlow:
case PathDiagnosticPiece::Note:
+ case PathDiagnosticPiece::PopUp:
Current.push_back(Piece);
break;
}
@@ -370,15 +372,16 @@ static Optional<bool> comparePiece(const PathDiagnosticPiece &X,
case PathDiagnosticPiece::ControlFlow:
return compareControlFlow(cast<PathDiagnosticControlFlowPiece>(X),
cast<PathDiagnosticControlFlowPiece>(Y));
- case PathDiagnosticPiece::Event:
- case PathDiagnosticPiece::Note:
- return None;
case PathDiagnosticPiece::Macro:
return compareMacro(cast<PathDiagnosticMacroPiece>(X),
cast<PathDiagnosticMacroPiece>(Y));
case PathDiagnosticPiece::Call:
return compareCall(cast<PathDiagnosticCallPiece>(X),
cast<PathDiagnosticCallPiece>(Y));
+ case PathDiagnosticPiece::Event:
+ case PathDiagnosticPiece::Note:
+ case PathDiagnosticPiece::PopUp:
+ return None;
}
llvm_unreachable("all cases handled");
}
@@ -572,6 +575,8 @@ static SourceLocation getValidSourceLocation(const Stmt* S,
} while (!L.isValid());
}
+ // FIXME: Ironically, this assert actually fails in some cases.
+ //assert(L.isValid());
return L;
}
@@ -672,7 +677,15 @@ PathDiagnosticLocation::createConditionalColonLoc(
PathDiagnosticLocation
PathDiagnosticLocation::createMemberLoc(const MemberExpr *ME,
const SourceManager &SM) {
- return PathDiagnosticLocation(ME->getMemberLoc(), SM, SingleLocK);
+
+ assert(ME->getMemberLoc().isValid() || ME->getBeginLoc().isValid());
+
+ // In some cases getMemberLoc is not valid; in that case we return some
+ // other related, valid SourceLocation.
+ if (ME->getMemberLoc().isValid())
+ return PathDiagnosticLocation(ME->getMemberLoc(), SM, SingleLocK);
+
+ return PathDiagnosticLocation(ME->getBeginLoc(), SM, SingleLocK);
}
PathDiagnosticLocation
@@ -715,7 +728,24 @@ PathDiagnosticLocation::create(const ProgramPoint& P,
const Stmt* S = nullptr;
if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
const CFGBlock *BSrc = BE->getSrc();
- S = BSrc->getTerminatorCondition();
+ if (BSrc->getTerminator().isVirtualBaseBranch()) {
+ // TODO: VirtualBaseBranches should also appear for destructors.
+ // In that case the diagnostic should be placed at the end of the declaration.
+ return PathDiagnosticLocation::createBegin(
+ P.getLocationContext()->getDecl(), SMng);
+
+ } else {
+ S = BSrc->getTerminatorCondition();
+ if (!S) {
+ // If the BlockEdge has no terminator condition statement but its
+ // source is the entry of the CFG (e.g. a checker created the branch at
+ // the beginning of a function), use the function's declaration instead.
+ assert(BSrc == &BSrc->getParent()->getEntry() && "CFGBlock has no "
+ "TerminatorCondition and is not the entry block of the CFG");
+ return PathDiagnosticLocation::createBegin(
+ P.getLocationContext()->getDecl(), SMng);
+ }
+ }
} else if (Optional<StmtPoint> SP = P.getAs<StmtPoint>()) {
S = SP->getStmt();
if (P.getAs<PostStmtPurgeDeadSymbols>())
@@ -735,6 +765,12 @@ PathDiagnosticLocation::create(const ProgramPoint& P,
return getLocationForCaller(CEE->getCalleeContext(),
CEE->getLocationContext(),
SMng);
+ } else if (auto CEB = P.getAs<CallExitBegin>()) {
+ if (const ReturnStmt *RS = CEB->getReturnStmt())
+ return PathDiagnosticLocation::createBegin(RS, SMng,
+ CEB->getLocationContext());
+ return PathDiagnosticLocation(
+ CEB->getLocationContext()->getDecl()->getSourceRange().getEnd(), SMng);
} else if (Optional<BlockEntrance> BE = P.getAs<BlockEntrance>()) {
CFGElement BlockFront = BE->getBlock()->front();
if (auto StmtElt = BlockFront.getAs<CFGStmt>()) {
@@ -744,6 +780,9 @@ PathDiagnosticLocation::create(const ProgramPoint& P,
NewAllocElt->getAllocatorExpr()->getBeginLoc(), SMng);
}
llvm_unreachable("Unexpected CFG element at front of block");
+ } else if (Optional<FunctionExitPoint> FE = P.getAs<FunctionExitPoint>()) {
+ return PathDiagnosticLocation(FE->getStmt(), SMng,
+ FE->getLocationContext());
} else {
llvm_unreachable("Unexpected ProgramPoint");
}
@@ -779,7 +818,7 @@ const Stmt *PathDiagnosticLocation::getStmt(const ExplodedNode *N) {
if (auto SP = P.getAs<StmtPoint>())
return SP->getStmt();
if (auto BE = P.getAs<BlockEdge>())
- return BE->getSrc()->getTerminator();
+ return BE->getSrc()->getTerminatorStmt();
if (auto CE = P.getAs<CallEnter>())
return CE->getCallExpr();
if (auto CEE = P.getAs<CallExitEnd>())
@@ -1255,6 +1294,10 @@ void PathDiagnosticNotePiece::Profile(llvm::FoldingSetNodeID &ID) const {
PathDiagnosticSpotPiece::Profile(ID);
}
+void PathDiagnosticPopUpPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+ PathDiagnosticSpotPiece::Profile(ID);
+}
+
void PathDiagnostic::Profile(llvm::FoldingSetNodeID &ID) const {
ID.Add(getLocation());
ID.AddString(BugType);
@@ -1380,6 +1423,13 @@ LLVM_DUMP_METHOD void PathDiagnosticNotePiece::dump() const {
getLocation().dump();
}
+LLVM_DUMP_METHOD void PathDiagnosticPopUpPiece::dump() const {
+ llvm::errs() << "POP-UP\n--------------\n";
+ llvm::errs() << getString() << "\n";
+ llvm::errs() << " ---- at ----\n";
+ getLocation().dump();
+}
+
LLVM_DUMP_METHOD void PathDiagnosticLocation::dump() const {
if (!isValid()) {
llvm::errs() << "<INVALID>\n";
diff --git a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
index db4cf76578d8..838751279297 100644
--- a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
+++ b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -1,9 +1,8 @@
//===--- PlistDiagnostics.cpp - Plist Diagnostics for Paths -----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -23,6 +22,7 @@
#include "clang/StaticAnalyzer/Core/IssueHash.h"
#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
@@ -120,6 +120,9 @@ private:
case PathDiagnosticPiece::Note:
ReportNote(o, cast<PathDiagnosticNotePiece>(P), indent);
break;
+ case PathDiagnosticPiece::PopUp:
+ ReportPopUp(o, cast<PathDiagnosticPopUpPiece>(P), indent);
+ break;
}
}
@@ -138,6 +141,9 @@ private:
unsigned indent, unsigned depth);
void ReportNote(raw_ostream &o, const PathDiagnosticNotePiece& P,
unsigned indent);
+
+ void ReportPopUp(raw_ostream &o, const PathDiagnosticPopUpPiece &P,
+ unsigned indent);
};
} // end of anonymous namespace
@@ -397,6 +403,34 @@ void PlistPrinter::ReportNote(raw_ostream &o, const PathDiagnosticNotePiece& P,
Indent(o, indent); o << "</dict>\n";
}
+void PlistPrinter::ReportPopUp(raw_ostream &o,
+ const PathDiagnosticPopUpPiece &P,
+ unsigned indent) {
+ const SourceManager &SM = PP.getSourceManager();
+
+ Indent(o, indent) << "<dict>\n";
+ ++indent;
+
+ Indent(o, indent) << "<key>kind</key><string>pop-up</string>\n";
+
+ // Output the location.
+ FullSourceLoc L = P.getLocation().asLocation();
+
+ Indent(o, indent) << "<key>location</key>\n";
+ EmitLocation(o, SM, L, FM, indent);
+
+ // Output the ranges (if any).
+ ArrayRef<SourceRange> Ranges = P.getRanges();
+ EmitRanges(o, Ranges, indent);
+
+ // Output the text.
+ EmitMessage(o, P.getString(), indent);
+
+ // Finish up.
+ --indent;
+ Indent(o, indent) << "</dict>\n";
+}
+
//===----------------------------------------------------------------------===//
// Static function definitions.
//===----------------------------------------------------------------------===//
@@ -714,7 +748,7 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
}
// Finish.
- o << "</dict>\n</plist>";
+ o << "</dict>\n</plist>\n";
}
//===----------------------------------------------------------------------===//
@@ -777,10 +811,20 @@ public:
/// As we expand the last line, we'll immediately replace PRINT(str) with
/// print(x). The information that both 'str' and 'x' refers to the same string
/// is an information we have to forward, hence the argument \p PrevArgs.
-static std::string getMacroNameAndPrintExpansion(TokenPrinter &Printer,
- SourceLocation MacroLoc,
- const Preprocessor &PP,
- const MacroArgMap &PrevArgs);
+///
+/// To avoid infinite recursion we maintain the already processed tokens in
+/// a set. This is carried as a parameter through the recursive calls. The set
+/// is extended with the currently processed token and after processing it, the
+/// token is removed. If the token is already in the set, then recursion stops:
+///
+/// #define f(y) x
+/// #define x f(x)
+static std::string getMacroNameAndPrintExpansion(
+ TokenPrinter &Printer,
+ SourceLocation MacroLoc,
+ const Preprocessor &PP,
+ const MacroArgMap &PrevArgs,
+ llvm::SmallPtrSet<IdentifierInfo *, 8> &AlreadyProcessedTokens);
/// Retrieves the name of the macro and what it's arguments expand into
/// at \p ExpanLoc.
@@ -829,19 +873,38 @@ static ExpansionInfo getExpandedMacro(SourceLocation MacroLoc,
llvm::SmallString<200> ExpansionBuf;
llvm::raw_svector_ostream OS(ExpansionBuf);
TokenPrinter Printer(OS, PP);
+ llvm::SmallPtrSet<IdentifierInfo*, 8> AlreadyProcessedTokens;
+
std::string MacroName =
- getMacroNameAndPrintExpansion(Printer, MacroLoc, PP, MacroArgMap{});
+ getMacroNameAndPrintExpansion(Printer, MacroLoc, PP, MacroArgMap{},
+ AlreadyProcessedTokens);
return { MacroName, OS.str() };
}
-static std::string getMacroNameAndPrintExpansion(TokenPrinter &Printer,
- SourceLocation MacroLoc,
- const Preprocessor &PP,
- const MacroArgMap &PrevArgs) {
+static std::string getMacroNameAndPrintExpansion(
+ TokenPrinter &Printer,
+ SourceLocation MacroLoc,
+ const Preprocessor &PP,
+ const MacroArgMap &PrevArgs,
+ llvm::SmallPtrSet<IdentifierInfo *, 8> &AlreadyProcessedTokens) {
const SourceManager &SM = PP.getSourceManager();
MacroNameAndArgs Info = getMacroNameAndArgs(SM.getExpansionLoc(MacroLoc), PP);
+ IdentifierInfo* IDInfo = PP.getIdentifierInfo(Info.Name);
+
+ // TODO: If the macro definition contains another symbol then this function
+ // is called recursively. In case this symbol is the one being defined, the
+ // recursion would be infinite; it is stopped by this "if" statement.
+ // However, in this case we don't get the full expansion text in the Plist
+ // file. See the test file where "value" is expanded to "garbage_" instead
+ // of "garbage_value".
+ if (AlreadyProcessedTokens.find(IDInfo) != AlreadyProcessedTokens.end())
+ return Info.Name;
+ AlreadyProcessedTokens.insert(IDInfo);
+
+ if (!Info.MI)
+ return Info.Name;
// Manually expand its arguments from the previous macro.
Info.Args.expandFromPrevMacro(PrevArgs);
@@ -863,14 +926,15 @@ static std::string getMacroNameAndPrintExpansion(TokenPrinter &Printer,
// If this token is a macro that should be expanded inside the current
// macro.
- if (const MacroInfo *MI =
- getMacroInfoForLocation(PP, SM, II, T.getLocation())) {
- getMacroNameAndPrintExpansion(Printer, T.getLocation(), PP, Info.Args);
+ if (getMacroInfoForLocation(PP, SM, II, T.getLocation())) {
+ getMacroNameAndPrintExpansion(Printer, T.getLocation(), PP, Info.Args,
+ AlreadyProcessedTokens);
// If this is a function-like macro, skip its arguments, as
// getExpandedMacro() already printed them. If this is the case, let's
// first jump to the '(' token.
- if (MI->getNumParams() != 0)
+ auto N = std::next(It);
+ if (N != E && N->is(tok::l_paren))
It = getMatchingRParen(++It, E);
continue;
}
@@ -897,8 +961,17 @@ static std::string getMacroNameAndPrintExpansion(TokenPrinter &Printer,
}
getMacroNameAndPrintExpansion(Printer, ArgIt->getLocation(), PP,
- Info.Args);
- if (MI->getNumParams() != 0)
+ Info.Args, AlreadyProcessedTokens);
+ // Peek at the next token to see whether it is a tok::l_paren. This way we
+ // can decide whether this is the application of, or just a reference to, a
+ // function macro symbol:
+ //
+ // #define apply(f) ...
+ // #define func(x) ...
+ // apply(func)
+ // apply(func(42))
+ auto N = std::next(ArgIt);
+ if (N != ArgEnd && N->is(tok::l_paren))
ArgIt = getMatchingRParen(++ArgIt, ArgEnd);
}
continue;
@@ -909,6 +982,8 @@ static std::string getMacroNameAndPrintExpansion(TokenPrinter &Printer,
Printer.printToken(T);
}
+ AlreadyProcessedTokens.erase(IDInfo);
+
return Info.Name;
}
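A minimal standalone sketch (illustration only, not part of this patch) of the recursion-guard pattern used above: remember the names currently being expanded in a set, stop when a name reappears, and remove it again after the recursive call returns. The toy macro table and the expand() helper are hypothetical stand-ins for the real token machinery.

#include <iostream>
#include <map>
#include <set>
#include <string>

static const std::map<std::string, std::string> Defs = {
    {"x", "f(x)"}, {"f(x)", "x"}}; // mutually recursive "macros"

static std::string expand(const std::string &Name,
                          std::set<std::string> &InProgress) {
  auto It = Defs.find(Name);
  if (It == Defs.end() || !InProgress.insert(Name).second)
    return Name;                  // unknown, or already being expanded
  std::string Result = expand(It->second, InProgress);
  InProgress.erase(Name);         // allow the same macro again later
  return Result;
}

int main() {
  std::set<std::string> InProgress;
  std::cout << expand("x", InProgress) << "\n"; // terminates instead of looping
}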
@@ -937,7 +1012,14 @@ static MacroNameAndArgs getMacroNameAndArgs(SourceLocation ExpanLoc,
assert(II && "Failed to acquire the IndetifierInfo for the macro!");
const MacroInfo *MI = getMacroInfoForLocation(PP, SM, II, ExpanLoc);
- assert(MI && "The macro must've been defined at it's expansion location!");
+ // assert(MI && "The macro must've been defined at it's expansion location!");
+ //
+ // We should always be able to obtain the MacroInfo in a given TU, but if
+ // we're running the analyzer with CTU, the Preprocessor won't contain the
+ // directive history (or anything for that matter) from another TU.
+ // TODO: assert when we're not running with CTU.
+ if (!MI)
+ return { MacroName, MI, {} };
// Acquire the macro's arguments.
//
@@ -951,8 +1033,16 @@ static MacroNameAndArgs getMacroNameAndArgs(SourceLocation ExpanLoc,
return { MacroName, MI, {} };
RawLexer.LexFromRawLexer(TheTok);
- assert(TheTok.is(tok::l_paren) &&
- "The token after the macro's identifier token should be '('!");
+ // When this is a token that expands to another function-like macro, its
+ // parentheses are not at its expansion location. For example:
+ //
+ // #define foo(x) int bar() { return x; }
+ // #define apply_zero(f) f(0)
+ // apply_zero(foo)
+ // ^
+ // This is not a tok::l_paren, but foo is a function.
+ if (TheTok.isNot(tok::l_paren))
+ return { MacroName, MI, {} };
MacroArgMap Args;
diff --git a/lib/StaticAnalyzer/Core/PrettyStackTraceLocationContext.h b/lib/StaticAnalyzer/Core/PrettyStackTraceLocationContext.h
index 4bb694819c2a..c71ee3bd4286 100644
--- a/lib/StaticAnalyzer/Core/PrettyStackTraceLocationContext.h
+++ b/lib/StaticAnalyzer/Core/PrettyStackTraceLocationContext.h
@@ -1,9 +1,8 @@
//==- PrettyStackTraceLocationContext.h - show analysis backtrace --*- C++ -*-//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -33,9 +32,9 @@ public:
assert(LCtx);
}
- void print(raw_ostream &OS) const override {
- OS << "While analyzing stack: \n";
- LCtx->dumpStack(OS, "\t");
+ void print(raw_ostream &Out) const override {
+ Out << "While analyzing stack: \n";
+ LCtx->dumpStack(Out);
}
};
diff --git a/lib/StaticAnalyzer/Core/ProgramState.cpp b/lib/StaticAnalyzer/Core/ProgramState.cpp
index 2e2e2ec94f39..a1ca0b1b84bf 100644
--- a/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -1,9 +1,8 @@
//= ProgramState.cpp - Path-Sensitive "State" for tracking values --*- C++ -*--=
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -11,14 +10,14 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/Analysis/CFG.h"
+#include "clang/Basic/JsonSupport.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeMap.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeMap.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -442,53 +441,40 @@ void ProgramState::setStore(const StoreRef &newStore) {
// State pretty-printing.
//===----------------------------------------------------------------------===//
-void ProgramState::print(raw_ostream &Out,
- const char *NL, const char *Sep,
- const LocationContext *LC) const {
- // Print the store.
+void ProgramState::printJson(raw_ostream &Out, const LocationContext *LCtx,
+ const char *NL, unsigned int Space,
+ bool IsDot) const {
+ Indent(Out, Space, IsDot) << "\"program_state\": {" << NL;
+ ++Space;
+
ProgramStateManager &Mgr = getStateManager();
- const ASTContext &Context = getStateManager().getContext();
- Mgr.getStoreManager().print(getStore(), Out, NL);
+
+ // Print the store.
+ Mgr.getStoreManager().printJson(Out, getStore(), NL, Space, IsDot);
// Print out the environment.
- Env.print(Out, NL, Sep, Context, LC);
+ Env.printJson(Out, Mgr.getContext(), LCtx, NL, Space, IsDot);
// Print out the constraints.
- Mgr.getConstraintManager().print(this, Out, NL, Sep);
+ Mgr.getConstraintManager().printJson(Out, this, NL, Space, IsDot);
// Print out the tracked dynamic types.
- printDynamicTypeInfo(this, Out, NL, Sep);
-
- // Print out tainted symbols.
- printTaint(Out, NL);
+ printDynamicTypeInfoJson(Out, this, NL, Space, IsDot);
// Print checker-specific data.
- Mgr.getOwningEngine().printState(Out, this, NL, Sep, LC);
-}
-
-void ProgramState::printDOT(raw_ostream &Out,
- const LocationContext *LC) const {
- print(Out, "\\l", "\\|", LC);
-}
+ Mgr.getOwningEngine().printJson(Out, this, LCtx, NL, Space, IsDot);
-LLVM_DUMP_METHOD void ProgramState::dump() const {
- print(llvm::errs());
+ --Space;
+ Indent(Out, Space, IsDot) << '}';
}
-void ProgramState::printTaint(raw_ostream &Out,
- const char *NL) const {
- TaintMapImpl TM = get<TaintMap>();
-
- if (!TM.isEmpty())
- Out <<"Tainted symbols:" << NL;
-
- for (TaintMapImpl::iterator I = TM.begin(), E = TM.end(); I != E; ++I) {
- Out << I->first << " : " << I->second << NL;
- }
+void ProgramState::printDOT(raw_ostream &Out, const LocationContext *LCtx,
+ unsigned int Space) const {
+ printJson(Out, LCtx, /*NL=*/"\\l", Space, /*IsDot=*/true);
}
-void ProgramState::dumpTaint() const {
- printTaint(llvm::errs());
+LLVM_DUMP_METHOD void ProgramState::dump() const {
+ printJson(llvm::errs());
}
AnalysisManager& ProgramState::getAnalysisManager() const {
@@ -658,166 +644,3 @@ bool ProgramState::scanReachableSymbols(
}
return true;
}
-
-ProgramStateRef ProgramState::addTaint(const Stmt *S,
- const LocationContext *LCtx,
- TaintTagType Kind) const {
- if (const Expr *E = dyn_cast_or_null<Expr>(S))
- S = E->IgnoreParens();
-
- return addTaint(getSVal(S, LCtx), Kind);
-}
-
-ProgramStateRef ProgramState::addTaint(SVal V,
- TaintTagType Kind) const {
- SymbolRef Sym = V.getAsSymbol();
- if (Sym)
- return addTaint(Sym, Kind);
-
- // If the SVal represents a structure, try to mass-taint all values within the
- // structure. For now it only works efficiently on lazy compound values that
- // were conjured during a conservative evaluation of a function - either as
- // return values of functions that return structures or arrays by value, or as
- // values of structures or arrays passed into the function by reference,
- // directly or through pointer aliasing. Such lazy compound values are
- // characterized by having exactly one binding in their captured store within
- // their parent region, which is a conjured symbol default-bound to the base
- // region of the parent region.
- if (auto LCV = V.getAs<nonloc::LazyCompoundVal>()) {
- if (Optional<SVal> binding = getStateManager().StoreMgr->getDefaultBinding(*LCV)) {
- if (SymbolRef Sym = binding->getAsSymbol())
- return addPartialTaint(Sym, LCV->getRegion(), Kind);
- }
- }
-
- const MemRegion *R = V.getAsRegion();
- return addTaint(R, Kind);
-}
-
-ProgramStateRef ProgramState::addTaint(const MemRegion *R,
- TaintTagType Kind) const {
- if (const SymbolicRegion *SR = dyn_cast_or_null<SymbolicRegion>(R))
- return addTaint(SR->getSymbol(), Kind);
- return this;
-}
-
-ProgramStateRef ProgramState::addTaint(SymbolRef Sym,
- TaintTagType Kind) const {
- // If this is a symbol cast, remove the cast before adding the taint. Taint
- // is cast agnostic.
- while (const SymbolCast *SC = dyn_cast<SymbolCast>(Sym))
- Sym = SC->getOperand();
-
- ProgramStateRef NewState = set<TaintMap>(Sym, Kind);
- assert(NewState);
- return NewState;
-}
-
-ProgramStateRef ProgramState::addPartialTaint(SymbolRef ParentSym,
- const SubRegion *SubRegion,
- TaintTagType Kind) const {
- // Ignore partial taint if the entire parent symbol is already tainted.
- if (contains<TaintMap>(ParentSym) && *get<TaintMap>(ParentSym) == Kind)
- return this;
-
- // Partial taint applies if only a portion of the symbol is tainted.
- if (SubRegion == SubRegion->getBaseRegion())
- return addTaint(ParentSym, Kind);
-
- const TaintedSubRegions *SavedRegs = get<DerivedSymTaint>(ParentSym);
- TaintedSubRegions Regs =
- SavedRegs ? *SavedRegs : stateMgr->TSRFactory.getEmptyMap();
-
- Regs = stateMgr->TSRFactory.add(Regs, SubRegion, Kind);
- ProgramStateRef NewState = set<DerivedSymTaint>(ParentSym, Regs);
- assert(NewState);
- return NewState;
-}
-
-bool ProgramState::isTainted(const Stmt *S, const LocationContext *LCtx,
- TaintTagType Kind) const {
- if (const Expr *E = dyn_cast_or_null<Expr>(S))
- S = E->IgnoreParens();
-
- SVal val = getSVal(S, LCtx);
- return isTainted(val, Kind);
-}
-
-bool ProgramState::isTainted(SVal V, TaintTagType Kind) const {
- if (const SymExpr *Sym = V.getAsSymExpr())
- return isTainted(Sym, Kind);
- if (const MemRegion *Reg = V.getAsRegion())
- return isTainted(Reg, Kind);
- return false;
-}
-
-bool ProgramState::isTainted(const MemRegion *Reg, TaintTagType K) const {
- if (!Reg)
- return false;
-
- // Element region (array element) is tainted if either the base or the offset
- // are tainted.
- if (const ElementRegion *ER = dyn_cast<ElementRegion>(Reg))
- return isTainted(ER->getSuperRegion(), K) || isTainted(ER->getIndex(), K);
-
- if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Reg))
- return isTainted(SR->getSymbol(), K);
-
- if (const SubRegion *ER = dyn_cast<SubRegion>(Reg))
- return isTainted(ER->getSuperRegion(), K);
-
- return false;
-}
-
-bool ProgramState::isTainted(SymbolRef Sym, TaintTagType Kind) const {
- if (!Sym)
- return false;
-
- // Traverse all the symbols this symbol depends on to see if any are tainted.
- for (SymExpr::symbol_iterator SI = Sym->symbol_begin(), SE =Sym->symbol_end();
- SI != SE; ++SI) {
- if (!isa<SymbolData>(*SI))
- continue;
-
- if (const TaintTagType *Tag = get<TaintMap>(*SI)) {
- if (*Tag == Kind)
- return true;
- }
-
- if (const SymbolDerived *SD = dyn_cast<SymbolDerived>(*SI)) {
- // If this is a SymbolDerived with a tainted parent, it's also tainted.
- if (isTainted(SD->getParentSymbol(), Kind))
- return true;
-
- // If this is a SymbolDerived with the same parent symbol as another
- // tainted SymbolDerived and a region that's a sub-region of that tainted
- // symbol, it's also tainted.
- if (const TaintedSubRegions *Regs =
- get<DerivedSymTaint>(SD->getParentSymbol())) {
- const TypedValueRegion *R = SD->getRegion();
- for (auto I : *Regs) {
- // FIXME: The logic to identify tainted regions could be more
- // complete. For example, this would not currently identify
- // overlapping fields in a union as tainted. To identify this we can
- // check for overlapping/nested byte offsets.
- if (Kind == I.second && R->isSubRegionOf(I.first))
- return true;
- }
- }
- }
-
- // If memory region is tainted, data is also tainted.
- if (const SymbolRegionValue *SRV = dyn_cast<SymbolRegionValue>(*SI)) {
- if (isTainted(SRV->getRegion(), Kind))
- return true;
- }
-
- // If this is a SymbolCast from a tainted value, it's also tainted.
- if (const SymbolCast *SC = dyn_cast<SymbolCast>(*SI)) {
- if (isTainted(SC->getOperand(), Kind))
- return true;
- }
- }
-
- return false;
-}
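Based only on the printJson() methods visible in this patch (ProgramState::printJson above, plus the store and constraint managers' overrides changed further down), the state dump assembled here is roughly of the following shape. The concrete values are made up for illustration, and sections whose keys are not visible in this diff (e.g. whatever Env.printJson emits) are elided:

  "program_state": {
    "store": { "pointer": "0x...", "items": [
      { "cluster": "x", "pointer": "0x...", "items": [
        { "kind": "Direct", "offset": 0, "value": "42 S32b" }
      ]}
    ]},
    ...
    "constraints": [
      { "symbol": "reg_$0<int y>", "range": "{ [0, 10] }" }
    ]
  }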
diff --git a/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index d9b58d0f5185..64724227395d 100644
--- a/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -1,9 +1,8 @@
//== RangeConstraintManager.cpp - Manage range constraints.------*- C++ -*--==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -12,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Basic/JsonSupport.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
@@ -174,6 +174,22 @@ RangeSet RangeSet::Intersect(BasicValueFactory &BV, Factory &F,
return newRanges;
}
+// Returns a set containing the values in the receiving set, intersected with
+// the range set passed as parameter.
+RangeSet RangeSet::Intersect(BasicValueFactory &BV, Factory &F,
+ const RangeSet &Other) const {
+ PrimRangeSet newRanges = F.getEmptySet();
+
+ for (iterator i = Other.begin(), e = Other.end(); i != e; ++i) {
+ RangeSet newPiece = Intersect(BV, F, i->From(), i->To());
+ for (iterator j = newPiece.begin(), ee = newPiece.end(); j != ee; ++j) {
+ newRanges = F.add(newRanges, *j);
+ }
+ }
+
+ return newRanges;
+}
+
// Turn all [A, B] ranges to [-B, -A]. Ranges [MIN, B] are turned to range set
// [MIN, MIN] U [-B, MAX], when MIN and MAX are the minimal and the maximal
// signed values of the type.
@@ -231,6 +247,11 @@ public:
// Implementation for interface from ConstraintManager.
//===------------------------------------------------------------------===//
+ bool haveEqualConstraints(ProgramStateRef S1,
+ ProgramStateRef S2) const override {
+ return S1->get<ConstraintRange>() == S2->get<ConstraintRange>();
+ }
+
bool canReasonAbout(SVal X) const override;
ConditionTruthVal checkNull(ProgramStateRef State, SymbolRef Sym) override;
@@ -241,8 +262,8 @@ public:
ProgramStateRef removeDeadBindings(ProgramStateRef State,
SymbolReaper &SymReaper) override;
- void print(ProgramStateRef State, raw_ostream &Out, const char *nl,
- const char *sep) override;
+ void printJson(raw_ostream &Out, ProgramStateRef State, const char *NL = "\n",
+ unsigned int Space = 0, bool IsDot = false) const override;
//===------------------------------------------------------------------===//
// Implementation for interface from RangedConstraintManager.
@@ -457,14 +478,21 @@ static RangeSet applyBitwiseConstraints(
RangeSet RangeConstraintManager::getRange(ProgramStateRef State,
SymbolRef Sym) {
- if (ConstraintRangeTy::data_type *V = State->get<ConstraintRange>(Sym))
- return *V;
-
- BasicValueFactory &BV = getBasicVals();
+ ConstraintRangeTy::data_type *V = State->get<ConstraintRange>(Sym);
// If Sym is a difference of symbols A - B, then maybe we have range set
// stored for B - A.
- if (const RangeSet *R = getRangeForMinusSymbol(State, Sym))
+ BasicValueFactory &BV = getBasicVals();
+ const RangeSet *R = getRangeForMinusSymbol(State, Sym);
+
+ // If we have a range set stored for both A - B and B - A then calculate
+ // the effective range set by intersecting the range set for A - B with
+ // the negated range set of B - A.
+ if (V && R)
+ return V->Intersect(BV, F, R->Negate(BV, F));
+ if (V)
+ return *V;
+ if (R)
return R->Negate(BV, F);
// Lazily generate a new RangeSet representing all possible values for the
@@ -727,25 +755,35 @@ ProgramStateRef RangeConstraintManager::assumeSymOutsideInclusiveRange(
return New.isEmpty() ? nullptr : State->set<ConstraintRange>(Sym, New);
}
-//===------------------------------------------------------------------------===
+//===----------------------------------------------------------------------===//
// Pretty-printing.
-//===------------------------------------------------------------------------===/
-
-void RangeConstraintManager::print(ProgramStateRef St, raw_ostream &Out,
- const char *nl, const char *sep) {
+//===----------------------------------------------------------------------===//
- ConstraintRangeTy Ranges = St->get<ConstraintRange>();
+void RangeConstraintManager::printJson(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, unsigned int Space,
+ bool IsDot) const {
+ ConstraintRangeTy Constraints = State->get<ConstraintRange>();
- if (Ranges.isEmpty()) {
- Out << nl << sep << "Ranges are empty." << nl;
+ Indent(Out, Space, IsDot) << "\"constraints\": ";
+ if (Constraints.isEmpty()) {
+ Out << "null," << NL;
return;
}
- Out << nl << sep << "Ranges of symbol values:";
- for (ConstraintRangeTy::iterator I = Ranges.begin(), E = Ranges.end(); I != E;
- ++I) {
- Out << nl << ' ' << I.getKey() << " : ";
+ ++Space;
+ Out << '[' << NL;
+ for (ConstraintRangeTy::iterator I = Constraints.begin();
+ I != Constraints.end(); ++I) {
+ Indent(Out, Space, IsDot)
+ << "{ \"symbol\": \"" << I.getKey() << "\", \"range\": \"";
I.getData().print(Out);
+ Out << "\" }";
+
+ if (std::next(I) != Constraints.end())
+ Out << ',';
+ Out << NL;
}
- Out << nl;
+
+ --Space;
+ Indent(Out, Space, IsDot) << "]," << NL;
}
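A worked example (illustration only, not part of this patch) of the new getRange() logic above, followed by a tiny standalone helper that clips one interval against another the way RangeSet::Intersect does; the Interval type and intersect() below are hypothetical.

Suppose the constraints recorded so far are
  A - B in [0, 10]   (stored for the symbol A - B)
  B - A in [-5, 3]   (stored for the symbol B - A)
Negating the second set turns [lo, hi] into [-hi, -lo], so A - B in [-3, 5],
and intersecting with the first set gives the effective range A - B in [0, 5].

#include <algorithm>
#include <cstdio>
#include <utility>

using Interval = std::pair<long, long>; // inclusive [first, second]

// Returns false when the two intervals do not overlap.
static bool intersect(Interval A, Interval B, Interval &Out) {
  Out = {std::max(A.first, B.first), std::min(A.second, B.second)};
  return Out.first <= Out.second;
}

int main() {
  Interval Out;
  if (intersect({0, 10}, {-3, 5}, Out))
    std::printf("[%ld, %ld]\n", Out.first, Out.second); // prints [0, 5]
}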
diff --git a/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp b/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
index 146dc20ad021..4748c106eb55 100644
--- a/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
@@ -1,9 +1,8 @@
//== RangedConstraintManager.cpp --------------------------------*- C++ -*--==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/StaticAnalyzer/Core/RegionStore.cpp b/lib/StaticAnalyzer/Core/RegionStore.cpp
index b2339be4f263..d2aea1fd92dd 100644
--- a/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -1,9 +1,8 @@
//== RegionStore.cpp - Field-sensitive store model --------------*- C++ -*--==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -20,6 +19,7 @@
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/AnalysisDeclContext.h"
+#include "clang/Basic/JsonSupport.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
@@ -121,25 +121,21 @@ BindingKey BindingKey::Make(const MemRegion *R, Kind k) {
}
namespace llvm {
- static inline
- raw_ostream &operator<<(raw_ostream &os, BindingKey K) {
- os << '(' << K.getRegion();
- if (!K.hasSymbolicOffset())
- os << ',' << K.getOffset();
- os << ',' << (K.isDirect() ? "direct" : "default")
- << ')';
- return os;
- }
-
- template <typename T> struct isPodLike;
- template <> struct isPodLike<BindingKey> {
- static const bool value = true;
- };
-} // end llvm namespace
-
-#ifndef NDEBUG
+static inline raw_ostream &operator<<(raw_ostream &Out, BindingKey K) {
+ Out << "\"kind\": \"" << (K.isDirect() ? "Direct" : "Default")
+ << "\", \"offset\": ";
+
+ if (!K.hasSymbolicOffset())
+ Out << K.getOffset();
+ else
+ Out << "null";
+
+ return Out;
+}
+
+} // namespace llvm
+
LLVM_DUMP_METHOD void BindingKey::dump() const { llvm::errs() << *this; }
-#endif
//===----------------------------------------------------------------------===//
// Actual Store type.
@@ -211,18 +207,34 @@ public:
return asImmutableMap().getRootWithoutRetain();
}
- void dump(raw_ostream &OS, const char *nl) const {
- for (iterator I = begin(), E = end(); I != E; ++I) {
- const ClusterBindings &Cluster = I.getData();
- for (ClusterBindings::iterator CI = Cluster.begin(), CE = Cluster.end();
- CI != CE; ++CI) {
- OS << ' ' << CI.getKey() << " : " << CI.getData() << nl;
- }
- OS << nl;
- }
+ void printJson(raw_ostream &Out, const char *NL = "\n",
+ unsigned int Space = 0, bool IsDot = false) const {
+ for (iterator I = begin(); I != end(); ++I) {
+ // TODO: We might need a .printJson for I.getKey() as well.
+ Indent(Out, Space, IsDot)
+ << "{ \"cluster\": \"" << I.getKey() << "\", \"pointer\": \""
+ << (const void *)I.getKey() << "\", \"items\": [" << NL;
+
+ ++Space;
+ const ClusterBindings &CB = I.getData();
+ for (ClusterBindings::iterator CI = CB.begin(); CI != CB.end(); ++CI) {
+ Indent(Out, Space, IsDot) << "{ " << CI.getKey() << ", \"value\": ";
+ CI.getData().printJson(Out, /*AddQuotes=*/true);
+ Out << " }";
+ if (std::next(CI) != CB.end())
+ Out << ',';
+ Out << NL;
+ }
+
+ --Space;
+ Indent(Out, Space, IsDot) << "]}";
+ if (std::next(I) != end())
+ Out << ',';
+ Out << NL;
+ }
}
- LLVM_DUMP_METHOD void dump() const { dump(llvm::errs(), "\n"); }
+ LLVM_DUMP_METHOD void dump() const { printJson(llvm::errs()); }
};
} // end anonymous namespace
@@ -599,7 +611,8 @@ public: // Part of public interface to class.
RBFactory.getTreeFactory());
}
- void print(Store store, raw_ostream &Out, const char* nl) override;
+ void printJson(raw_ostream &Out, Store S, const char *NL = "\n",
+ unsigned int Space = 0, bool IsDot = false) const override;
void iterBindings(Store store, BindingsHandler& f) override {
RegionBindingsRef B = getRegionBindings(store);
@@ -1240,7 +1253,7 @@ RegionStoreManager::invalidateGlobalRegion(MemRegion::Kind K,
// Bind the globals memory space to a new symbol that we will use to derive
// the bindings for all globals.
const GlobalsSpaceRegion *GS = MRMgr.getGlobalsRegion(K);
- SVal V = svalBuilder.conjureSymbolVal(/* SymbolTag = */ (const void*) GS, Ex, LCtx,
+ SVal V = svalBuilder.conjureSymbolVal(/* symbolTag = */ (const void*) GS, Ex, LCtx,
/* type does not matter */ Ctx.IntTy,
Count);
@@ -1660,7 +1673,7 @@ SVal RegionStoreManager::getBindingForElement(RegionBindingsConstRef B,
const VarDecl *VD = VR->getDecl();
// Either the array or the array element has to be const.
if (VD->getType().isConstQualified() || R->getElementType().isConstQualified()) {
- if (const Expr *Init = VD->getInit()) {
+ if (const Expr *Init = VD->getAnyInitializer()) {
if (const auto *InitList = dyn_cast<InitListExpr>(Init)) {
// The array index has to be known.
if (auto CI = R->getIndex().getAs<nonloc::ConcreteInt>()) {
@@ -1750,7 +1763,7 @@ SVal RegionStoreManager::getBindingForField(RegionBindingsConstRef B,
unsigned Index = FD->getFieldIndex();
// Either the record variable or the field has to be const qualified.
if (RecordVarTy.isConstQualified() || Ty.isConstQualified())
- if (const Expr *Init = VD->getInit())
+ if (const Expr *Init = VD->getAnyInitializer())
if (const auto *InitList = dyn_cast<InitListExpr>(Init)) {
if (Index < InitList->getNumInits()) {
if (const Expr *FieldInit = InitList->getInit(Index))
@@ -1932,7 +1945,10 @@ SVal RegionStoreManager::getBindingForVar(RegionBindingsConstRef B,
const VarRegion *R) {
// Check if the region has a binding.
- if (const Optional<SVal> &V = B.getDirectBinding(R))
+ if (Optional<SVal> V = B.getDirectBinding(R))
+ return *V;
+
+ if (Optional<SVal> V = B.getDefaultBinding(R))
return *V;
// Lazily derive a value for the VarRegion.
@@ -1945,7 +1961,7 @@ SVal RegionStoreManager::getBindingForVar(RegionBindingsConstRef B,
// Is 'VD' declared constant? If so, retrieve the constant value.
if (VD->getType().isConstQualified()) {
- if (const Expr *Init = VD->getInit()) {
+ if (const Expr *Init = VD->getAnyInitializer()) {
if (Optional<SVal> V = svalBuilder.getConstantVal(Init))
return *V;
@@ -2339,12 +2355,64 @@ RegionBindingsRef RegionStoreManager::bindStruct(RegionBindingsConstRef B,
if (V.isUnknown() || !V.getAs<nonloc::CompoundVal>())
return bindAggregate(B, R, UnknownVal());
+ // The raw CompoundVal is essentially a symbolic InitListExpr: an (immutable)
+ // list of other values. It appears pretty much only when there's an actual
+ // initializer list expression in the program, and the analyzer tries to
+ // unwrap it as soon as possible.
+ // This code is where such unwrapping happens: when the compound value is put into
+ // the object that it was supposed to initialize (it's an *initializer* list,
+ // after all), instead of binding the whole value to the whole object, we bind
+ // sub-values to sub-objects. Sub-values may themselves be compound values,
+ // and in this case the procedure becomes recursive.
+ // FIXME: The annoying part about compound values is that they don't carry
+ // any sort of information about which value corresponds to which sub-object.
+ // It's simply a list of values in the middle of nowhere; we expect to match
+ // them to sub-objects, essentially, "by index": first value binds to
+ // the first field, second value binds to the second field, etc.
+ // It would have been much safer to organize non-lazy compound values as
+ // a mapping from fields/bases to values.
const nonloc::CompoundVal& CV = V.castAs<nonloc::CompoundVal>();
nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
- RecordDecl::field_iterator FI, FE;
RegionBindingsRef NewB(B);
+ // In C++17 aggregates may have base classes, handle those as well.
+ // They appear before fields in the initializer list / compound value.
+ if (const auto *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+ // If the object was constructed with a constructor, its value is a
+ // LazyCompoundVal. If it's a raw CompoundVal, it means that we're
+ // performing aggregate initialization. The only exception from this
+ // rule is sending an Objective-C++ message that returns a C++ object
+ // to a nil receiver; in this case the semantics is to return a
+ // zero-initialized object even if it's a C++ object that doesn't have
+ // this sort of constructor; the CompoundVal is empty in this case.
+ assert((CRD->isAggregate() || (Ctx.getLangOpts().ObjC && VI == VE)) &&
+ "Non-aggregates are constructed with a constructor!");
+
+ for (const auto &B : CRD->bases()) {
+ // (Multiple inheritance is fine though.)
+ assert(!B.isVirtual() && "Aggregates cannot have virtual base classes!");
+
+ if (VI == VE)
+ break;
+
+ QualType BTy = B.getType();
+ assert(BTy->isStructureOrClassType() && "Base classes must be classes!");
+
+ const CXXRecordDecl *BRD = BTy->getAsCXXRecordDecl();
+ assert(BRD && "Base classes must be C++ classes!");
+
+ const CXXBaseObjectRegion *BR =
+ MRMgr.getCXXBaseObjectRegion(BRD, R, /*IsVirtual=*/false);
+
+ NewB = bindStruct(NewB, BR, *VI);
+
+ ++VI;
+ }
+ }
+
+ RecordDecl::field_iterator FI, FE;
+
for (FI = RD->field_begin(), FE = RD->field_end(); FI != FE; ++FI) {
if (VI == VE)
@@ -2391,10 +2459,7 @@ RegionStoreManager::bindAggregate(RegionBindingsConstRef B,
namespace {
class RemoveDeadBindingsWorker
: public ClusterAnalysis<RemoveDeadBindingsWorker> {
- using ChildrenListTy = SmallVector<const SymbolDerived *, 4>;
- using MapParentsToDerivedTy = llvm::DenseMap<SymbolRef, ChildrenListTy>;
-
- MapParentsToDerivedTy ParentsToDerived;
+ SmallVector<const SymbolicRegion *, 12> Postponed;
SymbolReaper &SymReaper;
const StackFrameContext *CurrentLCtx;
@@ -2415,10 +2480,8 @@ public:
bool AddToWorkList(const MemRegion *R);
+ bool UpdatePostponed();
void VisitBinding(SVal V);
-
-private:
- void populateWorklistFromSymbol(SymbolRef s);
};
}
@@ -2438,11 +2501,10 @@ void RemoveDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
}
if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(baseR)) {
- if (SymReaper.isLive(SR->getSymbol())) {
+ if (SymReaper.isLive(SR->getSymbol()))
AddToWorkList(SR, &C);
- } else if (const auto *SD = dyn_cast<SymbolDerived>(SR->getSymbol())) {
- ParentsToDerived[SD->getParentSymbol()].push_back(SD);
- }
+ else
+ Postponed.push_back(SR);
return;
}
@@ -2455,7 +2517,7 @@ void RemoveDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
// CXXThisRegion in the current or parent location context is live.
if (const CXXThisRegion *TR = dyn_cast<CXXThisRegion>(baseR)) {
const auto *StackReg =
- cast<StackArgumentsSpaceRegion>(TR->getSuperRegion());
+ cast<StackArgumentsSpaceRegion>(TR->getSuperRegion());
const StackFrameContext *RegCtx = StackReg->getStackFrame();
if (CurrentLCtx &&
(RegCtx == CurrentLCtx || RegCtx->isParentOf(CurrentLCtx)))
@@ -2499,15 +2561,6 @@ void RemoveDeadBindingsWorker::VisitBinding(SVal V) {
// If V is a region, then add it to the worklist.
if (const MemRegion *R = V.getAsRegion()) {
AddToWorkList(R);
-
- if (const auto *TVR = dyn_cast<TypedValueRegion>(R)) {
- DefinedOrUnknownSVal RVS =
- RM.getSValBuilder().getRegionValueSymbolVal(TVR);
- if (const MemRegion *SR = RVS.getAsRegion()) {
- AddToWorkList(SR);
- }
- }
-
SymReaper.markLive(R);
// All regions captured by a block are also live.
@@ -2521,30 +2574,25 @@ void RemoveDeadBindingsWorker::VisitBinding(SVal V) {
// Update the set of live symbols.
- for (auto SI = V.symbol_begin(), SE = V.symbol_end(); SI != SE; ++SI) {
- populateWorklistFromSymbol(*SI);
-
- for (const auto *SD : ParentsToDerived[*SI])
- populateWorklistFromSymbol(SD);
-
+ for (auto SI = V.symbol_begin(), SE = V.symbol_end(); SI!=SE; ++SI)
SymReaper.markLive(*SI);
- }
}
-void RemoveDeadBindingsWorker::populateWorklistFromSymbol(SymbolRef S) {
- if (const auto *SD = dyn_cast<SymbolData>(S)) {
- if (Loc::isLocType(SD->getType()) && !SymReaper.isLive(SD)) {
- const SymbolicRegion *SR = RM.getRegionManager().getSymbolicRegion(SD);
-
- if (B.contains(SR))
- AddToWorkList(SR);
+bool RemoveDeadBindingsWorker::UpdatePostponed() {
+ // See if any postponed SymbolicRegions are actually live now, after
+ // having done a scan.
+ bool Changed = false;
- const SymbolicRegion *SHR =
- RM.getRegionManager().getSymbolicHeapRegion(SD);
- if (B.contains(SHR))
- AddToWorkList(SHR);
+ for (auto I = Postponed.begin(), E = Postponed.end(); I != E; ++I) {
+ if (const SymbolicRegion *SR = *I) {
+ if (SymReaper.isLive(SR->getSymbol())) {
+ Changed |= AddToWorkList(SR);
+ *I = nullptr;
+ }
}
}
+
+ return Changed;
}
StoreRef RegionStoreManager::removeDeadBindings(Store store,
@@ -2560,7 +2608,7 @@ StoreRef RegionStoreManager::removeDeadBindings(Store store,
W.AddToWorkList(*I);
}
- W.RunWorkList();
+ do W.RunWorkList(); while (W.UpdatePostponed());
// We have now scanned the store, marking reachable regions and symbols
// as live. We now remove all the regions that are dead from the store
@@ -2581,11 +2629,18 @@ StoreRef RegionStoreManager::removeDeadBindings(Store store,
// Utility methods.
//===----------------------------------------------------------------------===//
-void RegionStoreManager::print(Store store, raw_ostream &OS,
- const char* nl) {
- RegionBindingsRef B = getRegionBindings(store);
- OS << "Store (direct and default bindings), "
- << B.asStore()
- << " :" << nl;
- B.dump(OS, nl);
+void RegionStoreManager::printJson(raw_ostream &Out, Store S, const char *NL,
+ unsigned int Space, bool IsDot) const {
+ RegionBindingsRef Bindings = getRegionBindings(S);
+
+ Indent(Out, Space, IsDot) << "\"store\": ";
+
+ if (Bindings.isEmpty()) {
+ Out << "null," << NL;
+ return;
+ }
+
+ Out << "{ \"pointer\": \"" << Bindings.asStore() << "\", \"items\": [" << NL;
+ Bindings.printJson(Out, NL, Space + 1, IsDot);
+ Indent(Out, Space, IsDot) << "]}," << NL;
}
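A minimal standalone example (illustration only, not part of this patch) of the C++17 aggregate-with-base-classes case handled by the new bindStruct() code above: in an aggregate initializer the base subobject's values come first, followed by the derived class's own fields, which is the order in which the compound value is consumed. Compile with -std=c++17.

#include <cassert>

struct Base { int b; };
struct Derived : Base { int d; }; // still an aggregate in C++17

int main() {
  Derived X{{1}, 2}; // {1} initializes the Base subobject, 2 binds to d
  assert(X.b == 1 && X.d == 2);
}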
diff --git a/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp b/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp
new file mode 100644
index 000000000000..d5c14351d330
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp
@@ -0,0 +1,18 @@
+//== SMTConstraintManager.cpp -----------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h"
+
+using namespace clang;
+using namespace ento;
+
+std::unique_ptr<ConstraintManager>
+ento::CreateZ3ConstraintManager(ProgramStateManager &StMgr, SubEngine *Eng) {
+ return llvm::make_unique<SMTConstraintManager>(Eng, StMgr.getSValBuilder());
+}
diff --git a/lib/StaticAnalyzer/Core/SValBuilder.cpp b/lib/StaticAnalyzer/Core/SValBuilder.cpp
index 6c0d487c8a87..3a5841137e1a 100644
--- a/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -1,9 +1,8 @@
//===- SValBuilder.cpp - Basic class for all SValBuilder implementations --===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/StaticAnalyzer/Core/SVals.cpp b/lib/StaticAnalyzer/Core/SVals.cpp
index 933c5c330072..9b5de6c3eb92 100644
--- a/lib/StaticAnalyzer/Core/SVals.cpp
+++ b/lib/StaticAnalyzer/Core/SVals.cpp
@@ -1,9 +1,8 @@
-//===- RValues.cpp - Abstract RValues for Path-Sens. Value Tracking -------===//
+//===-- SVals.cpp - Abstract RValues for Path-Sens. Value Tracking --------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -17,6 +16,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Type.h"
+#include "clang/Basic/JsonSupport.h"
#include "clang/Basic/LLVM.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
@@ -284,6 +284,15 @@ SVal loc::ConcreteInt::evalBinOp(BasicValueFactory& BasicVals,
LLVM_DUMP_METHOD void SVal::dump() const { dumpToStream(llvm::errs()); }
+void SVal::printJson(raw_ostream &Out, bool AddQuotes) const {
+ std::string Buf;
+ llvm::raw_string_ostream TempOut(Buf);
+
+ dumpToStream(TempOut);
+
+ Out << JsonFormat(TempOut.str(), AddQuotes);
+}
+
void SVal::dumpToStream(raw_ostream &os) const {
switch (getBaseKind()) {
case UnknownValKind:
diff --git a/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp b/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
index fecbc0001079..d1faf3f4dea9 100644
--- a/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
+++ b/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
@@ -1,9 +1,8 @@
//===--- SarifDiagnostics.cpp - Sarif Diagnostics for Paths -----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -192,15 +191,16 @@ static json::Object createLocation(json::Object &&PhysicalLocation,
static Importance calculateImportance(const PathDiagnosticPiece &Piece) {
switch (Piece.getKind()) {
- case PathDiagnosticPiece::Kind::Call:
- case PathDiagnosticPiece::Kind::Macro:
- case PathDiagnosticPiece::Kind::Note:
+ case PathDiagnosticPiece::Call:
+ case PathDiagnosticPiece::Macro:
+ case PathDiagnosticPiece::Note:
+ case PathDiagnosticPiece::PopUp:
// FIXME: What should be reported here?
break;
- case PathDiagnosticPiece::Kind::Event:
+ case PathDiagnosticPiece::Event:
return Piece.getTagStr() == "ConditionBRVisitor" ? Importance::Important
: Importance::Essential;
- case PathDiagnosticPiece::Kind::ControlFlow:
+ case PathDiagnosticPiece::ControlFlow:
return Importance::Unimportant;
}
return Importance::Unimportant;
@@ -257,7 +257,7 @@ static json::Object createResult(const PathDiagnostic &Diag, json::Array &Files,
static StringRef getRuleDescription(StringRef CheckName) {
return llvm::StringSwitch<StringRef>(CheckName)
#define GET_CHECKERS
-#define CHECKER(FULLNAME, CLASS, HELPTEXT, DOC_URI) \
+#define CHECKER(FULLNAME, CLASS, HELPTEXT, DOC_URI, IS_HIDDEN) \
.Case(FULLNAME, HELPTEXT)
#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
#undef CHECKER
@@ -268,7 +268,7 @@ static StringRef getRuleDescription(StringRef CheckName) {
static StringRef getRuleHelpURIStr(StringRef CheckName) {
return llvm::StringSwitch<StringRef>(CheckName)
#define GET_CHECKERS
-#define CHECKER(FULLNAME, CLASS, HELPTEXT, DOC_URI) \
+#define CHECKER(FULLNAME, CLASS, HELPTEXT, DOC_URI, IS_HIDDEN) \
.Case(FULLNAME, DOC_URI)
#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
#undef CHECKER
@@ -345,5 +345,5 @@ void SarifDiagnostics::FlushDiagnosticsImpl(
"http://json.schemastore.org/sarif-2.0.0-csd.2.beta.2018-11-28"},
{"version", "2.0.0-csd.2.beta.2018-11-28"},
{"runs", json::Array{createRun(Diags)}}};
- OS << llvm::formatv("{0:2}", json::Value(std::move(Sarif)));
+ OS << llvm::formatv("{0:2}\n", json::Value(std::move(Sarif)));
}
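
getRuleDescription() and getRuleHelpURIStr() above build their lookup tables by expanding the CHECKER X-macro from Checkers.inc inside an llvm::StringSwitch; the hunk only widens the macro with the new IS_HIDDEN argument. Below is a rough self-contained sketch of the same expansion trick under stated assumptions: FAKE_CHECKERS_INC is a made-up two-entry table standing in for Checkers.inc, and plain strcmp stands in for StringSwitch.

#include <cstring>
#include <iostream>

// Made-up stand-in for Checkers.inc: each CHECKER(...) entry expands to
// whatever the including site defines the macro to mean. The trailing
// IS_HIDDEN argument mirrors the widened macro signature in this patch.
#define FAKE_CHECKERS_INC                                                      \
  CHECKER("core.DivideZero", DivZeroChecker, "Check for division by zero",    \
          "https://example.org/docs#DivideZero", false)                       \
  CHECKER("debug.ConfigDumper", ConfigDumper, "Dump the configuration table", \
          "https://example.org/docs#ConfigDumper", true)

static const char *getRuleDescription(const char *CheckName) {
#define CHECKER(FULLNAME, CLASS, HELPTEXT, DOC_URI, IS_HIDDEN)                 \
  if (std::strcmp(CheckName, FULLNAME) == 0)                                   \
    return HELPTEXT;
  FAKE_CHECKERS_INC
#undef CHECKER
  return "<unknown checker>";
}

int main() {
  std::cout << getRuleDescription("core.DivideZero") << '\n';
  std::cout << getRuleDescription("no.such.checker") << '\n';
}
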
diff --git a/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp b/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
index adb40178f5b1..85f60231a276 100644
--- a/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
@@ -1,9 +1,8 @@
//== SimpleConstraintManager.cpp --------------------------------*- C++ -*--==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index fc57cecac9cb..84c52f53ca5e 100644
--- a/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
+++ b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -1,9 +1,8 @@
// SimpleSValBuilder.cpp - A basic SValBuilder -----------------------*- C++ -*-
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -526,7 +525,7 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
case BO_Sub:
if (resultTy->isIntegralOrEnumerationType())
return makeIntVal(0, resultTy);
- return evalCastFromNonLoc(makeIntVal(0, /*Unsigned=*/false), resultTy);
+ return evalCastFromNonLoc(makeIntVal(0, /*isUnsigned=*/false), resultTy);
case BO_Or:
case BO_And:
return evalCastFromNonLoc(lhs, resultTy);
@@ -572,7 +571,15 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
// add 1 to a LocAsInteger, we'd better unpack the Loc and add to it,
// then pack it back into a LocAsInteger.
llvm::APSInt i = rhs.castAs<nonloc::ConcreteInt>().getValue();
- BasicVals.getAPSIntType(Context.VoidPtrTy).apply(i);
+ // If the region has a symbolic base, pay attention to the type; it
+ // might be coming from a non-default address space. For non-symbolic
+ // regions it doesn't matter that much because such comparisons would
+ // most likely evaluate to concrete false anyway. FIXME: We might
+ // still need to handle the non-comparison case.
+ if (SymbolRef lSym = lhs.getAsLocSymbol(true))
+ BasicVals.getAPSIntType(lSym->getType()).apply(i);
+ else
+ BasicVals.getAPSIntType(Context.VoidPtrTy).apply(i);
return evalBinOpLL(state, op, lhsL, makeLoc(i), resultTy);
}
default:
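
The hunk above stops hard-coding VoidPtrTy when converting the integer offset: if the left-hand side has a symbolic base, the offset is adjusted using the APSInt type derived from that symbol's own pointer type, because a pointer in a non-default address space may have a different width. A small sketch of that width/signedness normalization, modelled loosely on APSIntType::apply() with made-up widths and built against LLVM's support libraries:

#include "llvm/ADT/APSInt.h"
#include "llvm/Support/raw_ostream.h"

// Rough model of clang's APSIntType::apply(): force an integer offset to the
// bit width and signedness of the pointer type it is about to be added to.
static void applyPointerWidth(llvm::APSInt &Value, unsigned BitWidth,
                              bool IsUnsigned) {
  Value = Value.extOrTrunc(BitWidth);
  Value.setIsUnsigned(IsUnsigned);
}

int main() {
  // Offset parsed as a 32-bit signed literal.
  llvm::APSInt Offset(llvm::APInt(32, 8), /*isUnsigned=*/false);

  // Generic pointers on this imaginary target are 64 bits wide ...
  applyPointerWidth(Offset, 64, /*IsUnsigned=*/true);
  llvm::outs() << "void*-width offset: " << Offset << " ("
               << Offset.getBitWidth() << " bits)\n";

  // ... but a pointer in a hypothetical 16-bit address space is narrower,
  // which is why the patch consults the symbol's type instead of VoidPtrTy.
  applyPointerWidth(Offset, 16, /*IsUnsigned=*/true);
  llvm::outs() << "addrspace offset: " << Offset << " ("
               << Offset.getBitWidth() << " bits)\n";
}
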
diff --git a/lib/StaticAnalyzer/Core/Store.cpp b/lib/StaticAnalyzer/Core/Store.cpp
index 4fa937d9658d..3cf616161c66 100644
--- a/lib/StaticAnalyzer/Core/Store.cpp
+++ b/lib/StaticAnalyzer/Core/Store.cpp
@@ -1,9 +1,8 @@
//===- Store.cpp - Interface for maps from Locations to Values ------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/StaticAnalyzer/Core/SubEngine.cpp b/lib/StaticAnalyzer/Core/SubEngine.cpp
index 350f4b8bb3a2..d7ddd9cf4610 100644
--- a/lib/StaticAnalyzer/Core/SubEngine.cpp
+++ b/lib/StaticAnalyzer/Core/SubEngine.cpp
@@ -1,9 +1,8 @@
//== SubEngine.cpp - Interface of the subengine of CoreEngine ------*- C++ -*-//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/StaticAnalyzer/Core/SymbolManager.cpp b/lib/StaticAnalyzer/Core/SymbolManager.cpp
index 66273f099a38..675209f6fd7e 100644
--- a/lib/StaticAnalyzer/Core/SymbolManager.cpp
+++ b/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -1,9 +1,8 @@
//===- SymbolManager.h - Management of Symbolic Values --------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -405,7 +404,7 @@ void SymbolReaper::markLive(SymbolRef sym) {
}
void SymbolReaper::markLive(const MemRegion *region) {
- RegionRoots.insert(region);
+ RegionRoots.insert(region->getBaseRegion());
markElementIndicesLive(region);
}
@@ -426,11 +425,15 @@ void SymbolReaper::markInUse(SymbolRef sym) {
}
bool SymbolReaper::isLiveRegion(const MemRegion *MR) {
+ // TODO: For now, liveness of a memory region is equivalent to liveness of its
+ // base region. In fact we can do a bit better: say, if a particular FieldDecl
+ // is not used later in the path, we can diagnose a leak of a value within
+ // that field earlier, before the variable that contains the field dies.
+ MR = MR->getBaseRegion();
+
if (RegionRoots.count(MR))
return true;
- MR = MR->getBaseRegion();
-
if (const auto *SR = dyn_cast<SymbolicRegion>(MR))
return isLive(SR->getSymbol());
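
The two hunks above make markLive() and isLiveRegion() agree on the same canonicalization: both sides reduce a region to its base region, which is exactly the approximation the TODO describes (a finer, per-field answer would let some leaks be reported earlier). A toy standalone model of that invariant; Region and Reaper here are illustrative types, not the analyzer's MemRegion/SymbolReaper:

#include <iostream>
#include <set>
#include <string>

struct Region {
  std::string Name;
  const Region *Base; // null means "I am my own base region"
  const Region *getBaseRegion() const { return Base ? Base : this; }
};

struct Reaper {
  std::set<const Region *> RegionRoots;

  // Mirror of the change in markLive(): insert the *base* region.
  void markLive(const Region *R) { RegionRoots.insert(R->getBaseRegion()); }

  // Mirror of the change in isLiveRegion(): canonicalize before the lookup.
  bool isLiveRegion(const Region *R) const {
    return RegionRoots.count(R->getBaseRegion()) != 0;
  }
};

int main() {
  Region Var{"s", nullptr};
  Region Field{"s.f", &Var};

  Reaper RP;
  RP.markLive(&Field); // marking the field keeps the whole variable live
  std::cout << RP.isLiveRegion(&Var) << ' ' << RP.isLiveRegion(&Field) << '\n';
}
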
diff --git a/lib/StaticAnalyzer/Core/TaintManager.cpp b/lib/StaticAnalyzer/Core/TaintManager.cpp
deleted file mode 100644
index c34b0ca1839d..000000000000
--- a/lib/StaticAnalyzer/Core/TaintManager.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-//== TaintManager.cpp ------------------------------------------ -*- C++ -*--=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h"
-
-using namespace clang;
-using namespace ento;
-
-void *ProgramStateTrait<TaintMap>::GDMIndex() {
- static int index = 0;
- return &index;
-}
-
-void *ProgramStateTrait<DerivedSymTaint>::GDMIndex() {
- static int index;
- return &index;
-}
diff --git a/lib/StaticAnalyzer/Core/WorkList.cpp b/lib/StaticAnalyzer/Core/WorkList.cpp
index e705393cb83a..129d1720395e 100644
--- a/lib/StaticAnalyzer/Core/WorkList.cpp
+++ b/lib/StaticAnalyzer/Core/WorkList.cpp
@@ -1,9 +1,8 @@
//===- WorkList.cpp - Analyzer work-list implementation--------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp b/lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp
deleted file mode 100644
index c4729f969f33..000000000000
--- a/lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp
+++ /dev/null
@@ -1,841 +0,0 @@
-//== Z3ConstraintManager.cpp --------------------------------*- C++ -*--==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Basic/TargetInfo.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h"
-
-#include "clang/Config/config.h"
-
-using namespace clang;
-using namespace ento;
-
-#if CLANG_ANALYZER_WITH_Z3
-
-#include <z3.h>
-
-namespace {
-
-/// Configuration class for Z3
-class Z3Config {
- friend class Z3Context;
-
- Z3_config Config;
-
-public:
- Z3Config() : Config(Z3_mk_config()) {
- // Enable model finding
- Z3_set_param_value(Config, "model", "true");
- // Disable proof generation
- Z3_set_param_value(Config, "proof", "false");
- // Set timeout to 15000ms = 15s
- Z3_set_param_value(Config, "timeout", "15000");
- }
-
- ~Z3Config() { Z3_del_config(Config); }
-}; // end class Z3Config
-
-// Function used to report errors
-void Z3ErrorHandler(Z3_context Context, Z3_error_code Error) {
- llvm::report_fatal_error("Z3 error: " +
- llvm::Twine(Z3_get_error_msg(Context, Error)));
-}
-
-/// Wrapper for Z3 context
-class Z3Context {
-public:
- Z3_context Context;
-
- Z3Context() {
- Context = Z3_mk_context_rc(Z3Config().Config);
- // The error function is set here because the context is the first object
- // created by the backend
- Z3_set_error_handler(Context, Z3ErrorHandler);
- }
-
- virtual ~Z3Context() {
- Z3_del_context(Context);
- Context = nullptr;
- }
-}; // end class Z3Context
-
-/// Wrapper for Z3 Sort
-class Z3Sort : public SMTSort {
- friend class Z3Solver;
-
- Z3Context &Context;
-
- Z3_sort Sort;
-
-public:
- /// Default constructor, mainly used by make_shared
- Z3Sort(Z3Context &C, Z3_sort ZS) : Context(C), Sort(ZS) {
- Z3_inc_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
- }
-
- /// Override implicit copy constructor for correct reference counting.
- Z3Sort(const Z3Sort &Other) : Context(Other.Context), Sort(Other.Sort) {
- Z3_inc_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
- }
-
- /// Override implicit copy assignment constructor for correct reference
- /// counting.
- Z3Sort &operator=(const Z3Sort &Other) {
- Z3_inc_ref(Context.Context, reinterpret_cast<Z3_ast>(Other.Sort));
- Z3_dec_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
- Sort = Other.Sort;
- return *this;
- }
-
- Z3Sort(Z3Sort &&Other) = delete;
- Z3Sort &operator=(Z3Sort &&Other) = delete;
-
- ~Z3Sort() {
- if (Sort)
- Z3_dec_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
- }
-
- bool isBitvectorSortImpl() const override {
- return (Z3_get_sort_kind(Context.Context, Sort) == Z3_BV_SORT);
- }
-
- bool isFloatSortImpl() const override {
- return (Z3_get_sort_kind(Context.Context, Sort) == Z3_FLOATING_POINT_SORT);
- }
-
- bool isBooleanSortImpl() const override {
- return (Z3_get_sort_kind(Context.Context, Sort) == Z3_BOOL_SORT);
- }
-
- unsigned getBitvectorSortSizeImpl() const override {
- return Z3_get_bv_sort_size(Context.Context, Sort);
- }
-
- unsigned getFloatSortSizeImpl() const override {
- return Z3_fpa_get_ebits(Context.Context, Sort) +
- Z3_fpa_get_sbits(Context.Context, Sort);
- }
-
- bool equal_to(SMTSort const &Other) const override {
- return Z3_is_eq_sort(Context.Context, Sort,
- static_cast<const Z3Sort &>(Other).Sort);
- }
-
- void print(raw_ostream &OS) const override {
- OS << Z3_sort_to_string(Context.Context, Sort);
- }
-}; // end class Z3Sort
-
-static const Z3Sort &toZ3Sort(const SMTSort &S) {
- return static_cast<const Z3Sort &>(S);
-}
-
-class Z3Expr : public SMTExpr {
- friend class Z3Solver;
-
- Z3Context &Context;
-
- Z3_ast AST;
-
-public:
- Z3Expr(Z3Context &C, Z3_ast ZA) : SMTExpr(), Context(C), AST(ZA) {
- Z3_inc_ref(Context.Context, AST);
- }
-
- /// Override implicit copy constructor for correct reference counting.
- Z3Expr(const Z3Expr &Copy) : SMTExpr(), Context(Copy.Context), AST(Copy.AST) {
- Z3_inc_ref(Context.Context, AST);
- }
-
- /// Override implicit copy assignment constructor for correct reference
- /// counting.
- Z3Expr &operator=(const Z3Expr &Other) {
- Z3_inc_ref(Context.Context, Other.AST);
- Z3_dec_ref(Context.Context, AST);
- AST = Other.AST;
- return *this;
- }
-
- Z3Expr(Z3Expr &&Other) = delete;
- Z3Expr &operator=(Z3Expr &&Other) = delete;
-
- ~Z3Expr() {
- if (AST)
- Z3_dec_ref(Context.Context, AST);
- }
-
- void Profile(llvm::FoldingSetNodeID &ID) const override {
- ID.AddInteger(Z3_get_ast_hash(Context.Context, AST));
- }
-
- /// Comparison of AST equality, not model equivalence.
- bool equal_to(SMTExpr const &Other) const override {
- assert(Z3_is_eq_sort(Context.Context, Z3_get_sort(Context.Context, AST),
- Z3_get_sort(Context.Context,
- static_cast<const Z3Expr &>(Other).AST)) &&
- "AST's must have the same sort");
- return Z3_is_eq_ast(Context.Context, AST,
- static_cast<const Z3Expr &>(Other).AST);
- }
-
- void print(raw_ostream &OS) const override {
- OS << Z3_ast_to_string(Context.Context, AST);
- }
-}; // end class Z3Expr
-
-static const Z3Expr &toZ3Expr(const SMTExpr &E) {
- return static_cast<const Z3Expr &>(E);
-}
-
-class Z3Model {
- friend class Z3Solver;
-
- Z3Context &Context;
-
- Z3_model Model;
-
-public:
- Z3Model(Z3Context &C, Z3_model ZM) : Context(C), Model(ZM) {
- Z3_model_inc_ref(Context.Context, Model);
- }
-
- Z3Model(const Z3Model &Other) = delete;
- Z3Model(Z3Model &&Other) = delete;
- Z3Model &operator=(Z3Model &Other) = delete;
- Z3Model &operator=(Z3Model &&Other) = delete;
-
- ~Z3Model() {
- if (Model)
- Z3_model_dec_ref(Context.Context, Model);
- }
-
- void print(raw_ostream &OS) const {
- OS << Z3_model_to_string(Context.Context, Model);
- }
-
- LLVM_DUMP_METHOD void dump() const { print(llvm::errs()); }
-}; // end class Z3Model
-
-/// Get the corresponding IEEE floating-point type for a given bitwidth.
-static const llvm::fltSemantics &getFloatSemantics(unsigned BitWidth) {
- switch (BitWidth) {
- default:
- llvm_unreachable("Unsupported floating-point semantics!");
- break;
- case 16:
- return llvm::APFloat::IEEEhalf();
- case 32:
- return llvm::APFloat::IEEEsingle();
- case 64:
- return llvm::APFloat::IEEEdouble();
- case 128:
- return llvm::APFloat::IEEEquad();
- }
-}
-
-// Determine whether two float semantics are equivalent
-static bool areEquivalent(const llvm::fltSemantics &LHS,
- const llvm::fltSemantics &RHS) {
- return (llvm::APFloat::semanticsPrecision(LHS) ==
- llvm::APFloat::semanticsPrecision(RHS)) &&
- (llvm::APFloat::semanticsMinExponent(LHS) ==
- llvm::APFloat::semanticsMinExponent(RHS)) &&
- (llvm::APFloat::semanticsMaxExponent(LHS) ==
- llvm::APFloat::semanticsMaxExponent(RHS)) &&
- (llvm::APFloat::semanticsSizeInBits(LHS) ==
- llvm::APFloat::semanticsSizeInBits(RHS));
-}
-
-} // end anonymous namespace
-
-typedef llvm::ImmutableSet<std::pair<SymbolRef, Z3Expr>> ConstraintZ3Ty;
-REGISTER_TRAIT_WITH_PROGRAMSTATE(ConstraintZ3, ConstraintZ3Ty)
-
-namespace {
-
-class Z3Solver : public SMTSolver {
- friend class Z3ConstraintManager;
-
- Z3Context Context;
-
- Z3_solver Solver;
-
-public:
- Z3Solver() : Solver(Z3_mk_simple_solver(Context.Context)) {
- Z3_solver_inc_ref(Context.Context, Solver);
- }
-
- Z3Solver(const Z3Solver &Other) = delete;
- Z3Solver(Z3Solver &&Other) = delete;
- Z3Solver &operator=(Z3Solver &Other) = delete;
- Z3Solver &operator=(Z3Solver &&Other) = delete;
-
- ~Z3Solver() {
- if (Solver)
- Z3_solver_dec_ref(Context.Context, Solver);
- }
-
- void addConstraint(const SMTExprRef &Exp) const override {
- Z3_solver_assert(Context.Context, Solver, toZ3Expr(*Exp).AST);
- }
-
- SMTSortRef getBoolSort() override {
- return std::make_shared<Z3Sort>(Context, Z3_mk_bool_sort(Context.Context));
- }
-
- SMTSortRef getBitvectorSort(unsigned BitWidth) override {
- return std::make_shared<Z3Sort>(Context,
- Z3_mk_bv_sort(Context.Context, BitWidth));
- }
-
- SMTSortRef getSort(const SMTExprRef &Exp) override {
- return std::make_shared<Z3Sort>(
- Context, Z3_get_sort(Context.Context, toZ3Expr(*Exp).AST));
- }
-
- SMTSortRef getFloat16Sort() override {
- return std::make_shared<Z3Sort>(Context,
- Z3_mk_fpa_sort_16(Context.Context));
- }
-
- SMTSortRef getFloat32Sort() override {
- return std::make_shared<Z3Sort>(Context,
- Z3_mk_fpa_sort_32(Context.Context));
- }
-
- SMTSortRef getFloat64Sort() override {
- return std::make_shared<Z3Sort>(Context,
- Z3_mk_fpa_sort_64(Context.Context));
- }
-
- SMTSortRef getFloat128Sort() override {
- return std::make_shared<Z3Sort>(Context,
- Z3_mk_fpa_sort_128(Context.Context));
- }
-
- SMTExprRef newExprRef(const SMTExpr &E) const override {
- return std::make_shared<Z3Expr>(toZ3Expr(E));
- }
-
- SMTExprRef mkBVNeg(const SMTExprRef &Exp) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvneg(Context.Context, toZ3Expr(*Exp).AST)));
- }
-
- SMTExprRef mkBVNot(const SMTExprRef &Exp) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvnot(Context.Context, toZ3Expr(*Exp).AST)));
- }
-
- SMTExprRef mkNot(const SMTExprRef &Exp) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_not(Context.Context, toZ3Expr(*Exp).AST)));
- }
-
- SMTExprRef mkBVAdd(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvadd(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkBVSub(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvsub(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkBVMul(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvmul(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkBVSRem(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvsrem(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkBVURem(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvurem(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkBVSDiv(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvsdiv(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkBVUDiv(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvudiv(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkBVShl(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvshl(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkBVAshr(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvashr(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkBVLshr(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvlshr(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkBVXor(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvxor(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkBVOr(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvor(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkBVAnd(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvand(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkBVUlt(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvult(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkBVSlt(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvslt(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkBVUgt(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvugt(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkBVSgt(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvsgt(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkBVUle(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvule(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkBVSle(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvsle(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkBVUge(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvuge(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkBVSge(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_bvsge(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkAnd(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- Z3_ast Args[2] = {toZ3Expr(*LHS).AST, toZ3Expr(*RHS).AST};
- return newExprRef(Z3Expr(Context, Z3_mk_and(Context.Context, 2, Args)));
- }
-
- SMTExprRef mkOr(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- Z3_ast Args[2] = {toZ3Expr(*LHS).AST, toZ3Expr(*RHS).AST};
- return newExprRef(Z3Expr(Context, Z3_mk_or(Context.Context, 2, Args)));
- }
-
- SMTExprRef mkEqual(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_eq(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkFPNeg(const SMTExprRef &Exp) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_fpa_neg(Context.Context, toZ3Expr(*Exp).AST)));
- }
-
- SMTExprRef mkFPIsInfinite(const SMTExprRef &Exp) override {
- return newExprRef(Z3Expr(
- Context, Z3_mk_fpa_is_infinite(Context.Context, toZ3Expr(*Exp).AST)));
- }
-
- SMTExprRef mkFPIsNaN(const SMTExprRef &Exp) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_fpa_is_nan(Context.Context, toZ3Expr(*Exp).AST)));
- }
-
- SMTExprRef mkFPIsNormal(const SMTExprRef &Exp) override {
- return newExprRef(Z3Expr(
- Context, Z3_mk_fpa_is_normal(Context.Context, toZ3Expr(*Exp).AST)));
- }
-
- SMTExprRef mkFPIsZero(const SMTExprRef &Exp) override {
- return newExprRef(Z3Expr(
- Context, Z3_mk_fpa_is_zero(Context.Context, toZ3Expr(*Exp).AST)));
- }
-
- SMTExprRef mkFPMul(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- SMTExprRef RoundingMode = getFloatRoundingMode();
- return newExprRef(
- Z3Expr(Context,
- Z3_mk_fpa_mul(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST, toZ3Expr(*RoundingMode).AST)));
- }
-
- SMTExprRef mkFPDiv(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- SMTExprRef RoundingMode = getFloatRoundingMode();
- return newExprRef(
- Z3Expr(Context,
- Z3_mk_fpa_div(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST, toZ3Expr(*RoundingMode).AST)));
- }
-
- SMTExprRef mkFPRem(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_fpa_rem(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkFPAdd(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- SMTExprRef RoundingMode = getFloatRoundingMode();
- return newExprRef(
- Z3Expr(Context,
- Z3_mk_fpa_add(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST, toZ3Expr(*RoundingMode).AST)));
- }
-
- SMTExprRef mkFPSub(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- SMTExprRef RoundingMode = getFloatRoundingMode();
- return newExprRef(
- Z3Expr(Context,
- Z3_mk_fpa_sub(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST, toZ3Expr(*RoundingMode).AST)));
- }
-
- SMTExprRef mkFPLt(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_fpa_lt(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkFPGt(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_fpa_gt(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkFPLe(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_fpa_leq(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkFPGe(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_fpa_geq(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkFPEqual(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_fpa_eq(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkIte(const SMTExprRef &Cond, const SMTExprRef &T,
- const SMTExprRef &F) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_ite(Context.Context, toZ3Expr(*Cond).AST,
- toZ3Expr(*T).AST, toZ3Expr(*F).AST)));
- }
-
- SMTExprRef mkBVSignExt(unsigned i, const SMTExprRef &Exp) override {
- return newExprRef(Z3Expr(
- Context, Z3_mk_sign_ext(Context.Context, i, toZ3Expr(*Exp).AST)));
- }
-
- SMTExprRef mkBVZeroExt(unsigned i, const SMTExprRef &Exp) override {
- return newExprRef(Z3Expr(
- Context, Z3_mk_zero_ext(Context.Context, i, toZ3Expr(*Exp).AST)));
- }
-
- SMTExprRef mkBVExtract(unsigned High, unsigned Low,
- const SMTExprRef &Exp) override {
- return newExprRef(Z3Expr(Context, Z3_mk_extract(Context.Context, High, Low,
- toZ3Expr(*Exp).AST)));
- }
-
- SMTExprRef mkBVConcat(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_concat(Context.Context, toZ3Expr(*LHS).AST,
- toZ3Expr(*RHS).AST)));
- }
-
- SMTExprRef mkFPtoFP(const SMTExprRef &From, const SMTSortRef &To) override {
- SMTExprRef RoundingMode = getFloatRoundingMode();
- return newExprRef(Z3Expr(
- Context,
- Z3_mk_fpa_to_fp_float(Context.Context, toZ3Expr(*RoundingMode).AST,
- toZ3Expr(*From).AST, toZ3Sort(*To).Sort)));
- }
-
- SMTExprRef mkSBVtoFP(const SMTExprRef &From, const SMTSortRef &To) override {
- SMTExprRef RoundingMode = getFloatRoundingMode();
- return newExprRef(Z3Expr(
- Context,
- Z3_mk_fpa_to_fp_signed(Context.Context, toZ3Expr(*RoundingMode).AST,
- toZ3Expr(*From).AST, toZ3Sort(*To).Sort)));
- }
-
- SMTExprRef mkUBVtoFP(const SMTExprRef &From, const SMTSortRef &To) override {
- SMTExprRef RoundingMode = getFloatRoundingMode();
- return newExprRef(Z3Expr(
- Context,
- Z3_mk_fpa_to_fp_unsigned(Context.Context, toZ3Expr(*RoundingMode).AST,
- toZ3Expr(*From).AST, toZ3Sort(*To).Sort)));
- }
-
- SMTExprRef mkFPtoSBV(const SMTExprRef &From, unsigned ToWidth) override {
- SMTExprRef RoundingMode = getFloatRoundingMode();
- return newExprRef(Z3Expr(
- Context, Z3_mk_fpa_to_sbv(Context.Context, toZ3Expr(*RoundingMode).AST,
- toZ3Expr(*From).AST, ToWidth)));
- }
-
- SMTExprRef mkFPtoUBV(const SMTExprRef &From, unsigned ToWidth) override {
- SMTExprRef RoundingMode = getFloatRoundingMode();
- return newExprRef(Z3Expr(
- Context, Z3_mk_fpa_to_ubv(Context.Context, toZ3Expr(*RoundingMode).AST,
- toZ3Expr(*From).AST, ToWidth)));
- }
-
- SMTExprRef mkBoolean(const bool b) override {
- return newExprRef(Z3Expr(Context, b ? Z3_mk_true(Context.Context)
- : Z3_mk_false(Context.Context)));
- }
-
- SMTExprRef mkBitvector(const llvm::APSInt Int, unsigned BitWidth) override {
- const SMTSortRef Sort = getBitvectorSort(BitWidth);
- return newExprRef(
- Z3Expr(Context, Z3_mk_numeral(Context.Context, Int.toString(10).c_str(),
- toZ3Sort(*Sort).Sort)));
- }
-
- SMTExprRef mkFloat(const llvm::APFloat Float) override {
- SMTSortRef Sort =
- getFloatSort(llvm::APFloat::semanticsSizeInBits(Float.getSemantics()));
-
- llvm::APSInt Int = llvm::APSInt(Float.bitcastToAPInt(), false);
- SMTExprRef Z3Int = mkBitvector(Int, Int.getBitWidth());
- return newExprRef(Z3Expr(
- Context, Z3_mk_fpa_to_fp_bv(Context.Context, toZ3Expr(*Z3Int).AST,
- toZ3Sort(*Sort).Sort)));
- }
-
- SMTExprRef mkSymbol(const char *Name, SMTSortRef Sort) override {
- return newExprRef(
- Z3Expr(Context, Z3_mk_const(Context.Context,
- Z3_mk_string_symbol(Context.Context, Name),
- toZ3Sort(*Sort).Sort)));
- }
-
- llvm::APSInt getBitvector(const SMTExprRef &Exp, unsigned BitWidth,
- bool isUnsigned) override {
- return llvm::APSInt(
- llvm::APInt(BitWidth,
- Z3_get_numeral_string(Context.Context, toZ3Expr(*Exp).AST),
- 10),
- isUnsigned);
- }
-
- bool getBoolean(const SMTExprRef &Exp) override {
- return Z3_get_bool_value(Context.Context, toZ3Expr(*Exp).AST) == Z3_L_TRUE;
- }
-
- SMTExprRef getFloatRoundingMode() override {
- // TODO: Don't assume nearest ties to even rounding mode
- return newExprRef(Z3Expr(Context, Z3_mk_fpa_rne(Context.Context)));
- }
-
- bool toAPFloat(const SMTSortRef &Sort, const SMTExprRef &AST,
- llvm::APFloat &Float, bool useSemantics) {
- assert(Sort->isFloatSort() && "Unsupported sort to floating-point!");
-
- llvm::APSInt Int(Sort->getFloatSortSize(), true);
- const llvm::fltSemantics &Semantics =
- getFloatSemantics(Sort->getFloatSortSize());
- SMTSortRef BVSort = getBitvectorSort(Sort->getFloatSortSize());
- if (!toAPSInt(BVSort, AST, Int, true)) {
- return false;
- }
-
- if (useSemantics && !areEquivalent(Float.getSemantics(), Semantics)) {
- assert(false && "Floating-point types don't match!");
- return false;
- }
-
- Float = llvm::APFloat(Semantics, Int);
- return true;
- }
-
- bool toAPSInt(const SMTSortRef &Sort, const SMTExprRef &AST,
- llvm::APSInt &Int, bool useSemantics) {
- if (Sort->isBitvectorSort()) {
- if (useSemantics && Int.getBitWidth() != Sort->getBitvectorSortSize()) {
- assert(false && "Bitvector types don't match!");
- return false;
- }
-
- // FIXME: This function is also used to retrieve floating-point values,
- // which can be 16, 32, 64 or 128 bits long. Bitvectors can be anything
- // between 1 and 64 bits long, which is the reason we have this weird
- // guard. In the future, we need proper calls in the backend to retrieve
- // floating-points and its special values (NaN, +/-infinity, +/-zero),
- // then we can drop this weird condition.
- if (Sort->getBitvectorSortSize() <= 64 ||
- Sort->getBitvectorSortSize() == 128) {
- Int = getBitvector(AST, Int.getBitWidth(), Int.isUnsigned());
- return true;
- }
-
- assert(false && "Bitwidth not supported!");
- return false;
- }
-
- if (Sort->isBooleanSort()) {
- if (useSemantics && Int.getBitWidth() < 1) {
- assert(false && "Boolean type doesn't match!");
- return false;
- }
-
- Int = llvm::APSInt(llvm::APInt(Int.getBitWidth(), getBoolean(AST)),
- Int.isUnsigned());
- return true;
- }
-
- llvm_unreachable("Unsupported sort to integer!");
- }
-
- bool getInterpretation(const SMTExprRef &Exp, llvm::APSInt &Int) override {
- Z3Model Model(Context, Z3_solver_get_model(Context.Context, Solver));
- Z3_func_decl Func = Z3_get_app_decl(
- Context.Context, Z3_to_app(Context.Context, toZ3Expr(*Exp).AST));
- if (Z3_model_has_interp(Context.Context, Model.Model, Func) != Z3_L_TRUE)
- return false;
-
- SMTExprRef Assign = newExprRef(
- Z3Expr(Context,
- Z3_model_get_const_interp(Context.Context, Model.Model, Func)));
- SMTSortRef Sort = getSort(Assign);
- return toAPSInt(Sort, Assign, Int, true);
- }
-
- bool getInterpretation(const SMTExprRef &Exp, llvm::APFloat &Float) override {
- Z3Model Model(Context, Z3_solver_get_model(Context.Context, Solver));
- Z3_func_decl Func = Z3_get_app_decl(
- Context.Context, Z3_to_app(Context.Context, toZ3Expr(*Exp).AST));
- if (Z3_model_has_interp(Context.Context, Model.Model, Func) != Z3_L_TRUE)
- return false;
-
- SMTExprRef Assign = newExprRef(
- Z3Expr(Context,
- Z3_model_get_const_interp(Context.Context, Model.Model, Func)));
- SMTSortRef Sort = getSort(Assign);
- return toAPFloat(Sort, Assign, Float, true);
- }
-
- Optional<bool> check() const override {
- Z3_lbool res = Z3_solver_check(Context.Context, Solver);
- if (res == Z3_L_TRUE)
- return true;
-
- if (res == Z3_L_FALSE)
- return false;
-
- return Optional<bool>();
- }
-
- void push() override { return Z3_solver_push(Context.Context, Solver); }
-
- void pop(unsigned NumStates = 1) override {
- assert(Z3_solver_get_num_scopes(Context.Context, Solver) >= NumStates);
- return Z3_solver_pop(Context.Context, Solver, NumStates);
- }
-
- bool isFPSupported() override { return true; }
-
- /// Reset the solver and remove all constraints.
- void reset() override { Z3_solver_reset(Context.Context, Solver); }
-
- void print(raw_ostream &OS) const override {
- OS << Z3_solver_to_string(Context.Context, Solver);
- }
-}; // end class Z3Solver
-
-class Z3ConstraintManager : public SMTConstraintManager<ConstraintZ3, Z3Expr> {
- SMTSolverRef Solver = CreateZ3Solver();
-
-public:
- Z3ConstraintManager(SubEngine *SE, SValBuilder &SB)
- : SMTConstraintManager(SE, SB, Solver) {}
-}; // end class Z3ConstraintManager
-
-} // end anonymous namespace
-
-#endif
-
-SMTSolverRef clang::ento::CreateZ3Solver() {
-#if CLANG_ANALYZER_WITH_Z3
- return llvm::make_unique<Z3Solver>();
-#else
- llvm::report_fatal_error("Clang was not compiled with Z3 support, rebuild "
- "with -DCLANG_ANALYZER_ENABLE_Z3_SOLVER=ON",
- false);
- return nullptr;
-#endif
-}
-
-std::unique_ptr<ConstraintManager>
-ento::CreateZ3ConstraintManager(ProgramStateManager &StMgr, SubEngine *Eng) {
-#if CLANG_ANALYZER_WITH_Z3
- return llvm::make_unique<Z3ConstraintManager>(Eng, StMgr.getSValBuilder());
-#else
- llvm::report_fatal_error("Clang was not compiled with Z3 support, rebuild "
- "with -DCLANG_ANALYZER_ENABLE_Z3_SOLVER=ON",
- false);
- return nullptr;
-#endif
-}
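
The deleted Z3Sort/Z3Expr wrappers exist largely to keep Z3's manual reference counts straight: the copy constructor and copy assignment must inc_ref the incoming handle before dec_ref'ing the old one, and the destructor releases the last reference. A self-contained sketch of that pattern under stated assumptions: Node and the handleIncRef/handleDecRef helpers are hypothetical, standing in for Z3_ast and Z3_inc_ref/Z3_dec_ref.

#include <iostream>

// Hypothetical C-style refcounted handle standing in for Z3_ast.
struct Node { int RefCount = 1; };
static void handleIncRef(Node *N) { ++N->RefCount; }
static void handleDecRef(Node *N) {
  if (--N->RefCount == 0)
    delete N;
}

// Same shape as the deleted Z3Expr: copies bump the count, copy assignment
// increments the new handle before releasing the old one (which also makes
// self-assignment safe), and the destructor drops the reference.
class Handle {
  Node *N;

public:
  explicit Handle(Node *Adopted) : N(Adopted) {} // adopts the initial reference
  Handle(const Handle &Other) : N(Other.N) { handleIncRef(N); }
  Handle &operator=(const Handle &Other) {
    handleIncRef(Other.N);
    handleDecRef(N);
    N = Other.N;
    return *this;
  }
  Handle(Handle &&) = delete;
  Handle &operator=(Handle &&) = delete;
  ~Handle() { handleDecRef(N); }

  int refCount() const { return N->RefCount; }
};

int main() {
  Handle A(new Node());
  {
    Handle B = A;                      // 1 -> 2
    std::cout << B.refCount() << '\n'; // prints 2
  }                                    // B released, back to 1
  std::cout << A.refCount() << '\n';   // prints 1
}
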
diff --git a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index d87937d9b63d..454b61fd51a1 100644
--- a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -1,9 +1,8 @@
//===--- AnalysisConsumer.cpp - ASTConsumer for running Analyses ----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -84,10 +83,11 @@ void ento::createTextPathDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
namespace {
class ClangDiagPathDiagConsumer : public PathDiagnosticConsumer {
DiagnosticsEngine &Diag;
- bool IncludePath;
+ bool IncludePath, ShouldEmitAsError;
+
public:
ClangDiagPathDiagConsumer(DiagnosticsEngine &Diag)
- : Diag(Diag), IncludePath(false) {}
+ : Diag(Diag), IncludePath(false), ShouldEmitAsError(false) {}
~ClangDiagPathDiagConsumer() override {}
StringRef getName() const override { return "ClangDiags"; }
@@ -102,9 +102,14 @@ public:
IncludePath = true;
}
+ void enableWerror() { ShouldEmitAsError = true; }
+
void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
FilesMade *filesMade) override {
- unsigned WarnID = Diag.getCustomDiagID(DiagnosticsEngine::Warning, "%0");
+ unsigned WarnID =
+ ShouldEmitAsError
+ ? Diag.getCustomDiagID(DiagnosticsEngine::Error, "%0")
+ : Diag.getCustomDiagID(DiagnosticsEngine::Warning, "%0");
unsigned NoteID = Diag.getCustomDiagID(DiagnosticsEngine::Note, "%0");
for (std::vector<const PathDiagnostic*>::iterator I = Diags.begin(),
@@ -191,7 +196,9 @@ public:
/// Time the analysis of each translation unit.
std::unique_ptr<llvm::TimerGroup> AnalyzerTimers;
- std::unique_ptr<llvm::Timer> TUTotalTimer;
+ std::unique_ptr<llvm::Timer> SyntaxCheckTimer;
+ std::unique_ptr<llvm::Timer> ExprEngineTimer;
+ std::unique_ptr<llvm::Timer> BugReporterTimer;
/// The information about analyzed functions shared throughout the
/// translation unit.
@@ -207,8 +214,13 @@ public:
if (Opts->PrintStats || Opts->ShouldSerializeStats) {
AnalyzerTimers = llvm::make_unique<llvm::TimerGroup>(
"analyzer", "Analyzer timers");
- TUTotalTimer = llvm::make_unique<llvm::Timer>(
- "time", "Analyzer total time", *AnalyzerTimers);
+ SyntaxCheckTimer = llvm::make_unique<llvm::Timer>(
+ "syntaxchecks", "Syntax-based analysis time", *AnalyzerTimers);
+ ExprEngineTimer = llvm::make_unique<llvm::Timer>(
+ "exprengine", "Path exploration time", *AnalyzerTimers);
+ BugReporterTimer = llvm::make_unique<llvm::Timer>(
+ "bugreporter", "Path-sensitive report post-processing time",
+ *AnalyzerTimers);
llvm::EnableStatistics(/* PrintOnExit= */ false);
}
}
@@ -226,6 +238,9 @@ public:
new ClangDiagPathDiagConsumer(PP.getDiagnostics());
PathConsumers.push_back(clangDiags);
+ if (Opts->AnalyzerWerror)
+ clangDiags->enableWerror();
+
if (Opts->AnalysisDiagOpt == PD_TEXT) {
clangDiags->enablePaths();
@@ -338,8 +353,42 @@ public:
/// Handle callbacks for arbitrary Decls.
bool VisitDecl(Decl *D) {
AnalysisMode Mode = getModeForDecl(D, RecVisitorMode);
- if (Mode & AM_Syntax)
+ if (Mode & AM_Syntax) {
+ if (SyntaxCheckTimer)
+ SyntaxCheckTimer->startTimer();
checkerMgr->runCheckersOnASTDecl(D, *Mgr, *RecVisitorBR);
+ if (SyntaxCheckTimer)
+ SyntaxCheckTimer->stopTimer();
+ }
+ return true;
+ }
+
+ bool VisitVarDecl(VarDecl *VD) {
+ if (!Opts->IsNaiveCTUEnabled)
+ return true;
+
+ if (VD->hasExternalStorage() || VD->isStaticDataMember()) {
+ if (!cross_tu::containsConst(VD, *Ctx))
+ return true;
+ } else {
+ // Cannot be initialized in another TU.
+ return true;
+ }
+
+ if (VD->getAnyInitializer())
+ return true;
+
+ llvm::Expected<const VarDecl *> CTUDeclOrError =
+ CTU.getCrossTUDefinition(VD, Opts->CTUDir, Opts->CTUIndexName,
+ Opts->DisplayCTUProgress);
+
+ if (!CTUDeclOrError) {
+ handleAllErrors(CTUDeclOrError.takeError(),
+ [&](const cross_tu::IndexError &IE) {
+ CTU.emitCrossTUDiagnostics(IE);
+ });
+ }
+
return true;
}
@@ -529,7 +578,11 @@ static bool isBisonFile(ASTContext &C) {
void AnalysisConsumer::runAnalysisOnTranslationUnit(ASTContext &C) {
BugReporter BR(*Mgr);
TranslationUnitDecl *TU = C.getTranslationUnitDecl();
+ if (SyntaxCheckTimer)
+ SyntaxCheckTimer->startTimer();
checkerMgr->runCheckersOnASTDecl(TU, *Mgr, BR);
+ if (SyntaxCheckTimer)
+ SyntaxCheckTimer->stopTimer();
// Run the AST-only checks using the order in which functions are defined.
// If inlining is not turned on, use the simplest function order for path
@@ -571,8 +624,6 @@ void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
if (Diags.hasErrorOccurred() || Diags.hasFatalErrorOccurred())
return;
- if (TUTotalTimer) TUTotalTimer->startTimer();
-
if (isBisonFile(C)) {
reportAnalyzerProgress("Skipping bison-generated file\n");
} else if (Opts->DisableAllChecks) {
@@ -585,8 +636,6 @@ void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
runAnalysisOnTranslationUnit(C);
}
- if (TUTotalTimer) TUTotalTimer->stopTimer();
-
// Count how many basic blocks we have not covered.
NumBlocksInAnalyzedFunctions = FunctionSummaries.getTotalNumBasicBlocks();
NumVisitedBlocksInAnalyzedFunctions =
@@ -710,8 +759,13 @@ void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
BugReporter BR(*Mgr);
- if (Mode & AM_Syntax)
+ if (Mode & AM_Syntax) {
+ if (SyntaxCheckTimer)
+ SyntaxCheckTimer->startTimer();
checkerMgr->runCheckersOnASTBody(D, *Mgr, BR);
+ if (SyntaxCheckTimer)
+ SyntaxCheckTimer->stopTimer();
+ }
if ((Mode & AM_Path) && checkerMgr->hasPathSensitiveCheckers()) {
RunPathSensitiveChecks(D, IMode, VisitedCallees);
if (IMode != ExprEngine::Inline_Minimal)
@@ -738,8 +792,12 @@ void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
ExprEngine Eng(CTU, *Mgr, VisitedCallees, &FunctionSummaries, IMode);
// Execute the worklist algorithm.
+ if (ExprEngineTimer)
+ ExprEngineTimer->startTimer();
Eng.ExecuteWorkList(Mgr->getAnalysisDeclContextManager().getStackFrame(D),
Mgr->options.MaxNodesPerTopLevelFunction);
+ if (ExprEngineTimer)
+ ExprEngineTimer->stopTimer();
if (!Mgr->options.DumpExplodedGraphTo.empty())
Eng.DumpGraph(Mgr->options.TrimGraph, Mgr->options.DumpExplodedGraphTo);
@@ -749,7 +807,11 @@ void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
Eng.ViewGraph(Mgr->options.TrimGraph);
// Display warnings.
+ if (BugReporterTimer)
+ BugReporterTimer->startTimer();
Eng.getBugReporter().FlushReports();
+ if (BugReporterTimer)
+ BugReporterTimer->stopTimer();
}
//===----------------------------------------------------------------------===//
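
The consumer above replaces the single TU-wide timer with one llvm::Timer per phase (syntax checks, path exploration, report post-processing), started and stopped around the phase each one measures. A minimal sketch of that arrangement; the phase bodies are placeholders, and the timers are stack-allocated here for brevity whereas the consumer keeps them as unique_ptrs guarded by the stats option.

#include "llvm/Support/Timer.h"

static void runPhases() {
  llvm::TimerGroup AnalyzerTimers("analyzer", "Analyzer timers");
  llvm::Timer SyntaxCheckTimer("syntaxchecks", "Syntax-based analysis time",
                               AnalyzerTimers);
  llvm::Timer ExprEngineTimer("exprengine", "Path exploration time",
                              AnalyzerTimers);

  SyntaxCheckTimer.startTimer();
  // ... run the AST-based checkers here ...
  SyntaxCheckTimer.stopTimer();

  ExprEngineTimer.startTimer();
  // ... run the path-sensitive engine here ...
  ExprEngineTimer.stopTimer();

  // The group reports the accumulated times once its timers are destroyed.
}

int main() { runPhases(); }
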
diff --git a/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp b/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
index 1c31c35b75e4..1e45ee96145a 100644
--- a/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
+++ b/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
@@ -1,9 +1,8 @@
//===--- CheckerRegistration.cpp - Registration for the Analyzer Checkers -===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -19,7 +18,6 @@
#include "clang/StaticAnalyzer/Frontend/CheckerRegistry.h"
#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
@@ -34,46 +32,52 @@ std::unique_ptr<CheckerManager> ento::createCheckerManager(
DiagnosticsEngine &diags) {
auto checkerMgr = llvm::make_unique<CheckerManager>(context, opts);
- CheckerRegistry allCheckers(plugins, diags);
-
- for (const auto &Fn : checkerRegistrationFns)
- Fn(allCheckers);
+ CheckerRegistry allCheckers(plugins, diags, opts, context.getLangOpts(),
+ checkerRegistrationFns);
- allCheckers.initializeManager(*checkerMgr, opts);
- allCheckers.validateCheckerOptions(opts);
+ allCheckers.initializeManager(*checkerMgr);
+ allCheckers.validateCheckerOptions();
checkerMgr->finishedCheckerRegistration();
return checkerMgr;
}
void ento::printCheckerHelp(raw_ostream &out, ArrayRef<std::string> plugins,
- DiagnosticsEngine &diags) {
+ AnalyzerOptions &anopts,
+ DiagnosticsEngine &diags,
+ const LangOptions &langOpts) {
out << "OVERVIEW: Clang Static Analyzer Checkers List\n\n";
out << "USAGE: -analyzer-checker <CHECKER or PACKAGE,...>\n\n";
- CheckerRegistry(plugins, diags).printHelp(out);
+ CheckerRegistry(plugins, diags, anopts, langOpts)
+ .printCheckerWithDescList(out);
}
void ento::printEnabledCheckerList(raw_ostream &out,
ArrayRef<std::string> plugins,
- const AnalyzerOptions &opts,
- DiagnosticsEngine &diags) {
+ AnalyzerOptions &anopts,
+ DiagnosticsEngine &diags,
+ const LangOptions &langOpts) {
out << "OVERVIEW: Clang Static Analyzer Enabled Checkers List\n\n";
- CheckerRegistry(plugins, diags).printList(out, opts);
+ CheckerRegistry(plugins, diags, anopts, langOpts)
+ .printEnabledCheckerList(out);
+}
+
+void ento::printCheckerConfigList(raw_ostream &OS,
+ ArrayRef<std::string> plugins,
+ AnalyzerOptions &opts,
+ DiagnosticsEngine &diags,
+ const LangOptions &LangOpts) {
+ CheckerRegistry(plugins, diags, opts, LangOpts)
+ .printCheckerOptionList(OS);
}
void ento::printAnalyzerConfigList(raw_ostream &out) {
out << "OVERVIEW: Clang Static Analyzer -analyzer-config Option List\n\n";
- out << "USAGE: clang -cc1 [CLANG_OPTIONS] -analyzer-config "
- "<OPTION1=VALUE,OPTION2=VALUE,...>\n\n";
- out << " clang -cc1 [CLANG_OPTIONS] -analyzer-config OPTION1=VALUE, "
- "-analyzer-config OPTION2=VALUE, ...\n\n";
- out << " clang [CLANG_OPTIONS] -Xclang -analyzer-config -Xclang"
- "<OPTION1=VALUE,OPTION2=VALUE,...>\n\n";
- out << " clang [CLANG_OPTIONS] -Xclang -analyzer-config -Xclang "
- "OPTION1=VALUE, -Xclang -analyzer-config -Xclang "
- "OPTION2=VALUE, ...\n\n";
+ out << "USAGE: -analyzer-config <OPTION1=VALUE,OPTION2=VALUE,...>\n\n";
+ out << " -analyzer-config OPTION1=VALUE, -analyzer-config "
+ "OPTION2=VALUE, ...\n\n";
out << "OPTIONS:\n\n";
using OptionAndDescriptionTy = std::pair<StringRef, std::string>;
@@ -107,31 +111,10 @@ void ento::printAnalyzerConfigList(raw_ostream &out) {
return LHS.first < RHS.first;
});
- constexpr size_t MinLineWidth = 70;
- constexpr size_t PadForOpt = 2;
- constexpr size_t OptionWidth = 30;
- constexpr size_t PadForDesc = PadForOpt + OptionWidth;
- static_assert(MinLineWidth > PadForDesc, "MinLineWidth must be greater!");
-
- llvm::formatted_raw_ostream FOut(out);
-
for (const auto &Pair : PrintableOptions) {
- FOut.PadToColumn(PadForOpt) << Pair.first;
-
- // If the buffer's length is greater then PadForDesc, print a newline.
- if (FOut.getColumn() > PadForDesc)
- FOut << '\n';
-
- FOut.PadToColumn(PadForDesc);
-
- for (char C : Pair.second) {
- if (FOut.getColumn() > MinLineWidth && C == ' ') {
- FOut << '\n';
- FOut.PadToColumn(PadForDesc);
- continue;
- }
- FOut << C;
- }
- FOut << "\n\n";
+ AnalyzerOptions::printFormattedEntry(out, Pair, /*InitialPad*/ 2,
+ /*EntryWidth*/ 30,
+ /*MinLineWidth*/ 70);
+ out << "\n\n";
}
}
diff --git a/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp b/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp
index 620c0e588906..3fd4c36947cb 100644
--- a/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp
+++ b/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp
@@ -1,19 +1,19 @@
//===- CheckerRegistry.cpp - Maintains all available checkers -------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Frontend/CheckerRegistry.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/LLVM.h"
+#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
-#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringMap.h"
@@ -29,219 +29,592 @@ using llvm::sys::DynamicLibrary;
using RegisterCheckersFn = void (*)(CheckerRegistry &);
-static bool isCompatibleAPIVersion(const char *versionString) {
- // If the version string is null, it's not an analyzer plugin.
- if (!versionString)
+static bool isCompatibleAPIVersion(const char *VersionString) {
+ // If the version string is null, it's not an analyzer plugin.
+ if (!VersionString)
return false;
// For now, none of the static analyzer API is considered stable.
// Versions must match exactly.
- return strcmp(versionString, CLANG_ANALYZER_API_VERSION_STRING) == 0;
+ return strcmp(VersionString, CLANG_ANALYZER_API_VERSION_STRING) == 0;
+}
+
+namespace {
+template <class T> struct FullNameLT {
+ bool operator()(const T &Lhs, const T &Rhs) {
+ return Lhs.FullName < Rhs.FullName;
+ }
+};
+
+using PackageNameLT = FullNameLT<CheckerRegistry::PackageInfo>;
+using CheckerNameLT = FullNameLT<CheckerRegistry::CheckerInfo>;
+} // end of anonymous namespace
+
+template <class CheckerOrPackageInfoList>
+static
+ typename std::conditional<std::is_const<CheckerOrPackageInfoList>::value,
+ typename CheckerOrPackageInfoList::const_iterator,
+ typename CheckerOrPackageInfoList::iterator>::type
+ binaryFind(CheckerOrPackageInfoList &Collection, StringRef FullName) {
+
+ using CheckerOrPackage = typename CheckerOrPackageInfoList::value_type;
+ using CheckerOrPackageFullNameLT = FullNameLT<CheckerOrPackage>;
+
+ assert(std::is_sorted(Collection.begin(), Collection.end(),
+ CheckerOrPackageFullNameLT{}) &&
+ "In order to efficiently gather checkers/packages, this function "
+ "expects them to be already sorted!");
+
+ return llvm::lower_bound(Collection, CheckerOrPackage(FullName),
+ CheckerOrPackageFullNameLT{});
+}
+
+static constexpr char PackageSeparator = '.';
+
+static bool isInPackage(const CheckerRegistry::CheckerInfo &Checker,
+ StringRef PackageName) {
+ // Does the checker's full name have the package as a prefix?
+ if (!Checker.FullName.startswith(PackageName))
+ return false;
+
+ // Is the package actually just the name of a specific checker?
+ if (Checker.FullName.size() == PackageName.size())
+ return true;
+
+ // Is the checker in the package (or a subpackage)?
+ if (Checker.FullName[PackageName.size()] == PackageSeparator)
+ return true;
+
+ return false;
}
-CheckerRegistry::CheckerRegistry(ArrayRef<std::string> plugins,
- DiagnosticsEngine &diags) : Diags(diags) {
+CheckerRegistry::CheckerInfoListRange
+CheckerRegistry::getMutableCheckersForCmdLineArg(StringRef CmdLineArg) {
+ auto It = binaryFind(Checkers, CmdLineArg);
+
+ if (!isInPackage(*It, CmdLineArg))
+ return {Checkers.end(), Checkers.end()};
+
+ // See how large the package is.
+ // If the package doesn't exist, assume the option refers to a single
+ // checker.
+ size_t Size = 1;
+ llvm::StringMap<size_t>::const_iterator PackageSize =
+ PackageSizes.find(CmdLineArg);
+
+ if (PackageSize != PackageSizes.end())
+ Size = PackageSize->getValue();
+
+ return {It, It + Size};
+}
+
+CheckerRegistry::CheckerRegistry(
+ ArrayRef<std::string> Plugins, DiagnosticsEngine &Diags,
+ AnalyzerOptions &AnOpts, const LangOptions &LangOpts,
+ ArrayRef<std::function<void(CheckerRegistry &)>> CheckerRegistrationFns)
+ : Diags(Diags), AnOpts(AnOpts), LangOpts(LangOpts) {
+
+ // Register builtin checkers.
#define GET_CHECKERS
-#define CHECKER(FULLNAME, CLASS, HELPTEXT, DOC_URI) \
- addChecker(register##CLASS, FULLNAME, HELPTEXT, DOC_URI);
+#define CHECKER(FULLNAME, CLASS, HELPTEXT, DOC_URI, IS_HIDDEN) \
+ addChecker(register##CLASS, shouldRegister##CLASS, FULLNAME, HELPTEXT, \
+ DOC_URI, IS_HIDDEN);
+
+#define GET_PACKAGES
+#define PACKAGE(FULLNAME) addPackage(FULLNAME);
+
#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
#undef CHECKER
#undef GET_CHECKERS
+#undef PACKAGE
+#undef GET_PACKAGES
- for (ArrayRef<std::string>::iterator i = plugins.begin(), e = plugins.end();
- i != e; ++i) {
+ // Register checkers from plugins.
+ for (const std::string &Plugin : Plugins) {
// Get access to the plugin.
- std::string err;
- DynamicLibrary lib = DynamicLibrary::getPermanentLibrary(i->c_str(), &err);
- if (!lib.isValid()) {
- diags.Report(diag::err_fe_unable_to_load_plugin) << *i << err;
+ std::string ErrorMsg;
+ DynamicLibrary Lib =
+ DynamicLibrary::getPermanentLibrary(Plugin.c_str(), &ErrorMsg);
+ if (!Lib.isValid()) {
+ Diags.Report(diag::err_fe_unable_to_load_plugin) << Plugin << ErrorMsg;
continue;
}
- // See if it's compatible with this build of clang.
- const char *pluginAPIVersion =
- (const char *) lib.getAddressOfSymbol("clang_analyzerAPIVersionString");
- if (!isCompatibleAPIVersion(pluginAPIVersion)) {
+ // See if it's compatible with this build of clang.
+ const char *PluginAPIVersion = static_cast<const char *>(
+ Lib.getAddressOfSymbol("clang_analyzerAPIVersionString"));
+
+ if (!isCompatibleAPIVersion(PluginAPIVersion)) {
Diags.Report(diag::warn_incompatible_analyzer_plugin_api)
- << llvm::sys::path::filename(*i);
+ << llvm::sys::path::filename(Plugin);
Diags.Report(diag::note_incompatible_analyzer_plugin_api)
- << CLANG_ANALYZER_API_VERSION_STRING
- << pluginAPIVersion;
+ << CLANG_ANALYZER_API_VERSION_STRING << PluginAPIVersion;
continue;
}
// Register its checkers.
- RegisterCheckersFn registerPluginCheckers =
- (RegisterCheckersFn) (intptr_t) lib.getAddressOfSymbol(
- "clang_registerCheckers");
- if (registerPluginCheckers)
- registerPluginCheckers(*this);
+ RegisterCheckersFn RegisterPluginCheckers =
+ reinterpret_cast<RegisterCheckersFn>(
+ Lib.getAddressOfSymbol("clang_registerCheckers"));
+ if (RegisterPluginCheckers)
+ RegisterPluginCheckers(*this);
+ }
+
+ // Register statically linked checkers that aren't generated from the tblgen
+ // file, but rather have their registration function passed in via
+ // checkerRegistrationFns.
+
+ for (const auto &Fn : CheckerRegistrationFns)
+ Fn(*this);
+
+ // Sort checkers for efficient collection.
+ // FIXME: Alphabetical sort puts 'experimental' in the middle.
+ // Would it be better to name it '~experimental' or something else
+ // that's ASCIIbetically last?
+ llvm::sort(Packages, PackageNameLT{});
+ llvm::sort(Checkers, CheckerNameLT{});
+
+#define GET_CHECKER_DEPENDENCIES
+
+#define CHECKER_DEPENDENCY(FULLNAME, DEPENDENCY) \
+ addDependency(FULLNAME, DEPENDENCY);
+
+#define GET_CHECKER_OPTIONS
+#define CHECKER_OPTION(TYPE, FULLNAME, CMDFLAG, DESC, DEFAULT_VAL, DEVELOPMENT_STATUS, IS_HIDDEN) \
+ addCheckerOption(TYPE, FULLNAME, CMDFLAG, DEFAULT_VAL, DESC, DEVELOPMENT_STATUS, IS_HIDDEN);
+
+#define GET_PACKAGE_OPTIONS
+#define PACKAGE_OPTION(TYPE, FULLNAME, CMDFLAG, DESC, DEFAULT_VAL, DEVELOPMENT_STATUS, IS_HIDDEN) \
+ addPackageOption(TYPE, FULLNAME, CMDFLAG, DEFAULT_VAL, DESC, DEVELOPMENT_STATUS, IS_HIDDEN);
+
+#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
+#undef CHECKER_DEPENDENCY
+#undef GET_CHECKER_DEPENDENCIES
+#undef CHECKER_OPTION
+#undef GET_CHECKER_OPTIONS
+#undef PACKAGE_OPTION
+#undef GET_PACKAGE_OPTIONS
+
+ resolveDependencies();
+ resolveCheckerAndPackageOptions();
+
+ // Parse '-analyzer-checker' and '-analyzer-disable-checker' options from the
+ // command line.
+ for (const std::pair<std::string, bool> &Opt : AnOpts.CheckersControlList) {
+ CheckerInfoListRange CheckerForCmdLineArg =
+ getMutableCheckersForCmdLineArg(Opt.first);
+
+ if (CheckerForCmdLineArg.begin() == CheckerForCmdLineArg.end()) {
+ Diags.Report(diag::err_unknown_analyzer_checker) << Opt.first;
+ Diags.Report(diag::note_suggest_disabling_all_checkers);
+ }
+
+ for (CheckerInfo &checker : CheckerForCmdLineArg) {
+ checker.State = Opt.second ? StateFromCmdLine::State_Enabled
+ : StateFromCmdLine::State_Disabled;
+ }
}
}
-static constexpr char PackageSeparator = '.';
+/// Collects dependencies in \p Ret, returns false on failure.
+static bool
+collectDependenciesImpl(const CheckerRegistry::ConstCheckerInfoList &Deps,
+ const LangOptions &LO,
+ CheckerRegistry::CheckerInfoSet &Ret);
+
+/// Collects the dependencies of \p checker. Returns None on failure.
+LLVM_NODISCARD
+static llvm::Optional<CheckerRegistry::CheckerInfoSet>
+collectDependencies(const CheckerRegistry::CheckerInfo &checker,
+ const LangOptions &LO) {
+
+ CheckerRegistry::CheckerInfoSet Ret;
+ // Add dependencies to the enabled checkers only if all of them can be
+ // enabled.
+ if (!collectDependenciesImpl(checker.Dependencies, LO, Ret))
+ return None;
+
+ return Ret;
+}
+
+static bool
+collectDependenciesImpl(const CheckerRegistry::ConstCheckerInfoList &Deps,
+ const LangOptions &LO,
+ CheckerRegistry::CheckerInfoSet &Ret) {
+
+ for (const CheckerRegistry::CheckerInfo *Dependency : Deps) {
-static bool checkerNameLT(const CheckerRegistry::CheckerInfo &a,
- const CheckerRegistry::CheckerInfo &b) {
- return a.FullName < b.FullName;
+ if (Dependency->isDisabled(LO))
+ return false;
+
+ // Collect dependencies recursively.
+ if (!collectDependenciesImpl(Dependency->Dependencies, LO, Ret))
+ return false;
+
+ Ret.insert(Dependency);
+ }
+
+ return true;
}
-static bool isInPackage(const CheckerRegistry::CheckerInfo &checker,
- StringRef packageName) {
- // Does the checker's full name have the package as a prefix?
- if (!checker.FullName.startswith(packageName))
- return false;
+CheckerRegistry::CheckerInfoSet CheckerRegistry::getEnabledCheckers() const {
- // Is the package actually just the name of a specific checker?
- if (checker.FullName.size() == packageName.size())
- return true;
+ CheckerInfoSet EnabledCheckers;
- // Is the checker in the package (or a subpackage)?
- if (checker.FullName[packageName.size()] == PackageSeparator)
- return true;
+ for (const CheckerInfo &Checker : Checkers) {
+ if (!Checker.isEnabled(LangOpts))
+ continue;
- return false;
+ // Recursively enable its dependencies.
+ llvm::Optional<CheckerInfoSet> Deps =
+ collectDependencies(Checker, LangOpts);
+
+ if (!Deps) {
+ // If we failed to enable any of the dependencies, don't enable this
+ // checker.
+ continue;
+ }
+
+ // Note that set_union also preserves the order of insertion.
+ EnabledCheckers.set_union(*Deps);
+
+ // Enable the checker.
+ EnabledCheckers.insert(&Checker);
+ }
+
+ return EnabledCheckers;
}
-CheckerRegistry::CheckerInfoSet CheckerRegistry::getEnabledCheckers(
- const AnalyzerOptions &Opts) const {
+void CheckerRegistry::resolveDependencies() {
+ for (const std::pair<StringRef, StringRef> &Entry : Dependencies) {
+ auto CheckerIt = binaryFind(Checkers, Entry.first);
+ assert(CheckerIt != Checkers.end() && CheckerIt->FullName == Entry.first &&
+ "Failed to find the checker while attempting to set up its "
+ "dependencies!");
- assert(std::is_sorted(Checkers.begin(), Checkers.end(), checkerNameLT) &&
- "In order to efficiently gather checkers, this function expects them "
- "to be already sorted!");
+ auto DependencyIt = binaryFind(Checkers, Entry.second);
+ assert(DependencyIt != Checkers.end() &&
+ DependencyIt->FullName == Entry.second &&
+ "Failed to find the dependency of a checker!");
- CheckerInfoSet enabledCheckers;
- const auto end = Checkers.cend();
+ CheckerIt->Dependencies.emplace_back(&*DependencyIt);
+ }
- for (const std::pair<std::string, bool> &opt : Opts.CheckersControlList) {
- // Use a binary search to find the possible start of the package.
- CheckerRegistry::CheckerInfo packageInfo(nullptr, opt.first, "", "");
- auto firstRelatedChecker =
- std::lower_bound(Checkers.cbegin(), end, packageInfo, checkerNameLT);
+ Dependencies.clear();
+}
- if (firstRelatedChecker == end ||
- !isInPackage(*firstRelatedChecker, opt.first)) {
- Diags.Report(diag::err_unknown_analyzer_checker) << opt.first;
- Diags.Report(diag::note_suggest_disabling_all_checkers);
- return {};
+void CheckerRegistry::addDependency(StringRef FullName, StringRef Dependency) {
+ Dependencies.emplace_back(FullName, Dependency);
+}
+
+/// Inserts the checker/package option into AnalyzerOptions' config table, and
+/// validates it if the user supplied it on the command line.
+static void insertAndValidate(StringRef FullName,
+ const CheckerRegistry::CmdLineOption &Option,
+ AnalyzerOptions &AnOpts,
+ DiagnosticsEngine &Diags) {
+
+ std::string FullOption = (FullName + ":" + Option.OptionName).str();
+
+ auto It = AnOpts.Config.insert({FullOption, Option.DefaultValStr});
+
+ // Insertion was successful -- CmdLineOption's constructor will validate
+ // whether values received from plugins or TableGen files are correct.
+ if (It.second)
+ return;
+
+ // Insertion failed, so the user supplied this package/checker option on the
+ // command line. If the supplied value is invalid, we'll restore the option
+ // to its default value, and if we're in non-compatibility mode, we'll also
+ // emit an error.
+
+ StringRef SuppliedValue = It.first->getValue();
+
+ if (Option.OptionType == "bool") {
+ if (SuppliedValue != "true" && SuppliedValue != "false") {
+ if (AnOpts.ShouldEmitErrorsOnInvalidConfigValue) {
+ Diags.Report(diag::err_analyzer_checker_option_invalid_input)
+ << FullOption << "a boolean value";
+ }
+
+ It.first->setValue(Option.DefaultValStr);
}
+ return;
+ }
- // See how large the package is.
- // If the package doesn't exist, assume the option refers to a single
- // checker.
- size_t size = 1;
- llvm::StringMap<size_t>::const_iterator packageSize =
- Packages.find(opt.first);
- if (packageSize != Packages.end())
- size = packageSize->getValue();
+ if (Option.OptionType == "int") {
+ int Tmp;
+ bool HasFailed = SuppliedValue.getAsInteger(0, Tmp);
+ if (HasFailed) {
+ if (AnOpts.ShouldEmitErrorsOnInvalidConfigValue) {
+ Diags.Report(diag::err_analyzer_checker_option_invalid_input)
+ << FullOption << "an integer value";
+ }
- // Step through all the checkers in the package.
- for (auto lastRelatedChecker = firstRelatedChecker+size;
- firstRelatedChecker != lastRelatedChecker; ++firstRelatedChecker)
- if (opt.second)
- enabledCheckers.insert(&*firstRelatedChecker);
- else
- enabledCheckers.remove(&*firstRelatedChecker);
+ It.first->setValue(Option.DefaultValStr);
+ }
+ return;
}
+}
+
+template <class T>
+static void
+insertOptionToCollection(StringRef FullName, T &Collection,
+ const CheckerRegistry::CmdLineOption &Option,
+ AnalyzerOptions &AnOpts, DiagnosticsEngine &Diags) {
+ auto It = binaryFind(Collection, FullName);
+ assert(It != Collection.end() &&
+ "Failed to find the checker while attempting to add a command line "
+ "option to it!");
- return enabledCheckers;
+ insertAndValidate(FullName, Option, AnOpts, Diags);
+
+ It->CmdLineOptions.emplace_back(Option);
+}
+
+void CheckerRegistry::resolveCheckerAndPackageOptions() {
+ for (const std::pair<StringRef, CmdLineOption> &CheckerOptEntry :
+ CheckerOptions) {
+ insertOptionToCollection(CheckerOptEntry.first, Checkers,
+ CheckerOptEntry.second, AnOpts, Diags);
+ }
+ CheckerOptions.clear();
+
+ for (const std::pair<StringRef, CmdLineOption> &PackageOptEntry :
+ PackageOptions) {
+ insertOptionToCollection(PackageOptEntry.first, Packages,
+ PackageOptEntry.second, AnOpts, Diags);
+ }
+ PackageOptions.clear();
+}
+
+void CheckerRegistry::addPackage(StringRef FullName) {
+ Packages.emplace_back(PackageInfo(FullName));
+}
+
+void CheckerRegistry::addPackageOption(StringRef OptionType,
+ StringRef PackageFullName,
+ StringRef OptionName,
+ StringRef DefaultValStr,
+ StringRef Description,
+ StringRef DevelopmentStatus,
+ bool IsHidden) {
+ PackageOptions.emplace_back(
+ PackageFullName, CmdLineOption{OptionType, OptionName, DefaultValStr,
+ Description, DevelopmentStatus, IsHidden});
}
-void CheckerRegistry::addChecker(InitializationFunction Fn, StringRef Name,
- StringRef Desc, StringRef DocsUri) {
- Checkers.emplace_back(Fn, Name, Desc, DocsUri);
+void CheckerRegistry::addChecker(InitializationFunction Rfn,
+ ShouldRegisterFunction Sfn, StringRef Name,
+ StringRef Desc, StringRef DocsUri,
+ bool IsHidden) {
+ Checkers.emplace_back(Rfn, Sfn, Name, Desc, DocsUri, IsHidden);
// Record the presence of the checker in its packages.
- StringRef packageName, leafName;
- std::tie(packageName, leafName) = Name.rsplit(PackageSeparator);
- while (!leafName.empty()) {
- Packages[packageName] += 1;
- std::tie(packageName, leafName) = packageName.rsplit(PackageSeparator);
+ StringRef PackageName, LeafName;
+ std::tie(PackageName, LeafName) = Name.rsplit(PackageSeparator);
+ while (!LeafName.empty()) {
+ PackageSizes[PackageName] += 1;
+ std::tie(PackageName, LeafName) = PackageName.rsplit(PackageSeparator);
}
}
-void CheckerRegistry::initializeManager(CheckerManager &checkerMgr,
- const AnalyzerOptions &Opts) const {
- // Sort checkers for efficient collection.
- llvm::sort(Checkers, checkerNameLT);
+void CheckerRegistry::addCheckerOption(StringRef OptionType,
+ StringRef CheckerFullName,
+ StringRef OptionName,
+ StringRef DefaultValStr,
+ StringRef Description,
+ StringRef DevelopmentStatus,
+ bool IsHidden) {
+ CheckerOptions.emplace_back(
+ CheckerFullName, CmdLineOption{OptionType, OptionName, DefaultValStr,
+ Description, DevelopmentStatus, IsHidden});
+}
+void CheckerRegistry::initializeManager(CheckerManager &CheckerMgr) const {
// Collect checkers enabled by the options.
- CheckerInfoSet enabledCheckers = getEnabledCheckers(Opts);
+ CheckerInfoSet enabledCheckers = getEnabledCheckers();
// Initialize the CheckerManager with all enabled checkers.
- for (const auto *i : enabledCheckers) {
- checkerMgr.setCurrentCheckName(CheckName(i->FullName));
- i->Initialize(checkerMgr);
+ for (const auto *Checker : enabledCheckers) {
+ CheckerMgr.setCurrentCheckName(CheckName(Checker->FullName));
+ Checker->Initialize(CheckerMgr);
+ }
+}
+
+static void
+isOptionContainedIn(const CheckerRegistry::CmdLineOptionList &OptionList,
+ StringRef SuppliedChecker, StringRef SuppliedOption,
+ const AnalyzerOptions &AnOpts, DiagnosticsEngine &Diags) {
+
+ if (!AnOpts.ShouldEmitErrorsOnInvalidConfigValue)
+ return;
+
+ using CmdLineOption = CheckerRegistry::CmdLineOption;
+
+ auto SameOptName = [SuppliedOption](const CmdLineOption &Opt) {
+ return Opt.OptionName == SuppliedOption;
+ };
+
+ auto OptionIt = llvm::find_if(OptionList, SameOptName);
+
+ if (OptionIt == OptionList.end()) {
+ Diags.Report(diag::err_analyzer_checker_option_unknown)
+ << SuppliedChecker << SuppliedOption;
+ return;
}
}
-void CheckerRegistry::validateCheckerOptions(
- const AnalyzerOptions &opts) const {
- for (const auto &config : opts.Config) {
- size_t pos = config.getKey().find(':');
- if (pos == StringRef::npos)
+void CheckerRegistry::validateCheckerOptions() const {
+ for (const auto &Config : AnOpts.Config) {
+
+ StringRef SuppliedChecker;
+ StringRef SuppliedOption;
+ std::tie(SuppliedChecker, SuppliedOption) = Config.getKey().split(':');
+
+ if (SuppliedOption.empty())
continue;
- bool hasChecker = false;
- StringRef checkerName = config.getKey().substr(0, pos);
- for (const auto &checker : Checkers) {
- if (checker.FullName.startswith(checkerName) &&
- (checker.FullName.size() == pos || checker.FullName[pos] == '.')) {
- hasChecker = true;
- break;
- }
+ // AnalyzerOptions' config table contains the user input, so an entry could
+ // look like this:
+ //
+ // cor:NoFalsePositives=true
+ //
+ // Since lower_bound would look for the first element *not less* than "cor",
+ // it would return an iterator to the first checker in the core package, so
+ // we really have to use find here, which uses operator==.
+ auto CheckerIt = llvm::find(Checkers, CheckerInfo(SuppliedChecker));
+ if (CheckerIt != Checkers.end()) {
+ isOptionContainedIn(CheckerIt->CmdLineOptions, SuppliedChecker,
+ SuppliedOption, AnOpts, Diags);
+ continue;
}
- if (!hasChecker)
- Diags.Report(diag::err_unknown_analyzer_checker) << checkerName;
+
+ auto PackageIt = llvm::find(Packages, PackageInfo(SuppliedChecker));
+ if (PackageIt != Packages.end()) {
+ isOptionContainedIn(PackageIt->CmdLineOptions, SuppliedChecker,
+ SuppliedOption, AnOpts, Diags);
+ continue;
+ }
+
+ Diags.Report(diag::err_unknown_analyzer_checker) << SuppliedChecker;
}
}
-void CheckerRegistry::printHelp(raw_ostream &out,
- size_t maxNameChars) const {
- // FIXME: Alphabetical sort puts 'experimental' in the middle.
- // Would it be better to name it '~experimental' or something else
- // that's ASCIIbetically last?
- llvm::sort(Checkers, checkerNameLT);
-
+void CheckerRegistry::printCheckerWithDescList(raw_ostream &Out,
+ size_t MaxNameChars) const {
// FIXME: Print available packages.
- out << "CHECKERS:\n";
+ Out << "CHECKERS:\n";
// Find the maximum option length.
- size_t optionFieldWidth = 0;
- for (const auto &i : Checkers) {
+ size_t OptionFieldWidth = 0;
+ for (const auto &Checker : Checkers) {
// Limit the amount of padding we are willing to give up for alignment.
// Package.Name Description [Hidden]
- size_t nameLength = i.FullName.size();
- if (nameLength <= maxNameChars)
- optionFieldWidth = std::max(optionFieldWidth, nameLength);
+ size_t NameLength = Checker.FullName.size();
+ if (NameLength <= MaxNameChars)
+ OptionFieldWidth = std::max(OptionFieldWidth, NameLength);
}
- const size_t initialPad = 2;
- for (const auto &i : Checkers) {
- out.indent(initialPad) << i.FullName;
-
- int pad = optionFieldWidth - i.FullName.size();
+ const size_t InitialPad = 2;
+
+ auto Print = [=](llvm::raw_ostream &Out, const CheckerInfo &Checker,
+ StringRef Description) {
+ AnalyzerOptions::printFormattedEntry(Out, {Checker.FullName, Description},
+ InitialPad, OptionFieldWidth);
+ Out << '\n';
+ };
+
+ for (const auto &Checker : Checkers) {
+ // The order of these if branches is significant: we don't want to display
+ // developer checkers even in the alpha output. For example,
+ // alpha.cplusplus.IteratorModeling is a modeling checker, hence it's hidden
+ // by default, and users (even developers of alpha checkers) shouldn't
+ // normally tinker with whether it should be enabled.
+
+ if (Checker.IsHidden) {
+ if (AnOpts.ShowCheckerHelpDeveloper)
+ Print(Out, Checker, Checker.Desc);
+ continue;
+ }
- // Break on long option names.
- if (pad < 0) {
- out << '\n';
- pad = optionFieldWidth + initialPad;
+ if (Checker.FullName.startswith("alpha")) {
+ if (AnOpts.ShowCheckerHelpAlpha)
+ Print(Out, Checker,
+ ("(Enable only for development!) " + Checker.Desc).str());
+ continue;
}
- out.indent(pad + 2) << i.Desc;
- out << '\n';
+ if (AnOpts.ShowCheckerHelp)
+ Print(Out, Checker, Checker.Desc);
}
}
-void CheckerRegistry::printList(raw_ostream &out,
- const AnalyzerOptions &opts) const {
- // Sort checkers for efficient collection.
- llvm::sort(Checkers, checkerNameLT);
-
+void CheckerRegistry::printEnabledCheckerList(raw_ostream &Out) const {
// Collect checkers enabled by the options.
- CheckerInfoSet enabledCheckers = getEnabledCheckers(opts);
+ CheckerInfoSet EnabledCheckers = getEnabledCheckers();
+
+ for (const auto *i : EnabledCheckers)
+ Out << i->FullName << '\n';
+}
+
+void CheckerRegistry::printCheckerOptionList(raw_ostream &Out) const {
+ Out << "OVERVIEW: Clang Static Analyzer Checker and Package Option List\n\n";
+ Out << "USAGE: -analyzer-config <OPTION1=VALUE,OPTION2=VALUE,...>\n\n";
+ Out << " -analyzer-config OPTION1=VALUE, -analyzer-config "
+ "OPTION2=VALUE, ...\n\n";
+ Out << "OPTIONS:\n\n";
+
+ std::multimap<StringRef, const CmdLineOption &> OptionMap;
+
+ for (const CheckerInfo &Checker : Checkers) {
+ for (const CmdLineOption &Option : Checker.CmdLineOptions) {
+ OptionMap.insert({Checker.FullName, Option});
+ }
+ }
+
+ for (const PackageInfo &Package : Packages) {
+ for (const CmdLineOption &Option : Package.CmdLineOptions) {
+ OptionMap.insert({Package.FullName, Option});
+ }
+ }
+
+ auto Print = [] (llvm::raw_ostream &Out, StringRef FullOption, StringRef Desc) {
+ AnalyzerOptions::printFormattedEntry(Out, {FullOption, Desc},
+ /*InitialPad*/ 2,
+ /*EntryWidth*/ 50,
+ /*MinLineWidth*/ 90);
+ Out << "\n\n";
+ };
+ for (const std::pair<StringRef, const CmdLineOption &> &Entry : OptionMap) {
+ const CmdLineOption &Option = Entry.second;
+ std::string FullOption = (Entry.first + ":" + Option.OptionName).str();
+
+ std::string Desc =
+ ("(" + Option.OptionType + ") " + Option.Description + " (default: " +
+ (Option.DefaultValStr.empty() ? "\"\"" : Option.DefaultValStr) + ")")
+ .str();
+
+ // The order of these if branches is significant: we don't want to display
+ // hidden alpha checker options for -analyzer-checker-option-help-alpha.
+
+ if (Option.IsHidden) {
+ if (AnOpts.ShowCheckerOptionDeveloperList)
+ Print(Out, FullOption, Desc);
+ continue;
+ }
- for (const auto *i : enabledCheckers)
- out << i->FullName << '\n';
+ if (Option.DevelopmentStatus == "alpha" ||
+ Entry.first.startswith("alpha")) {
+ if (AnOpts.ShowCheckerOptionAlphaList)
+ Print(Out, FullOption,
+ llvm::Twine("(Enable only for development!) " + Desc).str());
+ continue;
+ }
+
+ if (AnOpts.ShowCheckerOptionList)
+ Print(Out, FullOption, Desc);
+ }
}
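
The GET_*/CHECKER_OPTION macro block in the CheckerRegistry constructor above is driven by the TableGen-generated Checkers.inc. A minimal sketch of how one such entry expands; the checker name, flag, and description here are hypothetical and not taken from the real generated file:

// Hypothetical Checkers.inc entry (illustrative only):
#ifdef GET_CHECKER_OPTIONS
CHECKER_OPTION("bool", "cplusplus.Example", "AggressiveMode",
               "Enable aggressive checking", "false", "released", false)
#endif

// With CHECKER_OPTION defined as in the constructor above, including the file
// turns the entry into a registration call (note that DESC and DEFAULT_VAL are
// swapped relative to the macro's parameter order):
//   addCheckerOption("bool", "cplusplus.Example", "AggressiveMode", "false",
//                    "Enable aggressive checking", "released", false);
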
diff --git a/lib/StaticAnalyzer/Frontend/FrontendActions.cpp b/lib/StaticAnalyzer/Frontend/FrontendActions.cpp
index b33608042ce3..a8af6b3d801a 100644
--- a/lib/StaticAnalyzer/Frontend/FrontendActions.cpp
+++ b/lib/StaticAnalyzer/Frontend/FrontendActions.cpp
@@ -1,9 +1,8 @@
//===--- FrontendActions.cpp ----------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp b/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp
index 60825ef7411d..276f7313b08f 100644
--- a/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp
+++ b/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp
@@ -1,9 +1,8 @@
//===--- ModelConsumer.cpp - ASTConsumer for consuming model files --------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/StaticAnalyzer/Frontend/ModelInjector.cpp b/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
index b1927c8401d6..fe5f59045cde 100644
--- a/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
+++ b/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
@@ -1,9 +1,8 @@
//===-- ModelInjector.cpp ---------------------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -83,8 +82,6 @@ void ModelInjector::onBodySynthesis(const NamedDecl *D) {
Instance.getDiagnostics().setSourceManager(&SM);
- Instance.setVirtualFileSystem(&CI.getVirtualFileSystem());
-
// The instance wants to take ownership, however DisableFree frontend option
// is set to true to avoid double free issues
Instance.setFileManager(&CI.getFileManager());
diff --git a/lib/StaticAnalyzer/Frontend/ModelInjector.h b/lib/StaticAnalyzer/Frontend/ModelInjector.h
index b1b6de9ef9d9..d2016c3b112c 100644
--- a/lib/StaticAnalyzer/Frontend/ModelInjector.h
+++ b/lib/StaticAnalyzer/Frontend/ModelInjector.h
@@ -1,9 +1,8 @@
//===-- ModelInjector.h -----------------------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/Tooling/ASTDiff/ASTDiff.cpp b/lib/Tooling/ASTDiff/ASTDiff.cpp
index 592e8572c770..69eff20bff7a 100644
--- a/lib/Tooling/ASTDiff/ASTDiff.cpp
+++ b/lib/Tooling/ASTDiff/ASTDiff.cpp
@@ -1,9 +1,8 @@
//===- ASTDiff.cpp - AST differencing implementation-----------*- C++ -*- -===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -238,8 +237,8 @@ struct PreorderVisitor : public RecursiveASTVisitor<PreorderVisitor> {
return true;
}
bool TraverseStmt(Stmt *S) {
- if (S)
- S = S->IgnoreImplicit();
+ if (auto *E = dyn_cast_or_null<Expr>(S))
+ S = E->IgnoreImplicit();
if (isNodeExcluded(Tree.AST.getSourceManager(), S))
return true;
auto SavedState = PreTraverse(S);
diff --git a/lib/Tooling/AllTUsExecution.cpp b/lib/Tooling/AllTUsExecution.cpp
index 0f172b782963..ca9db7a561be 100644
--- a/lib/Tooling/AllTUsExecution.cpp
+++ b/lib/Tooling/AllTUsExecution.cpp
@@ -1,15 +1,15 @@
//===- lib/Tooling/AllTUsExecution.cpp - Execute actions on all TUs. ------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "clang/Tooling/AllTUsExecution.h"
#include "clang/Tooling/ToolExecutorPluginRegistry.h"
#include "llvm/Support/ThreadPool.h"
+#include "llvm/Support/VirtualFileSystem.h"
namespace clang {
namespace tooling {
@@ -115,25 +115,22 @@ llvm::Error AllTUsToolExecutor::execute(
{
llvm::ThreadPool Pool(ThreadCount == 0 ? llvm::hardware_concurrency()
: ThreadCount);
- llvm::SmallString<128> InitialWorkingDir;
- if (auto EC = llvm::sys::fs::current_path(InitialWorkingDir)) {
- InitialWorkingDir = "";
- llvm::errs() << "Error while getting current working directory: "
- << EC.message() << "\n";
- }
for (std::string File : Files) {
Pool.async(
[&](std::string Path) {
Log("[" + std::to_string(Count()) + "/" + TotalNumStr +
"] Processing file " + Path);
- ClangTool Tool(Compilations, {Path});
+ // Each thread gets an independent copy of a VFS to allow different
+ // concurrent working directories.
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS =
+ llvm::vfs::createPhysicalFileSystem().release();
+ ClangTool Tool(Compilations, {Path},
+ std::make_shared<PCHContainerOperations>(), FS);
Tool.appendArgumentsAdjuster(Action.second);
Tool.appendArgumentsAdjuster(getDefaultArgumentsAdjusters());
for (const auto &FileAndContent : OverlayFiles)
Tool.mapVirtualFile(FileAndContent.first(),
FileAndContent.second);
- // Do not restore working dir from multiple threads to avoid races.
- Tool.setRestoreWorkingDir(false);
if (Tool.run(Action.first.get()))
AppendError(llvm::Twine("Failed to run action on ") + Path +
"\n");
@@ -142,11 +139,6 @@ llvm::Error AllTUsToolExecutor::execute(
}
// Make sure all tasks have finished before resetting the working directory.
Pool.wait();
- if (!InitialWorkingDir.empty()) {
- if (auto EC = llvm::sys::fs::set_current_path(InitialWorkingDir))
- llvm::errs() << "Error while restoring working directory: "
- << EC.message() << "\n";
- }
}
if (!ErrorMsg.empty())
diff --git a/lib/Tooling/ArgumentsAdjusters.cpp b/lib/Tooling/ArgumentsAdjusters.cpp
index c8e9c167422e..942b35df453e 100644
--- a/lib/Tooling/ArgumentsAdjusters.cpp
+++ b/lib/Tooling/ArgumentsAdjusters.cpp
@@ -1,9 +1,8 @@
//===- ArgumentsAdjusters.cpp - Command line arguments adjuster -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -24,14 +23,18 @@ namespace tooling {
ArgumentsAdjuster getClangSyntaxOnlyAdjuster() {
return [](const CommandLineArguments &Args, StringRef /*unused*/) {
CommandLineArguments AdjustedArgs;
+ bool HasSyntaxOnly = false;
for (size_t i = 0, e = Args.size(); i < e; ++i) {
StringRef Arg = Args[i];
// FIXME: Remove options that generate output.
if (!Arg.startswith("-fcolor-diagnostics") &&
!Arg.startswith("-fdiagnostics-color"))
AdjustedArgs.push_back(Args[i]);
+ if (Arg == "-fsyntax-only")
+ HasSyntaxOnly = true;
}
- AdjustedArgs.push_back("-fsyntax-only");
+ if (!HasSyntaxOnly)
+ AdjustedArgs.push_back("-fsyntax-only");
return AdjustedArgs;
};
}
@@ -108,5 +111,27 @@ ArgumentsAdjuster combineAdjusters(ArgumentsAdjuster First,
};
}
+ArgumentsAdjuster getStripPluginsAdjuster() {
+ return [](const CommandLineArguments &Args, StringRef /*unused*/) {
+ CommandLineArguments AdjustedArgs;
+ for (size_t I = 0, E = Args.size(); I != E; I++) {
+ // According to https://clang.llvm.org/docs/ClangPlugins.html
+ // plugin arguments are in the form:
+ // -Xclang {-load, -plugin, -plugin-arg-<plugin-name>, -add-plugin}
+ // -Xclang <arbitrary-argument>
+ if (I + 4 < E && Args[I] == "-Xclang" &&
+ (Args[I + 1] == "-load" || Args[I + 1] == "-plugin" ||
+ llvm::StringRef(Args[I + 1]).startswith("-plugin-arg-") ||
+ Args[I + 1] == "-add-plugin") &&
+ Args[I + 2] == "-Xclang") {
+ I += 3;
+ continue;
+ }
+ AdjustedArgs.push_back(Args[I]);
+ }
+ return AdjustedArgs;
+ };
+}
+
} // end namespace tooling
} // end namespace clang
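
A minimal sketch of applying the adjuster added above, assuming the matching declaration from this import is present in ArgumentsAdjusters.h; the plugin path and source file name are hypothetical:

#include "clang/Tooling/ArgumentsAdjusters.h"
#include "llvm/Support/raw_ostream.h"

using namespace clang::tooling;

int main() {
  CommandLineArguments Args = {"clang++", "-Xclang", "-load", "-Xclang",
                               "MyPlugin.so", "-c", "foo.cpp"};
  // Strip the plugin flags, then make sure a single -fsyntax-only is present.
  ArgumentsAdjuster Adjust =
      combineAdjusters(getStripPluginsAdjuster(), getClangSyntaxOnlyAdjuster());
  for (const std::string &Arg : Adjust(Args, "foo.cpp"))
    llvm::outs() << Arg << ' ';
  llvm::outs() << '\n'; // clang++ -c foo.cpp -fsyntax-only
}
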
diff --git a/lib/Tooling/CommonOptionsParser.cpp b/lib/Tooling/CommonOptionsParser.cpp
index 74ad4e83ee3f..f7956f7998f5 100644
--- a/lib/Tooling/CommonOptionsParser.cpp
+++ b/lib/Tooling/CommonOptionsParser.cpp
@@ -1,9 +1,8 @@
//===--- CommonOptionsParser.cpp - common options for clang tools ---------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -40,7 +39,7 @@ const char *const CommonOptionsParser::HelpMessage =
"\tCMake option to get this output). When no build path is specified,\n"
"\ta search for compile_commands.json will be attempted through all\n"
"\tparent paths of the first input file . See:\n"
- "\thttp://clang.llvm.org/docs/HowToSetupToolingForLLVM.html for an\n"
+ "\thttps://clang.llvm.org/docs/HowToSetupToolingForLLVM.html for an\n"
"\texample of setting up Clang Tooling on a source tree.\n"
"\n"
"<source0> ... specify the paths of source files. These paths are\n"
@@ -84,8 +83,6 @@ std::vector<CompileCommand> ArgumentsAdjustingCompilations::adjustCommands(
llvm::Error CommonOptionsParser::init(
int &argc, const char **argv, cl::OptionCategory &Category,
llvm::cl::NumOccurrencesFlag OccurrencesFlag, const char *Overview) {
- static cl::opt<bool> Help("h", cl::desc("Alias for -help"), cl::Hidden,
- cl::sub(*cl::AllSubCommands));
static cl::opt<std::string> BuildPath("p", cl::desc("Build path"),
cl::Optional, cl::cat(Category),
diff --git a/lib/Tooling/CompilationDatabase.cpp b/lib/Tooling/CompilationDatabase.cpp
index cce8e1f1df24..4c64750bef19 100644
--- a/lib/Tooling/CompilationDatabase.cpp
+++ b/lib/Tooling/CompilationDatabase.cpp
@@ -1,9 +1,8 @@
//===- CompilationDatabase.cpp --------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Tooling/Core/Diagnostic.cpp b/lib/Tooling/Core/Diagnostic.cpp
index e3a33d9a3755..235bd7fc1433 100644
--- a/lib/Tooling/Core/Diagnostic.cpp
+++ b/lib/Tooling/Core/Diagnostic.cpp
@@ -1,9 +1,8 @@
//===--- Diagnostic.cpp - Framework for clang diagnostics tools ----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -13,6 +12,7 @@
#include "clang/Tooling/Core/Diagnostic.h"
#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/STLExtras.h"
namespace clang {
namespace tooling {
@@ -41,11 +41,21 @@ Diagnostic::Diagnostic(llvm::StringRef DiagnosticName,
Diagnostic::Diagnostic(llvm::StringRef DiagnosticName,
const DiagnosticMessage &Message,
- const llvm::StringMap<Replacements> &Fix,
const SmallVector<DiagnosticMessage, 1> &Notes,
Level DiagLevel, llvm::StringRef BuildDirectory)
- : DiagnosticName(DiagnosticName), Message(Message), Fix(Fix), Notes(Notes),
+ : DiagnosticName(DiagnosticName), Message(Message), Notes(Notes),
DiagLevel(DiagLevel), BuildDirectory(BuildDirectory) {}
+const llvm::StringMap<Replacements> *selectFirstFix(const Diagnostic& D) {
+ if (!D.Message.Fix.empty())
+ return &D.Message.Fix;
+ auto Iter = llvm::find_if(D.Notes, [](const tooling::DiagnosticMessage &D) {
+ return !D.Fix.empty();
+ });
+ if (Iter != D.Notes.end())
+ return &Iter->Fix;
+ return nullptr;
+}
+
} // end namespace tooling
} // end namespace clang
diff --git a/lib/Tooling/Core/Lookup.cpp b/lib/Tooling/Core/Lookup.cpp
index cc448d144e2c..735a5df5ed21 100644
--- a/lib/Tooling/Core/Lookup.cpp
+++ b/lib/Tooling/Core/Lookup.cpp
@@ -1,9 +1,8 @@
//===--- Lookup.cpp - Framework for clang refactoring tools ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -15,6 +14,8 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclarationName.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/SmallVector.h"
using namespace clang;
using namespace clang::tooling;
@@ -115,38 +116,72 @@ static bool isFullyQualified(const NestedNameSpecifier *NNS) {
return false;
}
-// Returns true if spelling symbol \p QName as \p Spelling in \p UseContext is
-// ambiguous. For example, if QName is "::y::bar" and the spelling is "y::bar"
-// in `UseContext` "a" that contains a nested namespace "a::y", then "y::bar"
-// can be resolved to ::a::y::bar, which can cause compile error.
+// Adds more scope specifiers to the spelled name until the spelling is not
+// ambiguous. A spelling is ambiguous if the resolution of the symbol is
+// ambiguous. For example, if QName is "::y::bar", the spelling is "y::bar", and
+// context contains a nested namespace "a::y", then "y::bar" can be resolved to
+// ::a::y::bar in the context, which can cause compile error.
// FIXME: consider using namespaces.
-static bool isAmbiguousNameInScope(StringRef Spelling, StringRef QName,
- const DeclContext &UseContext) {
+static std::string disambiguateSpellingInScope(StringRef Spelling,
+ StringRef QName,
+ const DeclContext &UseContext,
+ SourceLocation UseLoc) {
assert(QName.startswith("::"));
+ assert(QName.endswith(Spelling));
if (Spelling.startswith("::"))
- return false;
+ return Spelling;
- // Lookup the first component of Spelling in all enclosing namespaces and
- // check if there is any existing symbols with the same name but in different
- // scope.
- StringRef Head = Spelling.split("::").first;
+ auto UnspelledSpecifier = QName.drop_back(Spelling.size());
+ llvm::SmallVector<llvm::StringRef, 2> UnspelledScopes;
+ UnspelledSpecifier.split(UnspelledScopes, "::", /*MaxSplit=*/-1,
+ /*KeepEmpty=*/false);
- llvm::SmallVector<const NamespaceDecl *, 4> UseNamespaces =
+ llvm::SmallVector<const NamespaceDecl *, 4> EnclosingNamespaces =
getAllNamedNamespaces(&UseContext);
auto &AST = UseContext.getParentASTContext();
StringRef TrimmedQName = QName.substr(2);
- for (const auto *NS : UseNamespaces) {
- auto LookupRes = NS->lookup(DeclarationName(&AST.Idents.get(Head)));
- if (!LookupRes.empty()) {
- for (const NamedDecl *Res : LookupRes)
- if (!TrimmedQName.startswith(Res->getQualifiedNameAsString()))
- return true;
+ const auto &SM = UseContext.getParentASTContext().getSourceManager();
+ UseLoc = SM.getSpellingLoc(UseLoc);
+
+ auto IsAmbiguousSpelling = [&](const llvm::StringRef CurSpelling) {
+ if (CurSpelling.startswith("::"))
+ return false;
+ // Lookup the first component of Spelling in all enclosing namespaces
+ // and check if there is any existing symbols with the same name but in
+ // different scope.
+ StringRef Head = CurSpelling.split("::").first;
+ for (const auto *NS : EnclosingNamespaces) {
+ auto LookupRes = NS->lookup(DeclarationName(&AST.Idents.get(Head)));
+ if (!LookupRes.empty()) {
+ for (const NamedDecl *Res : LookupRes)
+ // If `Res` is not visible in `UseLoc`, we don't consider it
+ // ambiguous. For example, a reference in a header file should not be
+ // affected by a potentially ambiguous name in some file that includes
+ // the header.
+ if (!TrimmedQName.startswith(Res->getQualifiedNameAsString()) &&
+ SM.isBeforeInTranslationUnit(
+ SM.getSpellingLoc(Res->getLocation()), UseLoc))
+ return true;
+ }
+ }
+ return false;
+ };
+
+ // Add more qualifiers until the spelling is not ambiguous.
+ std::string Disambiguated = Spelling;
+ while (IsAmbiguousSpelling(Disambiguated)) {
+ if (UnspelledScopes.empty()) {
+ Disambiguated = "::" + Disambiguated;
+ } else {
+ Disambiguated = (UnspelledScopes.back() + "::" + Disambiguated).str();
+ UnspelledScopes.pop_back();
}
}
- return false;
+ return Disambiguated;
}
std::string tooling::replaceNestedName(const NestedNameSpecifier *Use,
+ SourceLocation UseLoc,
const DeclContext *UseContext,
const NamedDecl *FromDecl,
StringRef ReplacementString) {
@@ -180,12 +215,7 @@ std::string tooling::replaceNestedName(const NestedNameSpecifier *Use,
// specific).
StringRef Suggested = getBestNamespaceSubstr(UseContext, ReplacementString,
isFullyQualified(Use));
- // Use the fully qualified name if the suggested name is ambiguous.
- // FIXME: consider re-shortening the name until the name is not ambiguous. We
- // are not doing this because ambiguity is pretty bad and we should not try to
- // be clever in handling such cases. Making this noticeable to users seems to
- // be a better option.
- return isAmbiguousNameInScope(Suggested, ReplacementString, *UseContext)
- ? ReplacementString
- : Suggested;
+
+ return disambiguateSpellingInScope(Suggested, ReplacementString, *UseContext,
+ UseLoc);
}
diff --git a/lib/Tooling/Core/Replacement.cpp b/lib/Tooling/Core/Replacement.cpp
index 3b7e39814afa..546158714e3c 100644
--- a/lib/Tooling/Core/Replacement.cpp
+++ b/lib/Tooling/Core/Replacement.cpp
@@ -1,9 +1,8 @@
//===- Replacement.cpp - Framework for clang refactoring tools ------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -520,12 +519,11 @@ calculateRangesAfterReplacements(const Replacements &Replaces,
return MergedRanges;
tooling::Replacements FakeReplaces;
for (const auto &R : MergedRanges) {
- auto Err = FakeReplaces.add(Replacement(Replaces.begin()->getFilePath(),
- R.getOffset(), R.getLength(),
- std::string(R.getLength(), ' ')));
- assert(!Err &&
- "Replacements must not conflict since ranges have been merged.");
- llvm::consumeError(std::move(Err));
+ llvm::cantFail(
+ FakeReplaces.add(Replacement(Replaces.begin()->getFilePath(),
+ R.getOffset(), R.getLength(),
+ std::string(R.getLength(), ' '))),
+ "Replacements must not conflict since ranges have been merged.");
}
return FakeReplaces.merge(Replaces).getAffectedRanges();
}
diff --git a/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp b/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
new file mode 100644
index 000000000000..4868f2663796
--- /dev/null
+++ b/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
@@ -0,0 +1,149 @@
+//===- DependencyScanningWorker.cpp - clang-scan-deps worker --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/DependencyScanning/DependencyScanningWorker.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Frontend/Utils.h"
+#include "clang/Tooling/Tooling.h"
+
+using namespace clang;
+using namespace tooling;
+using namespace dependencies;
+
+namespace {
+
+/// Prints out all of the gathered dependencies into a string.
+class DependencyPrinter : public DependencyFileGenerator {
+public:
+ DependencyPrinter(std::unique_ptr<DependencyOutputOptions> Opts,
+ std::string &S)
+ : DependencyFileGenerator(*Opts), Opts(std::move(Opts)), S(S) {}
+
+ void finishedMainFile(DiagnosticsEngine &Diags) override {
+ llvm::raw_string_ostream OS(S);
+ outputDependencyFile(OS);
+ }
+
+private:
+ std::unique_ptr<DependencyOutputOptions> Opts;
+ std::string &S;
+};
+
+/// A proxy file system that doesn't call `chdir` when changing the working
+/// directory of a clang tool.
+class ProxyFileSystemWithoutChdir : public llvm::vfs::ProxyFileSystem {
+public:
+ ProxyFileSystemWithoutChdir(
+ llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS)
+ : ProxyFileSystem(std::move(FS)) {}
+
+ llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override {
+ assert(!CWD.empty() && "empty CWD");
+ return CWD;
+ }
+
+ std::error_code setCurrentWorkingDirectory(const Twine &Path) override {
+ CWD = Path.str();
+ return {};
+ }
+
+private:
+ std::string CWD;
+};
+
+/// A clang tool that runs the preprocessor in a mode that's optimized for
+/// dependency scanning for the given compiler invocation.
+class DependencyScanningAction : public tooling::ToolAction {
+public:
+ DependencyScanningAction(StringRef WorkingDirectory,
+ std::string &DependencyFileContents)
+ : WorkingDirectory(WorkingDirectory),
+ DependencyFileContents(DependencyFileContents) {}
+
+ bool runInvocation(std::shared_ptr<CompilerInvocation> Invocation,
+ FileManager *FileMgr,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ DiagnosticConsumer *DiagConsumer) override {
+ // Create a compiler instance to handle the actual work.
+ CompilerInstance Compiler(std::move(PCHContainerOps));
+ Compiler.setInvocation(std::move(Invocation));
+ FileMgr->getFileSystemOpts().WorkingDir = WorkingDirectory;
+ Compiler.setFileManager(FileMgr);
+
+ // Don't print 'X warnings and Y errors generated'.
+ Compiler.getDiagnosticOpts().ShowCarets = false;
+ // Create the compiler's actual diagnostics engine.
+ Compiler.createDiagnostics(DiagConsumer, /*ShouldOwnClient=*/false);
+ if (!Compiler.hasDiagnostics())
+ return false;
+
+ Compiler.createSourceManager(*FileMgr);
+
+ // Create the dependency collector that will collect the produced
+ // dependencies.
+ //
+ // This also moves the existing dependency output options from the
+ // invocation to the collector. The options in the invocation are reset,
+ // which ensures that the compiler won't create new dependency collectors,
+ // and thus won't write out the extra '.d' files to disk.
+ auto Opts = llvm::make_unique<DependencyOutputOptions>(
+ std::move(Compiler.getInvocation().getDependencyOutputOpts()));
+ // We need at least one -MT equivalent for the generator to work.
+ if (Opts->Targets.empty())
+ Opts->Targets = {"clang-scan-deps dependency"};
+ Compiler.addDependencyCollector(std::make_shared<DependencyPrinter>(
+ std::move(Opts), DependencyFileContents));
+
+ auto Action = llvm::make_unique<PreprocessOnlyAction>();
+ const bool Result = Compiler.ExecuteAction(*Action);
+ FileMgr->clearStatCache();
+ return Result;
+ }
+
+private:
+ StringRef WorkingDirectory;
+ /// The dependency file will be written to this string.
+ std::string &DependencyFileContents;
+};
+
+} // end anonymous namespace
+
+DependencyScanningWorker::DependencyScanningWorker() {
+ DiagOpts = new DiagnosticOptions();
+ PCHContainerOps = std::make_shared<PCHContainerOperations>();
+ /// FIXME: Use the shared file system from the service for fast scanning
+ /// mode.
+ WorkerFS = new ProxyFileSystemWithoutChdir(llvm::vfs::getRealFileSystem());
+}
+
+llvm::Expected<std::string>
+DependencyScanningWorker::getDependencyFile(const std::string &Input,
+ StringRef WorkingDirectory,
+ const CompilationDatabase &CDB) {
+ // Capture the emitted diagnostics and report them to the client
+ // in the case of a failure.
+ std::string DiagnosticOutput;
+ llvm::raw_string_ostream DiagnosticsOS(DiagnosticOutput);
+ TextDiagnosticPrinter DiagPrinter(DiagnosticsOS, DiagOpts.get());
+
+ WorkerFS->setCurrentWorkingDirectory(WorkingDirectory);
+ tooling::ClangTool Tool(CDB, Input, PCHContainerOps, WorkerFS);
+ Tool.clearArgumentsAdjusters();
+ Tool.setRestoreWorkingDir(false);
+ Tool.setPrintErrorMessage(false);
+ Tool.setDiagnosticConsumer(&DiagPrinter);
+ std::string Output;
+ DependencyScanningAction Action(WorkingDirectory, Output);
+ if (Tool.run(&Action)) {
+ return llvm::make_error<llvm::StringError>(DiagnosticsOS.str(),
+ llvm::inconvertibleErrorCode());
+ }
+ return Output;
+}
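
A minimal sketch of driving the new worker from a tool; the source path, working directory, and error handling are illustrative, not part of the patch:

#include "clang/Tooling/CompilationDatabase.h"
#include "clang/Tooling/DependencyScanning/DependencyScanningWorker.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

using namespace clang::tooling;

static void printDepsFor(const CompilationDatabase &CDB) {
  dependencies::DependencyScanningWorker Worker;
  // Runs the preprocessor-only action and returns a make-style dependency
  // file as a string, or the captured diagnostics on failure.
  llvm::Expected<std::string> Deps =
      Worker.getDependencyFile("/src/foo.cpp", "/src", CDB);
  if (!Deps) {
    llvm::errs() << llvm::toString(Deps.takeError()) << '\n';
    return;
  }
  llvm::outs() << *Deps;
}
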
diff --git a/lib/Tooling/Execution.cpp b/lib/Tooling/Execution.cpp
index 9ddb18a57b46..c39a4fcdac82 100644
--- a/lib/Tooling/Execution.cpp
+++ b/lib/Tooling/Execution.cpp
@@ -1,9 +1,8 @@
//===- lib/Tooling/Execution.cpp - Implements tool execution framework. ---===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Tooling/FileMatchTrie.cpp b/lib/Tooling/FileMatchTrie.cpp
index 202b3f00f3fb..7df5a16fd88f 100644
--- a/lib/Tooling/FileMatchTrie.cpp
+++ b/lib/Tooling/FileMatchTrie.cpp
@@ -1,9 +1,8 @@
//===- FileMatchTrie.cpp --------------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Tooling/FixIt.cpp b/lib/Tooling/FixIt.cpp
index 70942c5ac845..76c92c543736 100644
--- a/lib/Tooling/FixIt.cpp
+++ b/lib/Tooling/FixIt.cpp
@@ -1,9 +1,8 @@
//===--- FixIt.cpp - FixIt Hint utilities -----------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -19,12 +18,11 @@ namespace tooling {
namespace fixit {
namespace internal {
-StringRef getText(SourceRange Range, const ASTContext &Context) {
- return Lexer::getSourceText(CharSourceRange::getTokenRange(Range),
- Context.getSourceManager(),
+StringRef getText(CharSourceRange Range, const ASTContext &Context) {
+ return Lexer::getSourceText(Range, Context.getSourceManager(),
Context.getLangOpts());
}
-} // end namespace internal
+} // namespace internal
} // end namespace fixit
} // end namespace tooling
diff --git a/lib/Tooling/GuessTargetAndModeCompilationDatabase.cpp b/lib/Tooling/GuessTargetAndModeCompilationDatabase.cpp
new file mode 100644
index 000000000000..ac3faf1b01f9
--- /dev/null
+++ b/lib/Tooling/GuessTargetAndModeCompilationDatabase.cpp
@@ -0,0 +1,57 @@
+//===- GuessTargetAndModeCompilationDatabase.cpp --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/CompilationDatabase.h"
+#include "clang/Tooling/Tooling.h"
+#include <memory>
+
+namespace clang {
+namespace tooling {
+
+namespace {
+class TargetAndModeAdderDatabase : public CompilationDatabase {
+public:
+ TargetAndModeAdderDatabase(std::unique_ptr<CompilationDatabase> Base)
+ : Base(std::move(Base)) {
+ assert(this->Base != nullptr);
+ }
+
+ std::vector<std::string> getAllFiles() const override {
+ return Base->getAllFiles();
+ }
+
+ std::vector<CompileCommand> getAllCompileCommands() const override {
+ return addTargetAndMode(Base->getAllCompileCommands());
+ }
+
+ std::vector<CompileCommand>
+ getCompileCommands(StringRef FilePath) const override {
+ return addTargetAndMode(Base->getCompileCommands(FilePath));
+ }
+
+private:
+ std::vector<CompileCommand>
+ addTargetAndMode(std::vector<CompileCommand> Cmds) const {
+ for (auto &Cmd : Cmds) {
+ if (Cmd.CommandLine.empty())
+ continue;
+ addTargetAndModeForProgramName(Cmd.CommandLine, Cmd.CommandLine.front());
+ }
+ return Cmds;
+ }
+ std::unique_ptr<CompilationDatabase> Base;
+};
+} // namespace
+
+std::unique_ptr<CompilationDatabase>
+inferTargetAndDriverMode(std::unique_ptr<CompilationDatabase> Base) {
+ return llvm::make_unique<TargetAndModeAdderDatabase>(std::move(Base));
+}
+
+} // namespace tooling
+} // namespace clang
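
A minimal sketch of layering the new inference wrapper over a loaded database, mirroring what the JSON database plugin below now does automatically; the build directory path is hypothetical:

#include "clang/Tooling/CompilationDatabase.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>

using namespace clang::tooling;

static std::unique_ptr<CompilationDatabase> loadWithTargetInference() {
  std::string Error;
  std::unique_ptr<CompilationDatabase> Base =
      CompilationDatabase::loadFromDirectory("/path/to/build", Error);
  if (!Base) {
    llvm::errs() << Error << '\n';
    return nullptr;
  }
  // Derives a target and driver mode from each command's argv[0] and adds the
  // corresponding flags (e.g. a cl driver mode for MSVC-style command lines).
  return inferTargetAndDriverMode(std::move(Base));
}
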
diff --git a/lib/Tooling/Inclusions/HeaderIncludes.cpp b/lib/Tooling/Inclusions/HeaderIncludes.cpp
index c74ad0b9cd56..a7f79c33bc55 100644
--- a/lib/Tooling/Inclusions/HeaderIncludes.cpp
+++ b/lib/Tooling/Inclusions/HeaderIncludes.cpp
@@ -1,15 +1,15 @@
//===--- HeaderIncludes.cpp - Insert/Delete #includes --*- C++ -*----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Inclusions/HeaderIncludes.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/Support/FormatVariadic.h"
namespace clang {
@@ -51,12 +51,16 @@ unsigned getOffsetAfterTokenSequence(
// Check if a sequence of tokens is like "#<Name> <raw_identifier>". If it is,
// \p Tok will be the token after this directive; otherwise, it can be any token
-// after the given \p Tok (including \p Tok).
-bool checkAndConsumeDirectiveWithName(Lexer &Lex, StringRef Name, Token &Tok) {
+// after the given \p Tok (including \p Tok). If \p RawIDName is provided, the
+// (second) raw_identifier name is checked.
+bool checkAndConsumeDirectiveWithName(
+ Lexer &Lex, StringRef Name, Token &Tok,
+ llvm::Optional<StringRef> RawIDName = llvm::None) {
bool Matched = Tok.is(tok::hash) && !Lex.LexFromRawLexer(Tok) &&
Tok.is(tok::raw_identifier) &&
Tok.getRawIdentifier() == Name && !Lex.LexFromRawLexer(Tok) &&
- Tok.is(tok::raw_identifier);
+ Tok.is(tok::raw_identifier) &&
+ (!RawIDName || Tok.getRawIdentifier() == *RawIDName);
if (Matched)
Lex.LexFromRawLexer(Tok);
return Matched;
@@ -69,24 +73,45 @@ void skipComments(Lexer &Lex, Token &Tok) {
}
// Returns the offset after header guard directives and any comments
-// before/after header guards. If no header guard presents in the code, this
-// will returns the offset after skipping all comments from the start of the
-// code.
+// before/after header guards (e.g. #ifndef/#define pair, #pragma once). If no
+// header guard is present in the code, this will return the offset after
+// skipping all comments from the start of the code.
unsigned getOffsetAfterHeaderGuardsAndComments(StringRef FileName,
StringRef Code,
const IncludeStyle &Style) {
- return getOffsetAfterTokenSequence(
- FileName, Code, Style,
- [](const SourceManager &SM, Lexer &Lex, Token Tok) {
- skipComments(Lex, Tok);
- unsigned InitialOffset = SM.getFileOffset(Tok.getLocation());
- if (checkAndConsumeDirectiveWithName(Lex, "ifndef", Tok)) {
- skipComments(Lex, Tok);
- if (checkAndConsumeDirectiveWithName(Lex, "define", Tok))
- return SM.getFileOffset(Tok.getLocation());
- }
- return InitialOffset;
- });
+ // \p Consume returns the location after the header guard, or 0 if no header
+ // guard is found.
+ auto ConsumeHeaderGuardAndComment =
+ [&](std::function<unsigned(const SourceManager &SM, Lexer &Lex,
+ Token Tok)>
+ Consume) {
+ return getOffsetAfterTokenSequence(
+ FileName, Code, Style,
+ [&Consume](const SourceManager &SM, Lexer &Lex, Token Tok) {
+ skipComments(Lex, Tok);
+ unsigned InitialOffset = SM.getFileOffset(Tok.getLocation());
+ return std::max(InitialOffset, Consume(SM, Lex, Tok));
+ });
+ };
+ return std::max(
+ // #ifndef/#define
+ ConsumeHeaderGuardAndComment(
+ [](const SourceManager &SM, Lexer &Lex, Token Tok) -> unsigned {
+ if (checkAndConsumeDirectiveWithName(Lex, "ifndef", Tok)) {
+ skipComments(Lex, Tok);
+ if (checkAndConsumeDirectiveWithName(Lex, "define", Tok))
+ return SM.getFileOffset(Tok.getLocation());
+ }
+ return 0;
+ }),
+ // #pragma once
+ ConsumeHeaderGuardAndComment(
+ [](const SourceManager &SM, Lexer &Lex, Token Tok) -> unsigned {
+ if (checkAndConsumeDirectiveWithName(Lex, "pragma", Tok,
+ StringRef("once")))
+ return SM.getFileOffset(Tok.getLocation());
+ return 0;
+ }));
}
// Check if a sequence of tokens is like
diff --git a/lib/Tooling/Inclusions/IncludeStyle.cpp b/lib/Tooling/Inclusions/IncludeStyle.cpp
index 3597710f1f6e..c53c82c83630 100644
--- a/lib/Tooling/Inclusions/IncludeStyle.cpp
+++ b/lib/Tooling/Inclusions/IncludeStyle.cpp
@@ -1,9 +1,8 @@
//===--- IncludeStyle.cpp - Style of C++ #include directives -----*- C++-*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Tooling/InterpolatingCompilationDatabase.cpp b/lib/Tooling/InterpolatingCompilationDatabase.cpp
index 4d0d84f660a2..53c8dd448fd9 100644
--- a/lib/Tooling/InterpolatingCompilationDatabase.cpp
+++ b/lib/Tooling/InterpolatingCompilationDatabase.cpp
@@ -1,9 +1,8 @@
//===- InterpolatingCompilationDatabase.cpp ---------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -151,7 +150,8 @@ struct TransferableCommand {
// spelling of each argument; re-rendering is lossy for aliased flags.
// E.g. in CL mode, /W4 maps to -Wall.
auto OptTable = clang::driver::createDriverOptTable();
- Cmd.CommandLine.emplace_back(OldArgs.front());
+ if (!OldArgs.empty())
+ Cmd.CommandLine.emplace_back(OldArgs.front());
for (unsigned Pos = 1; Pos < OldArgs.size();) {
using namespace driver::options;
@@ -206,10 +206,13 @@ struct TransferableCommand {
bool TypeCertain;
auto TargetType = guessType(Filename, &TypeCertain);
// If the filename doesn't determine the language (.h), transfer with -x.
- if (TargetType != types::TY_INVALID && !TypeCertain && Type) {
- TargetType = types::onlyPrecompileType(TargetType) // header?
- ? types::lookupHeaderTypeForSourceType(*Type)
- : *Type;
+ if ((!TargetType || !TypeCertain) && Type) {
+ // Use *Type, or its header variant if the file is a header.
+ // Treat no/invalid extension as header (e.g. C++ standard library).
+ TargetType =
+ (!TargetType || types::onlyPrecompileType(TargetType)) // header?
+ ? types::lookupHeaderTypeForSourceType(*Type)
+ : *Type;
if (ClangCLMode) {
const StringRef Flag = toCLFlag(TargetType);
if (!Flag.empty())
@@ -227,6 +230,7 @@ struct TransferableCommand {
LangStandard::getLangStandardForKind(Std).getName()).str());
}
Result.CommandLine.push_back(Filename);
+ Result.Heuristic = "inferred from " + Cmd.Filename;
return Result;
}
@@ -240,7 +244,8 @@ private:
}
// Otherwise just check the clang executable file name.
- return llvm::sys::path::stem(CmdLine.front()).endswith_lower("cl");
+ return !CmdLine.empty() &&
+ llvm::sys::path::stem(CmdLine.front()).endswith_lower("cl");
}
// Map the language from the --std flag to that of the -x flag.
@@ -473,8 +478,7 @@ private:
ArrayRef<SubstringAndIndex> Idx) const {
assert(!Idx.empty());
// Longest substring match will be adjacent to a direct lookup.
- auto It =
- std::lower_bound(Idx.begin(), Idx.end(), SubstringAndIndex{Key, 0});
+ auto It = llvm::lower_bound(Idx, SubstringAndIndex{Key, 0});
if (It == Idx.begin())
return *It;
if (It == Idx.end())
diff --git a/lib/Tooling/JSONCompilationDatabase.cpp b/lib/Tooling/JSONCompilationDatabase.cpp
index b0feaa229c11..f19a0f7550b9 100644
--- a/lib/Tooling/JSONCompilationDatabase.cpp
+++ b/lib/Tooling/JSONCompilationDatabase.cpp
@@ -1,9 +1,8 @@
//===- JSONCompilationDatabase.cpp ----------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -15,7 +14,9 @@
#include "clang/Basic/LLVM.h"
#include "clang/Tooling/CompilationDatabase.h"
#include "clang/Tooling/CompilationDatabasePluginRegistry.h"
+#include "clang/Tooling/Tooling.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -166,7 +167,9 @@ class JSONCompilationDatabasePlugin : public CompilationDatabasePlugin {
llvm::sys::path::append(JSONDatabasePath, "compile_commands.json");
auto Base = JSONCompilationDatabase::loadFromFile(
JSONDatabasePath, ErrorMessage, JSONCommandLineSyntax::AutoDetect);
- return Base ? inferMissingCompileCommands(std::move(Base)) : nullptr;
+ return Base ? inferTargetAndDriverMode(
+ inferMissingCompileCommands(std::move(Base)))
+ : nullptr;
}
};
@@ -191,8 +194,11 @@ std::unique_ptr<JSONCompilationDatabase>
JSONCompilationDatabase::loadFromFile(StringRef FilePath,
std::string &ErrorMessage,
JSONCommandLineSyntax Syntax) {
+ // Don't mmap: if we're a long-lived process, the build system may overwrite.
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> DatabaseBuffer =
- llvm::MemoryBuffer::getFile(FilePath);
+ llvm::MemoryBuffer::getFile(FilePath, /*FileSize=*/-1,
+ /*RequiresNullTerminator=*/true,
+ /*IsVolatile=*/true);
if (std::error_code Result = DatabaseBuffer.getError()) {
ErrorMessage = "Error while opening JSON database: " + Result.message();
return nullptr;
@@ -250,15 +256,57 @@ JSONCompilationDatabase::getAllCompileCommands() const {
return Commands;
}
+static llvm::StringRef stripExecutableExtension(llvm::StringRef Name) {
+ Name.consume_back(".exe");
+ return Name;
+}
+
+// There are compiler-wrappers (ccache, distcc, gomacc) that take the "real"
+// compiler as an argument, e.g. distcc gcc -O3 foo.c.
+// These end up in compile_commands.json when people set CC="distcc gcc".
+// Clang's driver doesn't understand this, so we need to unwrap.
+static bool unwrapCommand(std::vector<std::string> &Args) {
+ if (Args.size() < 2)
+ return false;
+ StringRef Wrapper =
+ stripExecutableExtension(llvm::sys::path::filename(Args.front()));
+ if (Wrapper == "distcc" || Wrapper == "gomacc" || Wrapper == "ccache") {
+ // Most of these wrappers support being invoked 3 ways:
+ // `distcc g++ file.c` This is the mode we're trying to match.
+ // We need to drop `distcc`.
+ // `distcc file.c` This acts like compiler is cc or similar.
+ // Clang's driver can handle this, no change needed.
+ // `g++ file.c` g++ is a symlink to distcc.
+ // We don't even notice this case, and all is well.
+ //
+ // We need to distinguish between the first and second case.
+ // The wrappers themselves don't take flags, so Args[1] is a compiler flag,
+ // an input file, or a compiler. Inputs have extensions, compilers don't.
+ bool HasCompiler =
+ (Args[1][0] != '-') &&
+ !llvm::sys::path::has_extension(stripExecutableExtension(Args[1]));
+ if (HasCompiler) {
+ Args.erase(Args.begin());
+ return true;
+ }
+ // If !HasCompiler, wrappers act like GCC. Fine: so do we.
+ }
+ return false;
+}
+
static std::vector<std::string>
nodeToCommandLine(JSONCommandLineSyntax Syntax,
const std::vector<llvm::yaml::ScalarNode *> &Nodes) {
SmallString<1024> Storage;
- if (Nodes.size() == 1)
- return unescapeCommandLine(Syntax, Nodes[0]->getValue(Storage));
std::vector<std::string> Arguments;
- for (const auto *Node : Nodes)
- Arguments.push_back(Node->getValue(Storage));
+ if (Nodes.size() == 1)
+ Arguments = unescapeCommandLine(Syntax, Nodes[0]->getValue(Storage));
+ else
+ for (const auto *Node : Nodes)
+ Arguments.push_back(Node->getValue(Storage));
+ // There may be multiple wrappers: using distcc and ccache together is common.
+ while (unwrapCommand(Arguments))
+ ;
return Arguments;
}
@@ -371,6 +419,7 @@ bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
SmallString<128> AbsolutePath(
Directory->getValue(DirectoryStorage));
llvm::sys::path::append(AbsolutePath, FileName);
+ llvm::sys::path::remove_dots(AbsolutePath, /*remove_dot_dot=*/ true);
llvm::sys::path::native(AbsolutePath, NativeFilePath);
} else {
llvm::sys::path::native(FileName, NativeFilePath);
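
The new unwrapCommand() pass repeatedly strips known compiler launchers (ccache, distcc, gomacc) off the front of each command line so the driver sees the real compiler, and nodeToCommandLine() applies it to both the "command" and "arguments" forms. A short sketch of the observable effect, reusing the includes and loadFromBuffer() entry point from the previous example; the entry itself is made up:

  // "ccache distcc g++ -O2 -c foo.c" should parse as "g++ -O2 -c foo.c".
  std::string Err;
  auto DB = clang::tooling::JSONCompilationDatabase::loadFromBuffer(
      R"json([{"directory": "/src",
               "command": "ccache distcc g++ -O2 -c foo.c",
               "file": "foo.c"}])json",
      Err, clang::tooling::JSONCommandLineSyntax::AutoDetect);
  for (const auto &Cmd : DB->getCompileCommands("/src/foo.c"))
    llvm::errs() << llvm::join(Cmd.CommandLine, " ") << "\n"; // g++ -O2 -c foo.c
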
diff --git a/lib/Tooling/Refactoring.cpp b/lib/Tooling/Refactoring.cpp
index db34c952d794..f379a9c3bcf6 100644
--- a/lib/Tooling/Refactoring.cpp
+++ b/lib/Tooling/Refactoring.cpp
@@ -1,9 +1,8 @@
//===--- Refactoring.cpp - Framework for clang refactoring tools ----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Tooling/Refactoring/ASTSelection.cpp b/lib/Tooling/Refactoring/ASTSelection.cpp
index b8f996d8218c..64e57af59011 100644
--- a/lib/Tooling/Refactoring/ASTSelection.cpp
+++ b/lib/Tooling/Refactoring/ASTSelection.cpp
@@ -1,9 +1,8 @@
//===--- ASTSelection.cpp - Clang refactoring library ---------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Tooling/Refactoring/ASTSelectionRequirements.cpp b/lib/Tooling/Refactoring/ASTSelectionRequirements.cpp
index c0232c5da442..14fc66a979ae 100644
--- a/lib/Tooling/Refactoring/ASTSelectionRequirements.cpp
+++ b/lib/Tooling/Refactoring/ASTSelectionRequirements.cpp
@@ -1,9 +1,8 @@
//===--- ASTSelectionRequirements.cpp - Clang refactoring library ---------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Tooling/Refactoring/AtomicChange.cpp b/lib/Tooling/Refactoring/AtomicChange.cpp
index e8b0fdbeb662..4cf63306d262 100644
--- a/lib/Tooling/Refactoring/AtomicChange.cpp
+++ b/lib/Tooling/Refactoring/AtomicChange.cpp
@@ -1,9 +1,8 @@
//===--- AtomicChange.cpp - AtomicChange implementation -----------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Tooling/Refactoring/Extract/Extract.cpp b/lib/Tooling/Refactoring/Extract/Extract.cpp
index 7a741bdb2e91..f5b94a462103 100644
--- a/lib/Tooling/Refactoring/Extract/Extract.cpp
+++ b/lib/Tooling/Refactoring/Extract/Extract.cpp
@@ -1,9 +1,8 @@
//===--- Extract.cpp - Clang refactoring library --------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/Tooling/Refactoring/Extract/SourceExtraction.cpp b/lib/Tooling/Refactoring/Extract/SourceExtraction.cpp
index 7fd8cc2d3c7f..533c373e35c4 100644
--- a/lib/Tooling/Refactoring/Extract/SourceExtraction.cpp
+++ b/lib/Tooling/Refactoring/Extract/SourceExtraction.cpp
@@ -1,9 +1,8 @@
//===--- SourceExtraction.cpp - Clang refactoring library -----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Tooling/Refactoring/Extract/SourceExtraction.h b/lib/Tooling/Refactoring/Extract/SourceExtraction.h
index 4b4bd8b477ff..545eb6c1a11c 100644
--- a/lib/Tooling/Refactoring/Extract/SourceExtraction.h
+++ b/lib/Tooling/Refactoring/Extract/SourceExtraction.h
@@ -1,9 +1,8 @@
//===--- SourceExtraction.cpp - Clang refactoring library -----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Tooling/Refactoring/RangeSelector.cpp b/lib/Tooling/Refactoring/RangeSelector.cpp
new file mode 100644
index 000000000000..768c02e2277b
--- /dev/null
+++ b/lib/Tooling/Refactoring/RangeSelector.cpp
@@ -0,0 +1,296 @@
+//===--- RangeSelector.cpp - RangeSelector implementations ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/Refactoring/RangeSelector.h"
+#include "clang/AST/Expr.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Tooling/Refactoring/SourceCode.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/Error.h"
+#include <string>
+#include <utility>
+#include <vector>
+
+using namespace clang;
+using namespace tooling;
+
+using ast_matchers::MatchFinder;
+using ast_type_traits::ASTNodeKind;
+using ast_type_traits::DynTypedNode;
+using llvm::Error;
+using llvm::StringError;
+
+using MatchResult = MatchFinder::MatchResult;
+
+static Error invalidArgumentError(Twine Message) {
+ return llvm::make_error<StringError>(llvm::errc::invalid_argument, Message);
+}
+
+static Error typeError(StringRef ID, const ASTNodeKind &Kind) {
+ return invalidArgumentError("mismatched type (node id=" + ID +
+ " kind=" + Kind.asStringRef() + ")");
+}
+
+static Error typeError(StringRef ID, const ASTNodeKind &Kind,
+ Twine ExpectedType) {
+ return invalidArgumentError("mismatched type: expected one of " +
+ ExpectedType + " (node id=" + ID +
+ " kind=" + Kind.asStringRef() + ")");
+}
+
+static Error missingPropertyError(StringRef ID, Twine Description,
+ StringRef Property) {
+ return invalidArgumentError(Description + " requires property '" + Property +
+ "' (node id=" + ID + ")");
+}
+
+static Expected<DynTypedNode> getNode(const ast_matchers::BoundNodes &Nodes,
+ StringRef ID) {
+ auto &NodesMap = Nodes.getMap();
+ auto It = NodesMap.find(ID);
+ if (It == NodesMap.end())
+ return invalidArgumentError("ID not bound: " + ID);
+ return It->second;
+}
+
+// FIXME: handling of macros should be configurable.
+static SourceLocation findPreviousTokenStart(SourceLocation Start,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ if (Start.isInvalid() || Start.isMacroID())
+ return SourceLocation();
+
+ SourceLocation BeforeStart = Start.getLocWithOffset(-1);
+ if (BeforeStart.isInvalid() || BeforeStart.isMacroID())
+ return SourceLocation();
+
+ return Lexer::GetBeginningOfToken(BeforeStart, SM, LangOpts);
+}
+
+// Finds the start location of the previous token of kind \p TK.
+// FIXME: handling of macros should be configurable.
+static SourceLocation findPreviousTokenKind(SourceLocation Start,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ tok::TokenKind TK) {
+ while (true) {
+ SourceLocation L = findPreviousTokenStart(Start, SM, LangOpts);
+ if (L.isInvalid() || L.isMacroID())
+ return SourceLocation();
+
+ Token T;
+ if (Lexer::getRawToken(L, T, SM, LangOpts, /*IgnoreWhiteSpace=*/true))
+ return SourceLocation();
+
+ if (T.is(TK))
+ return T.getLocation();
+
+ Start = L;
+ }
+}
+
+static SourceLocation findOpenParen(const CallExpr &E, const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ SourceLocation EndLoc =
+ E.getNumArgs() == 0 ? E.getRParenLoc() : E.getArg(0)->getBeginLoc();
+ return findPreviousTokenKind(EndLoc, SM, LangOpts, tok::TokenKind::l_paren);
+}
+
+RangeSelector tooling::before(RangeSelector Selector) {
+ return [Selector](const MatchResult &Result) -> Expected<CharSourceRange> {
+ Expected<CharSourceRange> SelectedRange = Selector(Result);
+ if (!SelectedRange)
+ return SelectedRange.takeError();
+ return CharSourceRange::getCharRange(SelectedRange->getBegin());
+ };
+}
+
+RangeSelector tooling::after(RangeSelector Selector) {
+ return [Selector](const MatchResult &Result) -> Expected<CharSourceRange> {
+ Expected<CharSourceRange> SelectedRange = Selector(Result);
+ if (!SelectedRange)
+ return SelectedRange.takeError();
+ if (SelectedRange->isCharRange())
+ return CharSourceRange::getCharRange(SelectedRange->getEnd());
+ return CharSourceRange::getCharRange(Lexer::getLocForEndOfToken(
+ SelectedRange->getEnd(), 0, Result.Context->getSourceManager(),
+ Result.Context->getLangOpts()));
+ };
+}
+
+RangeSelector tooling::node(std::string ID) {
+ return [ID](const MatchResult &Result) -> Expected<CharSourceRange> {
+ Expected<DynTypedNode> Node = getNode(Result.Nodes, ID);
+ if (!Node)
+ return Node.takeError();
+ return Node->get<Stmt>() != nullptr && Node->get<Expr>() == nullptr
+ ? getExtendedRange(*Node, tok::TokenKind::semi, *Result.Context)
+ : CharSourceRange::getTokenRange(Node->getSourceRange());
+ };
+}
+
+RangeSelector tooling::statement(std::string ID) {
+ return [ID](const MatchResult &Result) -> Expected<CharSourceRange> {
+ Expected<DynTypedNode> Node = getNode(Result.Nodes, ID);
+ if (!Node)
+ return Node.takeError();
+ return getExtendedRange(*Node, tok::TokenKind::semi, *Result.Context);
+ };
+}
+
+RangeSelector tooling::range(RangeSelector Begin, RangeSelector End) {
+ return [Begin, End](const MatchResult &Result) -> Expected<CharSourceRange> {
+ Expected<CharSourceRange> BeginRange = Begin(Result);
+ if (!BeginRange)
+ return BeginRange.takeError();
+ Expected<CharSourceRange> EndRange = End(Result);
+ if (!EndRange)
+ return EndRange.takeError();
+ SourceLocation B = BeginRange->getBegin();
+ SourceLocation E = EndRange->getEnd();
+ // Note: we are precluding the possibility of sub-token ranges in the case
+ // that EndRange is a token range.
+ if (Result.SourceManager->isBeforeInTranslationUnit(E, B)) {
+ return invalidArgumentError("Bad range: out of order");
+ }
+ return CharSourceRange(SourceRange(B, E), EndRange->isTokenRange());
+ };
+}
+
+RangeSelector tooling::range(std::string BeginID, std::string EndID) {
+ return tooling::range(node(std::move(BeginID)), node(std::move(EndID)));
+}
+
+RangeSelector tooling::member(std::string ID) {
+ return [ID](const MatchResult &Result) -> Expected<CharSourceRange> {
+ Expected<DynTypedNode> Node = getNode(Result.Nodes, ID);
+ if (!Node)
+ return Node.takeError();
+ if (auto *M = Node->get<clang::MemberExpr>())
+ return CharSourceRange::getTokenRange(
+ M->getMemberNameInfo().getSourceRange());
+ return typeError(ID, Node->getNodeKind(), "MemberExpr");
+ };
+}
+
+RangeSelector tooling::name(std::string ID) {
+ return [ID](const MatchResult &Result) -> Expected<CharSourceRange> {
+ Expected<DynTypedNode> N = getNode(Result.Nodes, ID);
+ if (!N)
+ return N.takeError();
+ auto &Node = *N;
+ if (const auto *D = Node.get<NamedDecl>()) {
+ if (!D->getDeclName().isIdentifier())
+ return missingPropertyError(ID, "name", "identifier");
+ SourceLocation L = D->getLocation();
+ auto R = CharSourceRange::getTokenRange(L, L);
+ // Verify that the range covers exactly the name.
+ // FIXME: extend this code to support cases like `operator +` or
+ // `foo<int>` for which this range will be too short. Doing so will
+ // require subcasing `NamedDecl`, because it doesn't provide virtual
+ // access to the \c DeclarationNameInfo.
+ if (getText(R, *Result.Context) != D->getName())
+ return CharSourceRange();
+ return R;
+ }
+ if (const auto *E = Node.get<DeclRefExpr>()) {
+ if (!E->getNameInfo().getName().isIdentifier())
+ return missingPropertyError(ID, "name", "identifier");
+ SourceLocation L = E->getLocation();
+ return CharSourceRange::getTokenRange(L, L);
+ }
+ if (const auto *I = Node.get<CXXCtorInitializer>()) {
+ if (!I->isMemberInitializer() && I->isWritten())
+ return missingPropertyError(ID, "name", "explicit member initializer");
+ SourceLocation L = I->getMemberLocation();
+ return CharSourceRange::getTokenRange(L, L);
+ }
+ return typeError(ID, Node.getNodeKind(),
+ "DeclRefExpr, NamedDecl, CXXCtorInitializer");
+ };
+}
+
+namespace {
+// Creates a selector from a range-selection function \p Func, which selects a
+// range that is relative to a bound node id. \c T is the node type expected by
+// \p Func.
+template <typename T, CharSourceRange (*Func)(const MatchResult &, const T &)>
+class RelativeSelector {
+ std::string ID;
+
+public:
+ RelativeSelector(std::string ID) : ID(std::move(ID)) {}
+
+ Expected<CharSourceRange> operator()(const MatchResult &Result) {
+ Expected<DynTypedNode> N = getNode(Result.Nodes, ID);
+ if (!N)
+ return N.takeError();
+ if (const auto *Arg = N->get<T>())
+ return Func(Result, *Arg);
+ return typeError(ID, N->getNodeKind());
+ }
+};
+} // namespace
+
+// FIXME: Change the following functions from being in an anonymous namespace
+// to static functions, after the minimum Visual C++ has _MSC_VER >= 1915
+// (equivalent to Visual Studio 2017 v15.8 or higher). Using the anonymous
+// namespace works around a bug in earlier versions.
+namespace {
+// Returns the range of the statements (all source between the braces).
+CharSourceRange getStatementsRange(const MatchResult &,
+ const CompoundStmt &CS) {
+ return CharSourceRange::getCharRange(CS.getLBracLoc().getLocWithOffset(1),
+ CS.getRBracLoc());
+}
+} // namespace
+
+RangeSelector tooling::statements(std::string ID) {
+ return RelativeSelector<CompoundStmt, getStatementsRange>(std::move(ID));
+}
+
+namespace {
+// Returns the range of the source between the call's parentheses.
+CharSourceRange getCallArgumentsRange(const MatchResult &Result,
+ const CallExpr &CE) {
+ return CharSourceRange::getCharRange(
+ findOpenParen(CE, *Result.SourceManager, Result.Context->getLangOpts())
+ .getLocWithOffset(1),
+ CE.getRParenLoc());
+}
+} // namespace
+
+RangeSelector tooling::callArgs(std::string ID) {
+ return RelativeSelector<CallExpr, getCallArgumentsRange>(std::move(ID));
+}
+
+namespace {
+// Returns the range of the elements of the initializer list. Includes all
+// source between the braces.
+CharSourceRange getElementsRange(const MatchResult &,
+ const InitListExpr &E) {
+ return CharSourceRange::getCharRange(E.getLBraceLoc().getLocWithOffset(1),
+ E.getRBraceLoc());
+}
+} // namespace
+
+RangeSelector tooling::initListElements(std::string ID) {
+ return RelativeSelector<InitListExpr, getElementsRange>(std::move(ID));
+}
+
+RangeSelector tooling::expansion(RangeSelector S) {
+ return [S](const MatchResult &Result) -> Expected<CharSourceRange> {
+ Expected<CharSourceRange> SRange = S(Result);
+ if (!SRange)
+ return SRange.takeError();
+ return Result.SourceManager->getExpansionRange(*SRange);
+ };
+}
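
RangeSelector is a small combinator library: each selector maps a MatchResult to a CharSourceRange, usually anchored on a bound node, and selectors compose (before/after/range/expansion). A hedged sketch of using callArgs() from a plain MatchFinder callback; the matcher, the "call" binding and the wiring at the bottom are illustrative:

  #include "clang/ASTMatchers/ASTMatchFinder.h"
  #include "clang/ASTMatchers/ASTMatchers.h"
  #include "clang/Tooling/Refactoring/RangeSelector.h"
  #include "clang/Tooling/Refactoring/SourceCode.h"
  #include "llvm/Support/Error.h"
  #include "llvm/Support/raw_ostream.h"

  using namespace clang;
  using namespace clang::ast_matchers;

  class PrintArgsCallback : public MatchFinder::MatchCallback {
  public:
    void run(const MatchFinder::MatchResult &Result) override {
      // Select everything between the parentheses of the bound call.
      tooling::RangeSelector Args = tooling::callArgs("call");
      llvm::Expected<CharSourceRange> Range = Args(Result);
      if (!Range) {
        llvm::consumeError(Range.takeError());
        return;
      }
      llvm::errs() << tooling::getText(*Range, *Result.Context) << "\n";
    }
  };

  // Typical wiring (illustrative):
  //   MatchFinder Finder;
  //   PrintArgsCallback CB;
  //   Finder.addMatcher(callExpr().bind("call"), &CB);
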
diff --git a/lib/Tooling/Refactoring/RefactoringActions.cpp b/lib/Tooling/Refactoring/RefactoringActions.cpp
index 37a1639cb446..1a3833243ab4 100644
--- a/lib/Tooling/Refactoring/RefactoringActions.cpp
+++ b/lib/Tooling/Refactoring/RefactoringActions.cpp
@@ -1,9 +1,8 @@
//===--- RefactoringActions.cpp - Constructs refactoring actions ----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Tooling/Refactoring/Rename/RenamingAction.cpp b/lib/Tooling/Refactoring/Rename/RenamingAction.cpp
index 44ffae90efa7..1649513a077a 100644
--- a/lib/Tooling/Refactoring/Rename/RenamingAction.cpp
+++ b/lib/Tooling/Refactoring/Rename/RenamingAction.cpp
@@ -1,9 +1,8 @@
//===--- RenamingAction.cpp - Clang refactoring library -------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -75,6 +74,8 @@ RenameOccurrences::initiate(RefactoringRuleContext &Context,
std::move(NewName));
}
+const NamedDecl *RenameOccurrences::getRenameDecl() const { return ND; }
+
Expected<AtomicChanges>
RenameOccurrences::createSourceReplacements(RefactoringRuleContext &Context) {
Expected<SymbolOccurrences> Occurrences = findSymbolOccurrences(ND, Context);
diff --git a/lib/Tooling/Refactoring/Rename/SymbolOccurrences.cpp b/lib/Tooling/Refactoring/Rename/SymbolOccurrences.cpp
index ea64b2c1aa8c..8cc1ffaf4482 100644
--- a/lib/Tooling/Refactoring/Rename/SymbolOccurrences.cpp
+++ b/lib/Tooling/Refactoring/Rename/SymbolOccurrences.cpp
@@ -1,9 +1,8 @@
//===--- SymbolOccurrences.cpp - Clang refactoring library ----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Tooling/Refactoring/Rename/USRFinder.cpp b/lib/Tooling/Refactoring/Rename/USRFinder.cpp
index 4ed805fd504c..55111202ac88 100644
--- a/lib/Tooling/Refactoring/Rename/USRFinder.cpp
+++ b/lib/Tooling/Refactoring/Rename/USRFinder.cpp
@@ -1,9 +1,8 @@
//===--- USRFinder.cpp - Clang refactoring library ------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp b/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp
index 2e7c9b0cc31b..54c6f3e734b1 100644
--- a/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp
+++ b/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp
@@ -1,9 +1,8 @@
//===--- USRFindingAction.cpp - Clang refactoring library -----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
diff --git a/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp b/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
index 7f60cf54c8ec..408e184f5bf5 100644
--- a/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
+++ b/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
@@ -1,9 +1,8 @@
//===--- USRLocFinder.cpp - Clang refactoring library ---------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -543,8 +542,8 @@ createRenameAtomicChanges(llvm::ArrayRef<std::string> USRs,
if (!llvm::isa<clang::TranslationUnitDecl>(
RenameInfo.Context->getDeclContext())) {
ReplacedName = tooling::replaceNestedName(
- RenameInfo.Specifier, RenameInfo.Context->getDeclContext(),
- RenameInfo.FromDecl,
+ RenameInfo.Specifier, RenameInfo.Begin,
+ RenameInfo.Context->getDeclContext(), RenameInfo.FromDecl,
NewName.startswith("::") ? NewName.str()
: ("::" + NewName).str());
} else {
diff --git a/lib/Tooling/Refactoring/SourceCode.cpp b/lib/Tooling/Refactoring/SourceCode.cpp
new file mode 100644
index 000000000000..3a97e178bbd4
--- /dev/null
+++ b/lib/Tooling/Refactoring/SourceCode.cpp
@@ -0,0 +1,31 @@
+//===--- SourceCode.cpp - Source code manipulation routines -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides functions that simplify extraction of source code.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Tooling/Refactoring/SourceCode.h"
+#include "clang/Lex/Lexer.h"
+
+using namespace clang;
+
+StringRef clang::tooling::getText(CharSourceRange Range,
+ const ASTContext &Context) {
+ return Lexer::getSourceText(Range, Context.getSourceManager(),
+ Context.getLangOpts());
+}
+
+CharSourceRange clang::tooling::maybeExtendRange(CharSourceRange Range,
+ tok::TokenKind Next,
+ ASTContext &Context) {
+ Optional<Token> Tok = Lexer::findNextToken(
+ Range.getEnd(), Context.getSourceManager(), Context.getLangOpts());
+ if (!Tok || !Tok->is(Next))
+ return Range;
+ return CharSourceRange::getTokenRange(Range.getBegin(), Tok->getLocation());
+}
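
getText() and maybeExtendRange() are thin conveniences over Lexer. A minimal sketch that grabs a statement's text, extended to cover a trailing semicolon if one follows; the helper name is made up and the Stmt/ASTContext are assumed to come from a match:

  #include "clang/AST/ASTContext.h"
  #include "clang/AST/Stmt.h"
  #include "clang/Basic/TokenKinds.h"
  #include "clang/Tooling/Refactoring/SourceCode.h"

  // Returns the source of S, including a trailing ';' when one follows it.
  static std::string textWithSemi(const clang::Stmt &S, clang::ASTContext &Ctx) {
    clang::CharSourceRange Range =
        clang::CharSourceRange::getTokenRange(S.getSourceRange());
    Range = clang::tooling::maybeExtendRange(Range, clang::tok::semi, Ctx);
    return clang::tooling::getText(Range, Ctx).str();
  }
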
diff --git a/lib/Tooling/Refactoring/Stencil.cpp b/lib/Tooling/Refactoring/Stencil.cpp
new file mode 100644
index 000000000000..09eca21c3cef
--- /dev/null
+++ b/lib/Tooling/Refactoring/Stencil.cpp
@@ -0,0 +1,175 @@
+//===--- Stencil.cpp - Stencil implementation -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/Refactoring/Stencil.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTTypeTraits.h"
+#include "clang/AST/Expr.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Tooling/Refactoring/SourceCode.h"
+#include "llvm/Support/Errc.h"
+#include <atomic>
+#include <memory>
+#include <string>
+
+using namespace clang;
+using namespace tooling;
+
+using ast_matchers::MatchFinder;
+using llvm::Error;
+
+// A down_cast function to safely down cast a StencilPartInterface to a subclass
+// D. Returns nullptr if P is not an instance of D.
+template <typename D> const D *down_cast(const StencilPartInterface *P) {
+ if (P == nullptr || D::typeId() != P->typeId())
+ return nullptr;
+ return static_cast<const D *>(P);
+}
+
+static llvm::Expected<ast_type_traits::DynTypedNode>
+getNode(const ast_matchers::BoundNodes &Nodes, StringRef Id) {
+ auto &NodesMap = Nodes.getMap();
+ auto It = NodesMap.find(Id);
+ if (It == NodesMap.end())
+ return llvm::make_error<llvm::StringError>(llvm::errc::invalid_argument,
+ "Id not bound: " + Id);
+ return It->second;
+}
+
+namespace {
+// An arbitrary fragment of code within a stencil.
+struct RawTextData {
+ explicit RawTextData(std::string T) : Text(std::move(T)) {}
+ std::string Text;
+};
+
+// A debugging operation to dump the AST for a particular (bound) AST node.
+struct DebugPrintNodeOpData {
+ explicit DebugPrintNodeOpData(std::string S) : Id(std::move(S)) {}
+ std::string Id;
+};
+
+// The fragment of code corresponding to the selected range.
+struct SelectorOpData {
+ explicit SelectorOpData(RangeSelector S) : Selector(std::move(S)) {}
+ RangeSelector Selector;
+};
+} // namespace
+
+bool isEqualData(const RawTextData &A, const RawTextData &B) {
+ return A.Text == B.Text;
+}
+
+bool isEqualData(const DebugPrintNodeOpData &A, const DebugPrintNodeOpData &B) {
+ return A.Id == B.Id;
+}
+
+// Equality is not (yet) defined for \c RangeSelector.
+bool isEqualData(const SelectorOpData &, const SelectorOpData &) { return false; }
+
+// The `evalData()` overloads evaluate the given stencil data to a string, given
+// the match result, and append it to `Result`. We define an overload for each
+// type of stencil data.
+
+Error evalData(const RawTextData &Data, const MatchFinder::MatchResult &,
+ std::string *Result) {
+ Result->append(Data.Text);
+ return Error::success();
+}
+
+Error evalData(const DebugPrintNodeOpData &Data,
+ const MatchFinder::MatchResult &Match, std::string *Result) {
+ std::string Output;
+ llvm::raw_string_ostream Os(Output);
+ auto NodeOrErr = getNode(Match.Nodes, Data.Id);
+ if (auto Err = NodeOrErr.takeError())
+ return Err;
+ NodeOrErr->print(Os, PrintingPolicy(Match.Context->getLangOpts()));
+ *Result += Os.str();
+ return Error::success();
+}
+
+Error evalData(const SelectorOpData &Data, const MatchFinder::MatchResult &Match,
+ std::string *Result) {
+ auto Range = Data.Selector(Match);
+ if (!Range)
+ return Range.takeError();
+ *Result += getText(*Range, *Match.Context);
+ return Error::success();
+}
+
+template <typename T>
+class StencilPartImpl : public StencilPartInterface {
+ T Data;
+
+public:
+ template <typename... Ps>
+ explicit StencilPartImpl(Ps &&... Args)
+ : StencilPartInterface(StencilPartImpl::typeId()),
+ Data(std::forward<Ps>(Args)...) {}
+
+ // Generates a unique identifier for this class (specifically, one per
+ // instantiation of the template).
+ static const void* typeId() {
+ static bool b;
+ return &b;
+ }
+
+ Error eval(const MatchFinder::MatchResult &Match,
+ std::string *Result) const override {
+ return evalData(Data, Match, Result);
+ }
+
+ bool isEqual(const StencilPartInterface &Other) const override {
+ if (const auto *OtherPtr = down_cast<StencilPartImpl>(&Other))
+ return isEqualData(Data, OtherPtr->Data);
+ return false;
+ }
+};
+
+namespace {
+using RawText = StencilPartImpl<RawTextData>;
+using DebugPrintNodeOp = StencilPartImpl<DebugPrintNodeOpData>;
+using SelectorOp = StencilPartImpl<SelectorOpData>;
+} // namespace
+
+StencilPart Stencil::wrap(StringRef Text) {
+ return stencil::text(Text);
+}
+
+StencilPart Stencil::wrap(RangeSelector Selector) {
+ return stencil::selection(std::move(Selector));
+}
+
+void Stencil::append(Stencil OtherStencil) {
+ for (auto &Part : OtherStencil.Parts)
+ Parts.push_back(std::move(Part));
+}
+
+llvm::Expected<std::string>
+Stencil::eval(const MatchFinder::MatchResult &Match) const {
+ std::string Result;
+ for (const auto &Part : Parts)
+ if (auto Err = Part.eval(Match, &Result))
+ return std::move(Err);
+ return Result;
+}
+
+StencilPart stencil::text(StringRef Text) {
+ return StencilPart(std::make_shared<RawText>(Text));
+}
+
+StencilPart stencil::selection(RangeSelector Selector) {
+ return StencilPart(std::make_shared<SelectorOp>(std::move(Selector)));
+}
+
+StencilPart stencil::dPrint(StringRef Id) {
+ return StencilPart(std::make_shared<DebugPrintNodeOp>(Id));
+}
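
A Stencil is an ordered list of parts; evaluating it renders each part against the match and concatenates the results, so literal text and selected source can be mixed freely. A hedged sketch, assuming the variadic stencil::cat() helper declared in Stencil.h (not part of this diff), which wraps its arguments via Stencil::wrap(); the binding id and generated text are illustrative:

  #include "clang/ASTMatchers/ASTMatchFinder.h"
  #include "clang/Tooling/Refactoring/RangeSelector.h"
  #include "clang/Tooling/Refactoring/Stencil.h"

  using namespace clang;
  using namespace clang::tooling;

  // Renders e.g. `printf("%d", x)` when the node bound as "arg" spells `x`.
  // stencil::cat is assumed here; strings become text() parts and the
  // RangeSelector from node("arg") becomes a selection() part.
  llvm::Expected<std::string>
  buildCall(const ast_matchers::MatchFinder::MatchResult &Result) {
    Stencil S = stencil::cat("printf(\"%d\", ", node("arg"), ")");
    return S.eval(Result);
  }
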
diff --git a/lib/Tooling/Refactoring/Transformer.cpp b/lib/Tooling/Refactoring/Transformer.cpp
new file mode 100644
index 000000000000..8e6fe6c7a940
--- /dev/null
+++ b/lib/Tooling/Refactoring/Transformer.cpp
@@ -0,0 +1,263 @@
+//===--- Transformer.cpp - Transformer library implementation ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/Refactoring/Transformer.h"
+#include "clang/AST/Expr.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Rewrite/Core/Rewriter.h"
+#include "clang/Tooling/Refactoring/AtomicChange.h"
+#include "clang/Tooling/Refactoring/SourceCode.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/Error.h"
+#include <deque>
+#include <string>
+#include <utility>
+#include <vector>
+
+using namespace clang;
+using namespace tooling;
+
+using ast_matchers::MatchFinder;
+using ast_matchers::internal::DynTypedMatcher;
+using ast_type_traits::ASTNodeKind;
+using ast_type_traits::DynTypedNode;
+using llvm::Error;
+using llvm::StringError;
+
+using MatchResult = MatchFinder::MatchResult;
+
+// Did the text at this location originate in a macro definition (aka. body)?
+// For example,
+//
+// #define NESTED(x) x
+// #define MACRO(y) { int y = NESTED(3); }
+// if (true) MACRO(foo)
+//
+// The if statement expands to
+//
+// if (true) { int foo = 3; }
+// ^ ^
+// Loc1 Loc2
+//
+// For SourceManager SM, SM.isMacroArgExpansion(Loc1) and
+// SM.isMacroArgExpansion(Loc2) are both true, but isOriginMacroBody(SM, Loc1)
+// is false, because "foo" originated in the source file (as an argument to a
+// macro), whereas isOriginMacroBody(SM, Loc2) is true, because "3" originated
+// in the definition of MACRO.
+static bool isOriginMacroBody(const clang::SourceManager &SM,
+ clang::SourceLocation Loc) {
+ while (Loc.isMacroID()) {
+ if (SM.isMacroBodyExpansion(Loc))
+ return true;
+ // Otherwise, it must be in an argument, so we continue searching up the
+ // invocation stack. getImmediateMacroCallerLoc() gives the location of the
+ // argument text, inside the call text.
+ Loc = SM.getImmediateMacroCallerLoc(Loc);
+ }
+ return false;
+}
+
+Expected<SmallVector<tooling::detail::Transformation, 1>>
+tooling::detail::translateEdits(const MatchResult &Result,
+ llvm::ArrayRef<ASTEdit> Edits) {
+ SmallVector<tooling::detail::Transformation, 1> Transformations;
+ for (const auto &Edit : Edits) {
+ Expected<CharSourceRange> Range = Edit.TargetRange(Result);
+ if (!Range)
+ return Range.takeError();
+ if (Range->isInvalid() ||
+ isOriginMacroBody(*Result.SourceManager, Range->getBegin()))
+ return SmallVector<Transformation, 0>();
+ auto Replacement = Edit.Replacement(Result);
+ if (!Replacement)
+ return Replacement.takeError();
+ tooling::detail::Transformation T;
+ T.Range = *Range;
+ T.Replacement = std::move(*Replacement);
+ Transformations.push_back(std::move(T));
+ }
+ return Transformations;
+}
+
+ASTEdit tooling::change(RangeSelector S, TextGenerator Replacement) {
+ ASTEdit E;
+ E.TargetRange = std::move(S);
+ E.Replacement = std::move(Replacement);
+ return E;
+}
+
+RewriteRule tooling::makeRule(DynTypedMatcher M, SmallVector<ASTEdit, 1> Edits,
+ TextGenerator Explanation) {
+ return RewriteRule{{RewriteRule::Case{
+ std::move(M), std::move(Edits), std::move(Explanation), {}}}};
+}
+
+void tooling::addInclude(RewriteRule &Rule, StringRef Header,
+ IncludeFormat Format) {
+ for (auto &Case : Rule.Cases)
+ Case.AddedIncludes.emplace_back(Header.str(), Format);
+}
+
+// Determines whether A is a base type of B in the class hierarchy, including
+// the implicit relationship of Type and QualType.
+static bool isBaseOf(ASTNodeKind A, ASTNodeKind B) {
+ static auto TypeKind = ASTNodeKind::getFromNodeKind<Type>();
+ static auto QualKind = ASTNodeKind::getFromNodeKind<QualType>();
+ /// Mimic the implicit conversions of Matcher<>.
+ /// - From Matcher<Type> to Matcher<QualType>
+ /// - From Matcher<Base> to Matcher<Derived>
+ return (A.isSame(TypeKind) && B.isSame(QualKind)) || A.isBaseOf(B);
+}
+
+// Try to find a common kind to which all of the rule's matchers can be
+// converted.
+static ASTNodeKind
+findCommonKind(const SmallVectorImpl<RewriteRule::Case> &Cases) {
+ assert(!Cases.empty() && "Rule must have at least one case.");
+ ASTNodeKind JoinKind = Cases[0].Matcher.getSupportedKind();
+ // Find a (least) Kind K, for which M.canConvertTo(K) holds, for all matchers
+ // M in Rules.
+ for (const auto &Case : Cases) {
+ auto K = Case.Matcher.getSupportedKind();
+ if (isBaseOf(JoinKind, K)) {
+ JoinKind = K;
+ continue;
+ }
+ if (K.isSame(JoinKind) || isBaseOf(K, JoinKind))
+ // JoinKind is already the lowest.
+ continue;
+ // K and JoinKind are unrelated -- there is no least common kind.
+ return ASTNodeKind();
+ }
+ return JoinKind;
+}
+
+// Binds each rule's matcher to a unique (and deterministic) tag based on
+// `TagBase`.
+static std::vector<DynTypedMatcher>
+taggedMatchers(StringRef TagBase,
+ const SmallVectorImpl<RewriteRule::Case> &Cases) {
+ std::vector<DynTypedMatcher> Matchers;
+ Matchers.reserve(Cases.size());
+ size_t count = 0;
+ for (const auto &Case : Cases) {
+ std::string Tag = (TagBase + Twine(count)).str();
+ ++count;
+ auto M = Case.Matcher.tryBind(Tag);
+ assert(M && "RewriteRule matchers should be bindable.");
+ Matchers.push_back(*std::move(M));
+ }
+ return Matchers;
+}
+
+// Simply gathers the contents of the various rules into a single rule. The
+// actual work to combine these into an ordered choice is deferred to matcher
+// registration.
+RewriteRule tooling::applyFirst(ArrayRef<RewriteRule> Rules) {
+ RewriteRule R;
+ for (auto &Rule : Rules)
+ R.Cases.append(Rule.Cases.begin(), Rule.Cases.end());
+ return R;
+}
+
+static DynTypedMatcher joinCaseMatchers(const RewriteRule &Rule) {
+ assert(!Rule.Cases.empty() && "Rule must have at least one case.");
+ if (Rule.Cases.size() == 1)
+ return Rule.Cases[0].Matcher;
+
+ auto CommonKind = findCommonKind(Rule.Cases);
+ assert(!CommonKind.isNone() && "Cases must have compatible matchers.");
+ return DynTypedMatcher::constructVariadic(
+ DynTypedMatcher::VO_AnyOf, CommonKind, taggedMatchers("Tag", Rule.Cases));
+}
+
+DynTypedMatcher tooling::detail::buildMatcher(const RewriteRule &Rule) {
+ DynTypedMatcher M = joinCaseMatchers(Rule);
+ M.setAllowBind(true);
+ // `tryBind` is guaranteed to succeed, because `AllowBind` was set to true.
+ return *M.tryBind(RewriteRule::RootID);
+}
+
+// Finds the case that was "selected" -- that is, whose matcher triggered the
+// `MatchResult`.
+const RewriteRule::Case &
+tooling::detail::findSelectedCase(const MatchResult &Result,
+ const RewriteRule &Rule) {
+ if (Rule.Cases.size() == 1)
+ return Rule.Cases[0];
+
+ auto &NodesMap = Result.Nodes.getMap();
+ for (size_t i = 0, N = Rule.Cases.size(); i < N; ++i) {
+ std::string Tag = ("Tag" + Twine(i)).str();
+ if (NodesMap.find(Tag) != NodesMap.end())
+ return Rule.Cases[i];
+ }
+ llvm_unreachable("No tag found for this rule.");
+}
+
+constexpr llvm::StringLiteral RewriteRule::RootID;
+
+void Transformer::registerMatchers(MatchFinder *MatchFinder) {
+ MatchFinder->addDynamicMatcher(tooling::detail::buildMatcher(Rule), this);
+}
+
+void Transformer::run(const MatchResult &Result) {
+ if (Result.Context->getDiagnostics().hasErrorOccurred())
+ return;
+
+ // Verify the existence and validity of the AST node that roots this rule.
+ auto &NodesMap = Result.Nodes.getMap();
+ auto Root = NodesMap.find(RewriteRule::RootID);
+ assert(Root != NodesMap.end() && "Transformation failed: missing root node.");
+ SourceLocation RootLoc = Result.SourceManager->getExpansionLoc(
+ Root->second.getSourceRange().getBegin());
+ assert(RootLoc.isValid() && "Invalid location for Root node of match.");
+
+ RewriteRule::Case Case = tooling::detail::findSelectedCase(Result, Rule);
+ auto Transformations = tooling::detail::translateEdits(Result, Case.Edits);
+ if (!Transformations) {
+ Consumer(Transformations.takeError());
+ return;
+ }
+
+ if (Transformations->empty()) {
+ // No rewrite applied (but no error encountered either).
+ RootLoc.print(llvm::errs() << "note: skipping match at loc ",
+ *Result.SourceManager);
+ llvm::errs() << "\n";
+ return;
+ }
+
+ // Record the results in the AtomicChange.
+ AtomicChange AC(*Result.SourceManager, RootLoc);
+ for (const auto &T : *Transformations) {
+ if (auto Err = AC.replace(*Result.SourceManager, T.Range, T.Replacement)) {
+ Consumer(std::move(Err));
+ return;
+ }
+ }
+
+ for (const auto &I : Case.AddedIncludes) {
+ auto &Header = I.first;
+ switch (I.second) {
+ case IncludeFormat::Quoted:
+ AC.addHeader(Header);
+ break;
+ case IncludeFormat::Angled:
+ AC.addHeader((llvm::Twine("<") + Header + ">").str());
+ break;
+ }
+ }
+
+ Consumer(std::move(AC));
+}
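
Putting the pieces together: a RewriteRule pairs a matcher with a list of ASTEdits, and Transformer turns each match into an AtomicChange handed to a consumer. A hedged sketch of a rule that rewrites the argument list of calls to a function named log; the matcher, names and replacement text are illustrative, and the replacement lambda relies only on what translateEdits() above shows (it is invoked with the MatchResult to produce the new text):

  #include "clang/ASTMatchers/ASTMatchers.h"
  #include "clang/Tooling/Refactoring/RangeSelector.h"
  #include "clang/Tooling/Refactoring/Transformer.h"

  using namespace clang;
  using namespace clang::tooling;
  using namespace clang::ast_matchers;

  // Replace the arguments of every `log(...)` call with a fixed string.
  RewriteRule makeLogRule() {
    auto Replacement = [](const MatchFinder::MatchResult &) {
      return std::string("\"redacted\"");
    };
    return makeRule(callExpr(callee(functionDecl(hasName("log")))).bind("call"),
                    {change(callArgs("call"), Replacement)},
                    /*Explanation=*/nullptr);
  }

  // Running it (illustrative): hand the rule and a change consumer to a
  // Transformer, register it with a MatchFinder, and run the finder as usual.
  //   Transformer T(makeLogRule(),
  //                 [](llvm::Expected<AtomicChange> C) { /* collect C */ });
  //   MatchFinder Finder;
  //   T.registerMatchers(&Finder);
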
diff --git a/lib/Tooling/RefactoringCallbacks.cpp b/lib/Tooling/RefactoringCallbacks.cpp
index 9fd333ca554e..2aa5b5bf9d98 100644
--- a/lib/Tooling/RefactoringCallbacks.cpp
+++ b/lib/Tooling/RefactoringCallbacks.cpp
@@ -1,9 +1,8 @@
//===--- RefactoringCallbacks.cpp - Structural query framework ------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Tooling/StandaloneExecution.cpp b/lib/Tooling/StandaloneExecution.cpp
index 1daf792fb86f..ad82ee083a40 100644
--- a/lib/Tooling/StandaloneExecution.cpp
+++ b/lib/Tooling/StandaloneExecution.cpp
@@ -1,9 +1,8 @@
//===- lib/Tooling/Execution.cpp - Standalone clang action execution. -----===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Tooling/Syntax/BuildTree.cpp b/lib/Tooling/Syntax/BuildTree.cpp
new file mode 100644
index 000000000000..03c439c59e39
--- /dev/null
+++ b/lib/Tooling/Syntax/BuildTree.cpp
@@ -0,0 +1,273 @@
+//===- BuildTree.cpp ------------------------------------------*- C++ -*-=====//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Tooling/Syntax/BuildTree.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/Stmt.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TokenKinds.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Tooling/Syntax/Nodes.h"
+#include "clang/Tooling/Syntax/Tokens.h"
+#include "clang/Tooling/Syntax/Tree.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/raw_ostream.h"
+#include <map>
+
+using namespace clang;
+
+/// A helper class for constructing the syntax tree while traversing a clang
+/// AST.
+///
+/// At each point of the traversal we maintain a list of pending nodes.
+/// Initially all tokens are added as pending nodes. When processing a clang AST
+/// node, the clients need to:
+/// - create a corresponding syntax node,
+/// - assign roles to all pending child nodes with 'markChild' and
+/// 'markChildToken',
+/// - replace the child nodes with the new syntax node in the pending list
+/// with 'foldNode'.
+///
+/// Note that all children are expected to be processed when building a node.
+///
+/// Call finalize() to finish building the tree and consume the root node.
+class syntax::TreeBuilder {
+public:
+ TreeBuilder(syntax::Arena &Arena) : Arena(Arena), Pending(Arena) {}
+
+ llvm::BumpPtrAllocator &allocator() { return Arena.allocator(); }
+
+ /// Populate children for \p New node, assuming it covers tokens from \p
+ /// Range.
+ void foldNode(llvm::ArrayRef<syntax::Token> Range, syntax::Tree *New);
+
+ /// Set role for a token starting at \p Loc.
+ void markChildToken(SourceLocation Loc, tok::TokenKind Kind, NodeRole R);
+
+ /// Finish building the tree and consume the root node.
+ syntax::TranslationUnit *finalize() && {
+ auto Tokens = Arena.tokenBuffer().expandedTokens();
+ // Build the root of the tree, consuming all the children.
+ Pending.foldChildren(Tokens,
+ new (Arena.allocator()) syntax::TranslationUnit);
+
+ return cast<syntax::TranslationUnit>(std::move(Pending).finalize());
+ }
+
+ /// getRange() finds the syntax tokens corresponding to the passed source
+ /// locations.
+ /// \p First is the start position of the first token and \p Last is the start
+ /// position of the last token.
+ llvm::ArrayRef<syntax::Token> getRange(SourceLocation First,
+ SourceLocation Last) const {
+ assert(First.isValid());
+ assert(Last.isValid());
+ assert(First == Last ||
+ Arena.sourceManager().isBeforeInTranslationUnit(First, Last));
+ return llvm::makeArrayRef(findToken(First), std::next(findToken(Last)));
+ }
+ llvm::ArrayRef<syntax::Token> getRange(const Decl *D) const {
+ return getRange(D->getBeginLoc(), D->getEndLoc());
+ }
+ llvm::ArrayRef<syntax::Token> getRange(const Stmt *S) const {
+ return getRange(S->getBeginLoc(), S->getEndLoc());
+ }
+
+private:
+ /// Finds a token starting at \p L. The token must exist.
+ const syntax::Token *findToken(SourceLocation L) const;
+
+ /// A collection of trees covering the input tokens.
+ /// When created, each tree corresponds to a single token in the file.
+ /// Clients call 'foldChildren' to attach one or more subtrees to a parent
+ /// node and update the list of trees accordingly.
+ ///
+ /// Ensures that added nodes properly nest and cover the whole token stream.
+ struct Forest {
+ Forest(syntax::Arena &A) {
+ // FIXME: do not add 'eof' to the tree.
+
+ // Create all leaf nodes.
+ for (auto &T : A.tokenBuffer().expandedTokens())
+ Trees.insert(Trees.end(),
+ {&T, NodeAndRole{new (A.allocator()) syntax::Leaf(&T)}});
+ }
+
+ void assignRole(llvm::ArrayRef<syntax::Token> Range,
+ syntax::NodeRole Role) {
+ assert(!Range.empty());
+ auto It = Trees.lower_bound(Range.begin());
+ assert(It != Trees.end() && "no node found");
+ assert(It->first == Range.begin() && "no child with the specified range");
+ assert((std::next(It) == Trees.end() ||
+ std::next(It)->first == Range.end()) &&
+ "no child with the specified range");
+ It->second.Role = Role;
+ }
+
+ /// Add \p Node to the forest and fill its children nodes based on the \p
+ /// NodeRange.
+ void foldChildren(llvm::ArrayRef<syntax::Token> NodeTokens,
+ syntax::Tree *Node) {
+ assert(!NodeTokens.empty());
+ assert(Node->firstChild() == nullptr && "node already has children");
+
+ auto *FirstToken = NodeTokens.begin();
+ auto BeginChildren = Trees.lower_bound(FirstToken);
+ assert(BeginChildren != Trees.end() &&
+ BeginChildren->first == FirstToken &&
+ "fold crosses boundaries of existing subtrees");
+ auto EndChildren = Trees.lower_bound(NodeTokens.end());
+ assert((EndChildren == Trees.end() ||
+ EndChildren->first == NodeTokens.end()) &&
+ "fold crosses boundaries of existing subtrees");
+
+ // (!) we need to go in reverse order, because we can only prepend.
+ for (auto It = EndChildren; It != BeginChildren; --It)
+ Node->prependChildLowLevel(std::prev(It)->second.Node,
+ std::prev(It)->second.Role);
+
+ Trees.erase(BeginChildren, EndChildren);
+ Trees.insert({FirstToken, NodeAndRole(Node)});
+ }
+
+ // EXPECTS: all tokens were consumed and are owned by a single root node.
+ syntax::Node *finalize() && {
+ assert(Trees.size() == 1);
+ auto *Root = Trees.begin()->second.Node;
+ Trees = {};
+ return Root;
+ }
+
+ std::string str(const syntax::Arena &A) const {
+ std::string R;
+ for (auto It = Trees.begin(); It != Trees.end(); ++It) {
+ unsigned CoveredTokens =
+ std::next(It) != Trees.end()
+ ? (std::next(It)->first - It->first)
+ : A.tokenBuffer().expandedTokens().end() - It->first;
+
+ R += llvm::formatv("- '{0}' covers '{1}'+{2} tokens\n",
+ It->second.Node->kind(),
+ It->first->text(A.sourceManager()), CoveredTokens);
+ R += It->second.Node->dump(A);
+ }
+ return R;
+ }
+
+ private:
+ /// A node with a role that should be assigned to it when adding to a parent.
+ struct NodeAndRole {
+ explicit NodeAndRole(syntax::Node *Node)
+ : Node(Node), Role(NodeRole::Unknown) {}
+
+ syntax::Node *Node;
+ NodeRole Role;
+ };
+
+ /// Maps from the start token to a subtree starting at that token.
+ /// FIXME: storing the end tokens is redundant.
+ /// FIXME: the key of a map is redundant, it is also stored in NodeForRange.
+ std::map<const syntax::Token *, NodeAndRole> Trees;
+ };
+
+ /// For debugging purposes.
+ std::string str() { return Pending.str(Arena); }
+
+ syntax::Arena &Arena;
+ Forest Pending;
+};
+
+namespace {
+class BuildTreeVisitor : public RecursiveASTVisitor<BuildTreeVisitor> {
+public:
+ explicit BuildTreeVisitor(ASTContext &Ctx, syntax::TreeBuilder &Builder)
+ : Builder(Builder), LangOpts(Ctx.getLangOpts()) {}
+
+ bool shouldTraversePostOrder() const { return true; }
+
+ bool TraverseDecl(Decl *D) {
+ if (!D || isa<TranslationUnitDecl>(D))
+ return RecursiveASTVisitor::TraverseDecl(D);
+ if (!llvm::isa<TranslationUnitDecl>(D->getDeclContext()))
+ return true; // Only build top-level decls for now, do not recurse.
+ return RecursiveASTVisitor::TraverseDecl(D);
+ }
+
+ bool VisitDecl(Decl *D) {
+ assert(llvm::isa<TranslationUnitDecl>(D->getDeclContext()) &&
+ "expected a top-level decl");
+ assert(!D->isImplicit());
+ Builder.foldNode(Builder.getRange(D),
+ new (allocator()) syntax::TopLevelDeclaration());
+ return true;
+ }
+
+ bool WalkUpFromTranslationUnitDecl(TranslationUnitDecl *TU) {
+ // (!) we do not want to call VisitDecl(); the declaration for the
+ // translation unit is built by finalize().
+ return true;
+ }
+
+ bool WalkUpFromCompoundStmt(CompoundStmt *S) {
+ using NodeRole = syntax::NodeRole;
+
+ Builder.markChildToken(S->getLBracLoc(), tok::l_brace,
+ NodeRole::CompoundStatement_lbrace);
+ Builder.markChildToken(S->getRBracLoc(), tok::r_brace,
+ NodeRole::CompoundStatement_rbrace);
+
+ Builder.foldNode(Builder.getRange(S),
+ new (allocator()) syntax::CompoundStatement);
+ return true;
+ }
+
+private:
+ /// A small helper to save some typing.
+ llvm::BumpPtrAllocator &allocator() { return Builder.allocator(); }
+
+ syntax::TreeBuilder &Builder;
+ const LangOptions &LangOpts;
+};
+} // namespace
+
+void syntax::TreeBuilder::foldNode(llvm::ArrayRef<syntax::Token> Range,
+ syntax::Tree *New) {
+ Pending.foldChildren(Range, New);
+}
+
+void syntax::TreeBuilder::markChildToken(SourceLocation Loc,
+ tok::TokenKind Kind, NodeRole Role) {
+ if (Loc.isInvalid())
+ return;
+ Pending.assignRole(*findToken(Loc), Role);
+}
+
+const syntax::Token *syntax::TreeBuilder::findToken(SourceLocation L) const {
+ auto Tokens = Arena.tokenBuffer().expandedTokens();
+ auto &SM = Arena.sourceManager();
+ auto It = llvm::partition_point(Tokens, [&](const syntax::Token &T) {
+ return SM.isBeforeInTranslationUnit(T.location(), L);
+ });
+ assert(It != Tokens.end());
+ assert(It->location() == L);
+ return &*It;
+}
+
+syntax::TranslationUnit *
+syntax::buildSyntaxTree(Arena &A, const TranslationUnitDecl &TU) {
+ TreeBuilder Builder(A);
+ BuildTreeVisitor(TU.getASTContext(), Builder).TraverseAST(TU.getASTContext());
+ return std::move(Builder).finalize();
+}
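
buildSyntaxTree() drives the RecursiveASTVisitor above: a leaf is created for every expanded token, visited nodes fold their token ranges into Tree nodes, and finalize() folds whatever remains into the TranslationUnit root. A minimal sketch of calling it, assuming a syntax::Arena has already been set up elsewhere with the SourceManager, LangOptions and TokenBuffer for the parse (the Arena API lives in Tree.h, outside this diff):

  #include "clang/AST/ASTContext.h"
  #include "clang/Tooling/Syntax/BuildTree.h"
  #include "clang/Tooling/Syntax/Nodes.h"
  #include "clang/Tooling/Syntax/Tree.h"
  #include "llvm/Support/raw_ostream.h"

  // Build and dump the syntax tree for a translation unit.
  void dumpSyntaxTree(clang::ASTContext &Ctx, clang::syntax::Arena &Arena) {
    clang::syntax::TranslationUnit *Root =
        clang::syntax::buildSyntaxTree(Arena, *Ctx.getTranslationUnitDecl());
    llvm::errs() << Root->dump(Arena);
  }
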
diff --git a/lib/Tooling/Syntax/Nodes.cpp b/lib/Tooling/Syntax/Nodes.cpp
new file mode 100644
index 000000000000..061ed73bbebd
--- /dev/null
+++ b/lib/Tooling/Syntax/Nodes.cpp
@@ -0,0 +1,35 @@
+//===- Nodes.cpp ----------------------------------------------*- C++ -*-=====//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Tooling/Syntax/Nodes.h"
+#include "clang/Basic/TokenKinds.h"
+
+using namespace clang;
+
+llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS, NodeKind K) {
+ switch (K) {
+ case NodeKind::Leaf:
+ return OS << "Leaf";
+ case NodeKind::TranslationUnit:
+ return OS << "TranslationUnit";
+ case NodeKind::TopLevelDeclaration:
+ return OS << "TopLevelDeclaration";
+ case NodeKind::CompoundStatement:
+ return OS << "CompoundStatement";
+ }
+ llvm_unreachable("unknown node kind");
+}
+
+syntax::Leaf *syntax::CompoundStatement::lbrace() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(NodeRole::CompoundStatement_lbrace));
+}
+
+syntax::Leaf *syntax::CompoundStatement::rbrace() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(NodeRole::CompoundStatement_rbrace));
+}
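
The role-based accessors added here let clients query children by role instead of by position. A trivial sketch, assuming a CompoundStatement node obtained from a tree built as in the previous example:

  #include "clang/Tooling/Syntax/Nodes.h"

  // True when both braces of S made it into the tree.
  bool hasBothBraces(clang::syntax::CompoundStatement &S) {
    return S.lbrace() != nullptr && S.rbrace() != nullptr;
  }
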
diff --git a/lib/Tooling/Syntax/Tokens.cpp b/lib/Tooling/Syntax/Tokens.cpp
new file mode 100644
index 000000000000..d82dc1f35c94
--- /dev/null
+++ b/lib/Tooling/Syntax/Tokens.cpp
@@ -0,0 +1,618 @@
+//===- Tokens.cpp - collect tokens from preprocessing ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Tooling/Syntax/Tokens.h"
+
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TokenKinds.h"
+#include "clang/Lex/PPCallbacks.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/Token.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <string>
+#include <utility>
+#include <vector>
+
+using namespace clang;
+using namespace clang::syntax;
+
+syntax::Token::Token(SourceLocation Location, unsigned Length,
+ tok::TokenKind Kind)
+ : Location(Location), Length(Length), Kind(Kind) {
+ assert(Location.isValid());
+}
+
+syntax::Token::Token(const clang::Token &T)
+ : Token(T.getLocation(), T.getLength(), T.getKind()) {
+ assert(!T.isAnnotation());
+}
+
+llvm::StringRef syntax::Token::text(const SourceManager &SM) const {
+ bool Invalid = false;
+ const char *Start = SM.getCharacterData(location(), &Invalid);
+ assert(!Invalid);
+ return llvm::StringRef(Start, length());
+}
+
+FileRange syntax::Token::range(const SourceManager &SM) const {
+ assert(location().isFileID() && "must be a spelled token");
+ FileID File;
+ unsigned StartOffset;
+ std::tie(File, StartOffset) = SM.getDecomposedLoc(location());
+ return FileRange(File, StartOffset, StartOffset + length());
+}
+
+FileRange syntax::Token::range(const SourceManager &SM,
+ const syntax::Token &First,
+ const syntax::Token &Last) {
+ auto F = First.range(SM);
+ auto L = Last.range(SM);
+ assert(F.file() == L.file() && "tokens from different files");
+ assert(F.endOffset() <= L.beginOffset() && "wrong order of tokens");
+ return FileRange(F.file(), F.beginOffset(), L.endOffset());
+}
+
+llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS, const Token &T) {
+ return OS << T.str();
+}
+
+FileRange::FileRange(FileID File, unsigned BeginOffset, unsigned EndOffset)
+ : File(File), Begin(BeginOffset), End(EndOffset) {
+ assert(File.isValid());
+ assert(BeginOffset <= EndOffset);
+}
+
+FileRange::FileRange(const SourceManager &SM, SourceLocation BeginLoc,
+ unsigned Length) {
+ assert(BeginLoc.isValid());
+ assert(BeginLoc.isFileID());
+
+ std::tie(File, Begin) = SM.getDecomposedLoc(BeginLoc);
+ End = Begin + Length;
+}
+FileRange::FileRange(const SourceManager &SM, SourceLocation BeginLoc,
+ SourceLocation EndLoc) {
+ assert(BeginLoc.isValid());
+ assert(BeginLoc.isFileID());
+ assert(EndLoc.isValid());
+ assert(EndLoc.isFileID());
+ assert(SM.getFileID(BeginLoc) == SM.getFileID(EndLoc));
+ assert(SM.getFileOffset(BeginLoc) <= SM.getFileOffset(EndLoc));
+
+ std::tie(File, Begin) = SM.getDecomposedLoc(BeginLoc);
+ End = SM.getFileOffset(EndLoc);
+}
+
+llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS,
+ const FileRange &R) {
+ return OS << llvm::formatv("FileRange(file = {0}, offsets = {1}-{2})",
+ R.file().getHashValue(), R.beginOffset(),
+ R.endOffset());
+}
+
+llvm::StringRef FileRange::text(const SourceManager &SM) const {
+ bool Invalid = false;
+ StringRef Text = SM.getBufferData(File, &Invalid);
+ if (Invalid)
+ return "";
+ assert(Begin <= Text.size());
+ assert(End <= Text.size());
+ return Text.substr(Begin, length());
+}
+
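For illustration, a minimal sketch of how the two FileRange constructors above might be used; SM, Begin, End, and the length are assumed valid, same-file inputs and are not part of this commit:

// Hypothetical inputs: a SourceManager SM and two locations in the same file.
syntax::FileRange FromLength(SM, Begin, /*Length=*/5);
syntax::FileRange FromPair(SM, Begin, End);
llvm::StringRef Covered = FromPair.text(SM); // source text the range spans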
+std::pair<const syntax::Token *, const TokenBuffer::Mapping *>
+TokenBuffer::spelledForExpandedToken(const syntax::Token *Expanded) const {
+ assert(Expanded);
+ assert(ExpandedTokens.data() <= Expanded &&
+ Expanded < ExpandedTokens.data() + ExpandedTokens.size());
+
+ auto FileIt = Files.find(
+ SourceMgr->getFileID(SourceMgr->getExpansionLoc(Expanded->location())));
+ assert(FileIt != Files.end() && "no file for an expanded token");
+
+ const MarkedFile &File = FileIt->second;
+
+ unsigned ExpandedIndex = Expanded - ExpandedTokens.data();
+ // Find the first mapping that produced tokens after \p Expanded.
+ auto It = llvm::partition_point(File.Mappings, [&](const Mapping &M) {
+ return M.BeginExpanded <= ExpandedIndex;
+ });
+ // Our token could only be produced by the previous mapping.
+ if (It == File.Mappings.begin()) {
+ // No previous mapping, no need to modify offsets.
+ return {&File.SpelledTokens[ExpandedIndex - File.BeginExpanded], nullptr};
+ }
+ --It; // 'It' now points to the last mapping that started before our token.
+
+ // Check if the token is part of the mapping.
+ if (ExpandedIndex < It->EndExpanded)
+ return {&File.SpelledTokens[It->BeginSpelled], /*Mapping*/ &*It};
+
+ // Not part of the mapping; use the index from the previous mapping to compute
+ // the corresponding spelled token.
+ return {
+ &File.SpelledTokens[It->EndSpelled + (ExpandedIndex - It->EndExpanded)],
+ /*Mapping*/ nullptr};
+}
+
+llvm::ArrayRef<syntax::Token> TokenBuffer::spelledTokens(FileID FID) const {
+ auto It = Files.find(FID);
+ assert(It != Files.end());
+ return It->second.SpelledTokens;
+}
+
+std::string TokenBuffer::Mapping::str() const {
+ return llvm::formatv("spelled tokens: [{0},{1}), expanded tokens: [{2},{3})",
+ BeginSpelled, EndSpelled, BeginExpanded, EndExpanded);
+}
+
+llvm::Optional<llvm::ArrayRef<syntax::Token>>
+TokenBuffer::spelledForExpanded(llvm::ArrayRef<syntax::Token> Expanded) const {
+ // Mapping an empty range is ambiguous when there are empty mappings at either
+ // end of the range; bail out in that case.
+ if (Expanded.empty())
+ return llvm::None;
+
+ // FIXME: also allow changes uniquely mapping to macro arguments.
+
+ const syntax::Token *BeginSpelled;
+ const Mapping *BeginMapping;
+ std::tie(BeginSpelled, BeginMapping) =
+ spelledForExpandedToken(&Expanded.front());
+
+ const syntax::Token *LastSpelled;
+ const Mapping *LastMapping;
+ std::tie(LastSpelled, LastMapping) =
+ spelledForExpandedToken(&Expanded.back());
+
+ FileID FID = SourceMgr->getFileID(BeginSpelled->location());
+ // FIXME: Handle multi-file changes by trying to map onto a common root.
+ if (FID != SourceMgr->getFileID(LastSpelled->location()))
+ return llvm::None;
+
+ const MarkedFile &File = Files.find(FID)->second;
+
+ // Do not allow changes that cross macro expansion boundaries.
+ unsigned BeginExpanded = Expanded.begin() - ExpandedTokens.data();
+ unsigned EndExpanded = Expanded.end() - ExpandedTokens.data();
+ if (BeginMapping && BeginMapping->BeginExpanded < BeginExpanded)
+ return llvm::None;
+ if (LastMapping && EndExpanded < LastMapping->EndExpanded)
+ return llvm::None;
+ // All is good, return the result.
+ return llvm::makeArrayRef(
+ BeginMapping ? File.SpelledTokens.data() + BeginMapping->BeginSpelled
+ : BeginSpelled,
+ LastMapping ? File.SpelledTokens.data() + LastMapping->EndSpelled
+ : LastSpelled + 1);
+}
+
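As a sketch of the intended use of spelledForExpanded() (not part of this commit): map a range of expanded tokens back to the spelled tokens that produced them and derive a file range for a textual edit. Buffer, SM, and Expanded are assumed to exist; the call yields llvm::None when the range crosses a macro expansion boundary.

if (llvm::Optional<llvm::ArrayRef<syntax::Token>> Spelled =
        Buffer.spelledForExpanded(Expanded)) {
  syntax::FileRange R =
      syntax::Token::range(SM, Spelled->front(), Spelled->back());
  // R covers the spelled tokens and can anchor a replacement in the file.
}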
+llvm::Optional<TokenBuffer::Expansion>
+TokenBuffer::expansionStartingAt(const syntax::Token *Spelled) const {
+ assert(Spelled);
+ assert(Spelled->location().isFileID() && "not a spelled token");
+ auto FileIt = Files.find(SourceMgr->getFileID(Spelled->location()));
+ assert(FileIt != Files.end() && "file not tracked by token buffer");
+
+ auto &File = FileIt->second;
+ assert(File.SpelledTokens.data() <= Spelled &&
+ Spelled < (File.SpelledTokens.data() + File.SpelledTokens.size()));
+
+ unsigned SpelledIndex = Spelled - File.SpelledTokens.data();
+ auto M = llvm::partition_point(File.Mappings, [&](const Mapping &M) {
+ return M.BeginSpelled < SpelledIndex;
+ });
+ if (M == File.Mappings.end() || M->BeginSpelled != SpelledIndex)
+ return llvm::None;
+
+ Expansion E;
+ E.Spelled = llvm::makeArrayRef(File.SpelledTokens.data() + M->BeginSpelled,
+ File.SpelledTokens.data() + M->EndSpelled);
+ E.Expanded = llvm::makeArrayRef(ExpandedTokens.data() + M->BeginExpanded,
+ ExpandedTokens.data() + M->EndExpanded);
+ return E;
+}
+
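A sketch of expansionStartingAt() in use, assuming Buffer is a populated TokenBuffer and Tok points into Buffer.spelledTokens(FID); neither name comes from this commit:

if (llvm::Optional<syntax::TokenBuffer::Expansion> E =
        Buffer.expansionStartingAt(Tok)) {
  // E->Spelled covers the macro name and arguments, E->Expanded the tokens
  // the macro produced (possibly none).
  llvm::errs() << "macro use: " << E->Spelled.size() << " spelled, "
               << E->Expanded.size() << " expanded tokens\n";
}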
+std::vector<syntax::Token> syntax::tokenize(FileID FID, const SourceManager &SM,
+ const LangOptions &LO) {
+ std::vector<syntax::Token> Tokens;
+ IdentifierTable Identifiers(LO);
+ auto AddToken = [&](clang::Token T) {
+ // Fill the proper token kind for keywords, etc.
+ if (T.getKind() == tok::raw_identifier && !T.needsCleaning() &&
+ !T.hasUCN()) { // FIXME: support needsCleaning and hasUCN cases.
+ clang::IdentifierInfo &II = Identifiers.get(T.getRawIdentifier());
+ T.setIdentifierInfo(&II);
+ T.setKind(II.getTokenID());
+ }
+ Tokens.push_back(syntax::Token(T));
+ };
+
+ Lexer L(FID, SM.getBuffer(FID), SM, LO);
+
+ clang::Token T;
+ while (!L.LexFromRawLexer(T))
+ AddToken(T);
+ // 'eof' is only the last token if the input is null-terminated. Never store
+ // it, for consistency.
+ if (T.getKind() != tok::eof)
+ AddToken(T);
+ return Tokens;
+}
+
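syntax::tokenize() is also usable on its own for a single file; a sketch, assuming FID, SM, and LangOpts describe an already-loaded file (illustrative names, not from this commit):

std::vector<syntax::Token> Toks = syntax::tokenize(FID, SM, LangOpts);
for (const syntax::Token &T : Toks)
  llvm::errs() << T.dumpForTests(SM) << "\n"; // e.g. "identifier foo"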
+/// Records information required to construct mappings for the token buffer that
+/// we are collecting.
+class TokenCollector::CollectPPExpansions : public PPCallbacks {
+public:
+ CollectPPExpansions(TokenCollector &C) : Collector(&C) {}
+
+ /// A disabled instance stops reporting anything to the TokenCollector.
+ /// This ensures that uses of the preprocessor after TokenCollector::consume()
+ /// is called do not access the (possibly invalid) collector instance.
+ void disable() { Collector = nullptr; }
+
+ void MacroExpands(const clang::Token &MacroNameTok, const MacroDefinition &MD,
+ SourceRange Range, const MacroArgs *Args) override {
+ if (!Collector)
+ return;
+ // Only record top-level expansions, not those where:
+ // - the macro use is inside a macro body,
+ // - the macro appears in an argument to another macro.
+ if (!MacroNameTok.getLocation().isFileID() ||
+ (LastExpansionEnd.isValid() &&
+ Collector->PP.getSourceManager().isBeforeInTranslationUnit(
+ Range.getBegin(), LastExpansionEnd)))
+ return;
+ Collector->Expansions[Range.getBegin().getRawEncoding()] = Range.getEnd();
+ LastExpansionEnd = Range.getEnd();
+ }
+ // FIXME: handle directives like #pragma, #include, etc.
+private:
+ TokenCollector *Collector;
+ /// Used to detect recursive macro expansions.
+ SourceLocation LastExpansionEnd;
+};
+
+/// Fills in the TokenBuffer by tracing the run of a preprocessor. The
+/// implementation tracks the tokens, macro expansions and directives coming
+/// from the preprocessor and:
+/// - for each token, figures out if it is a part of an expanded token stream,
+/// spelled token stream or both. Stores the tokens appropriately.
+/// - records mappings from the spelled to expanded token ranges, e.g. for macro
+/// expansions.
+/// FIXME: also properly record:
+/// - #include directives,
+/// - #pragma, #line and other PP directives,
+/// - skipped pp regions,
+/// - ...
+
+TokenCollector::TokenCollector(Preprocessor &PP) : PP(PP) {
+ // Collect the expanded token stream during preprocessing.
+ PP.setTokenWatcher([this](const clang::Token &T) {
+ if (T.isAnnotation())
+ return;
+ DEBUG_WITH_TYPE("collect-tokens", llvm::dbgs()
+ << "Token: "
+ << syntax::Token(T).dumpForTests(
+ this->PP.getSourceManager())
+ << "\n"
+
+ );
+ Expanded.push_back(syntax::Token(T));
+ });
+ // Also record the locations of macro calls, so that the boundaries of empty
+ // expansions can be recovered properly.
+ auto CB = llvm::make_unique<CollectPPExpansions>(*this);
+ this->Collector = CB.get();
+ PP.addPPCallbacks(std::move(CB));
+}
+
+/// Builds mappings and spelled tokens in the TokenBuffer based on the expanded
+/// token stream.
+class TokenCollector::Builder {
+public:
+ Builder(std::vector<syntax::Token> Expanded, PPExpansions CollectedExpansions,
+ const SourceManager &SM, const LangOptions &LangOpts)
+ : Result(SM), CollectedExpansions(std::move(CollectedExpansions)), SM(SM),
+ LangOpts(LangOpts) {
+ Result.ExpandedTokens = std::move(Expanded);
+ }
+
+ TokenBuffer build() && {
+ buildSpelledTokens();
+
+ // Walk over expanded and spelled tokens in parallel, building the mappings
+ // between them using source locations.
+ // To correctly recover empty macro expansions, we also take locations
+ // reported to PPCallbacks::MacroExpands into account as we do not have any
+ // expanded tokens with source locations to guide us.
+
+ // The 'eof' token is special: it is not part of the spelled token stream.
+ // We handle it separately at the end.
+ assert(!Result.ExpandedTokens.empty());
+ assert(Result.ExpandedTokens.back().kind() == tok::eof);
+ for (unsigned I = 0; I < Result.ExpandedTokens.size() - 1; ++I) {
+ // (!) I might be updated by the following call.
+ processExpandedToken(I);
+ }
+
+ // 'eof' not handled in the loop, do it here.
+ assert(SM.getMainFileID() ==
+ SM.getFileID(Result.ExpandedTokens.back().location()));
+ fillGapUntil(Result.Files[SM.getMainFileID()],
+ Result.ExpandedTokens.back().location(),
+ Result.ExpandedTokens.size() - 1);
+ Result.Files[SM.getMainFileID()].EndExpanded = Result.ExpandedTokens.size();
+
+ // Some files might have unaccounted spelled tokens at the end; add an empty
+ // mapping for those, as they have no expanded counterparts.
+ fillGapsAtEndOfFiles();
+
+ return std::move(Result);
+ }
+
+private:
+ /// Processes the next token in the expanded stream, consumes the
+ /// corresponding spelled tokens and records a mapping if needed.
+ /// (!) \p I will be updated if this had to skip tokens, e.g. for macros.
+ void processExpandedToken(unsigned &I) {
+ auto L = Result.ExpandedTokens[I].location();
+ if (L.isMacroID()) {
+ processMacroExpansion(SM.getExpansionRange(L), I);
+ return;
+ }
+ if (L.isFileID()) {
+ auto FID = SM.getFileID(L);
+ TokenBuffer::MarkedFile &File = Result.Files[FID];
+
+ fillGapUntil(File, L, I);
+
+ // Skip the token.
+ assert(File.SpelledTokens[NextSpelled[FID]].location() == L &&
+ "no corresponding token in the spelled stream");
+ ++NextSpelled[FID];
+ return;
+ }
+ }
+
+ /// Skips expanded and spelled tokens of a macro expansion that covers \p
+ /// SpelledRange and adds a corresponding mapping.
+ /// (!) \p I will be the index of the last token in an expansion after this
+ /// function returns.
+ void processMacroExpansion(CharSourceRange SpelledRange, unsigned &I) {
+ auto FID = SM.getFileID(SpelledRange.getBegin());
+ assert(FID == SM.getFileID(SpelledRange.getEnd()));
+ TokenBuffer::MarkedFile &File = Result.Files[FID];
+
+ fillGapUntil(File, SpelledRange.getBegin(), I);
+
+ // Skip all expanded tokens from the same macro expansion.
+ unsigned BeginExpanded = I;
+ for (; I + 1 < Result.ExpandedTokens.size(); ++I) {
+ auto NextL = Result.ExpandedTokens[I + 1].location();
+ if (!NextL.isMacroID() ||
+ SM.getExpansionLoc(NextL) != SpelledRange.getBegin())
+ break;
+ }
+ unsigned EndExpanded = I + 1;
+ consumeMapping(File, SM.getFileOffset(SpelledRange.getEnd()), BeginExpanded,
+ EndExpanded, NextSpelled[FID]);
+ }
+
+ /// Initializes TokenBuffer::Files and fills spelled tokens and expanded
+ /// ranges for each of the files.
+ void buildSpelledTokens() {
+ for (unsigned I = 0; I < Result.ExpandedTokens.size(); ++I) {
+ auto FID =
+ SM.getFileID(SM.getExpansionLoc(Result.ExpandedTokens[I].location()));
+ auto It = Result.Files.try_emplace(FID);
+ TokenBuffer::MarkedFile &File = It.first->second;
+
+ File.EndExpanded = I + 1;
+ if (!It.second)
+ continue; // we have seen this file before.
+
+ // This is the first time we see this file.
+ File.BeginExpanded = I;
+ File.SpelledTokens = tokenize(FID, SM, LangOpts);
+ }
+ }
+
+ void consumeEmptyMapping(TokenBuffer::MarkedFile &File, unsigned EndOffset,
+ unsigned ExpandedIndex, unsigned &SpelledIndex) {
+ consumeMapping(File, EndOffset, ExpandedIndex, ExpandedIndex, SpelledIndex);
+ }
+
+ /// Consumes spelled tokens that form a macro expansion and adds an entry to
+ /// the resulting token buffer.
+ /// (!) SpelledIndex is updated in-place.
+ void consumeMapping(TokenBuffer::MarkedFile &File, unsigned EndOffset,
+ unsigned BeginExpanded, unsigned EndExpanded,
+ unsigned &SpelledIndex) {
+ // We need to record this mapping before continuing.
+ unsigned MappingBegin = SpelledIndex;
+ ++SpelledIndex;
+
+ bool HitMapping =
+ tryConsumeSpelledUntil(File, EndOffset + 1, SpelledIndex).hasValue();
+ (void)HitMapping;
+ assert(!HitMapping && "recursive macro expansion?");
+
+ TokenBuffer::Mapping M;
+ M.BeginExpanded = BeginExpanded;
+ M.EndExpanded = EndExpanded;
+ M.BeginSpelled = MappingBegin;
+ M.EndSpelled = SpelledIndex;
+
+ File.Mappings.push_back(M);
+ }
+
+ /// Consumes spelled tokens until location \p L is reached and adds a mapping
+ /// covering the consumed tokens. The mapping will point to an empty expanded
+ /// range at position \p ExpandedIndex.
+ void fillGapUntil(TokenBuffer::MarkedFile &File, SourceLocation L,
+ unsigned ExpandedIndex) {
+ assert(L.isFileID());
+ FileID FID;
+ unsigned Offset;
+ std::tie(FID, Offset) = SM.getDecomposedLoc(L);
+
+ unsigned &SpelledIndex = NextSpelled[FID];
+ unsigned MappingBegin = SpelledIndex;
+ while (true) {
+ auto EndLoc = tryConsumeSpelledUntil(File, Offset, SpelledIndex);
+ if (SpelledIndex != MappingBegin) {
+ TokenBuffer::Mapping M;
+ M.BeginSpelled = MappingBegin;
+ M.EndSpelled = SpelledIndex;
+ M.BeginExpanded = M.EndExpanded = ExpandedIndex;
+ File.Mappings.push_back(M);
+ }
+ if (!EndLoc)
+ break;
+ consumeEmptyMapping(File, SM.getFileOffset(*EndLoc), ExpandedIndex,
+ SpelledIndex);
+
+ MappingBegin = SpelledIndex;
+ }
+ }
+
+ /// Consumes spelled tokens until it reaches Offset or a mapping boundary,
+ /// i.e. a name of a macro expansion or the start '#' token of a PP directive.
+ /// (!) NextSpelled is updated in place.
+ ///
+ /// Returns None if \p Offset was reached; otherwise returns the end location
+ /// of a mapping that starts at \p NextSpelled.
+ llvm::Optional<SourceLocation>
+ tryConsumeSpelledUntil(TokenBuffer::MarkedFile &File, unsigned Offset,
+ unsigned &NextSpelled) {
+ for (; NextSpelled < File.SpelledTokens.size(); ++NextSpelled) {
+ auto L = File.SpelledTokens[NextSpelled].location();
+ if (Offset <= SM.getFileOffset(L))
+ return llvm::None; // reached the offset we are looking for.
+ auto Mapping = CollectedExpansions.find(L.getRawEncoding());
+ if (Mapping != CollectedExpansions.end())
+ return Mapping->second; // found a mapping before the offset.
+ }
+ return llvm::None; // no more tokens, we "reached" the offset.
+ }
+
+ /// Adds empty mappings for unconsumed spelled tokens at the end of each file.
+ void fillGapsAtEndOfFiles() {
+ for (auto &F : Result.Files) {
+ if (F.second.SpelledTokens.empty())
+ continue;
+ fillGapUntil(F.second, F.second.SpelledTokens.back().endLocation(),
+ F.second.EndExpanded);
+ }
+ }
+
+ TokenBuffer Result;
+ /// For each file, the position of the next spelled token we will consume.
+ llvm::DenseMap<FileID, unsigned> NextSpelled;
+ PPExpansions CollectedExpansions;
+ const SourceManager &SM;
+ const LangOptions &LangOpts;
+};
+
+TokenBuffer TokenCollector::consume() && {
+ PP.setTokenWatcher(nullptr);
+ Collector->disable();
+ return Builder(std::move(Expanded), std::move(Expansions),
+ PP.getSourceManager(), PP.getLangOpts())
+ .build();
+}
+
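The intended lifecycle, sketched under the assumption that PP is the Preprocessor driving a parse elsewhere: install the collector before parsing, consume it exactly once afterwards (consume() is rvalue-qualified).

syntax::TokenCollector Collector(PP); // installs token watcher and PP callbacks
// ... run the parser / preprocessor over the main file ...
syntax::TokenBuffer Tokens = std::move(Collector).consume();
llvm::errs() << Tokens.dumpForTests();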
+std::string syntax::Token::str() const {
+ return llvm::formatv("Token({0}, length = {1})", tok::getTokenName(kind()),
+ length());
+}
+
+std::string syntax::Token::dumpForTests(const SourceManager &SM) const {
+ return llvm::formatv("{0} {1}", tok::getTokenName(kind()), text(SM));
+}
+
+std::string TokenBuffer::dumpForTests() const {
+ auto PrintToken = [this](const syntax::Token &T) -> std::string {
+ if (T.kind() == tok::eof)
+ return "<eof>";
+ return T.text(*SourceMgr);
+ };
+
+ auto DumpTokens = [this, &PrintToken](llvm::raw_ostream &OS,
+ llvm::ArrayRef<syntax::Token> Tokens) {
+ if (Tokens.empty()) {
+ OS << "<empty>";
+ return;
+ }
+ OS << Tokens[0].text(*SourceMgr);
+ for (unsigned I = 1; I < Tokens.size(); ++I) {
+ if (Tokens[I].kind() == tok::eof)
+ continue;
+ OS << " " << PrintToken(Tokens[I]);
+ }
+ };
+
+ std::string Dump;
+ llvm::raw_string_ostream OS(Dump);
+
+ OS << "expanded tokens:\n"
+ << " ";
+ // (!) we do not show '<eof>'.
+ DumpTokens(OS, llvm::makeArrayRef(ExpandedTokens).drop_back());
+ OS << "\n";
+
+ std::vector<FileID> Keys;
+ for (auto F : Files)
+ Keys.push_back(F.first);
+ llvm::sort(Keys);
+
+ for (FileID ID : Keys) {
+ const MarkedFile &File = Files.find(ID)->second;
+ auto *Entry = SourceMgr->getFileEntryForID(ID);
+ if (!Entry)
+ continue; // Skip builtin files.
+ OS << llvm::formatv("file '{0}'\n", Entry->getName())
+ << " spelled tokens:\n"
+ << " ";
+ DumpTokens(OS, File.SpelledTokens);
+ OS << "\n";
+
+ if (File.Mappings.empty()) {
+ OS << " no mappings.\n";
+ continue;
+ }
+ OS << " mappings:\n";
+ for (auto &M : File.Mappings) {
+ OS << llvm::formatv(
+ " ['{0}'_{1}, '{2}'_{3}) => ['{4}'_{5}, '{6}'_{7})\n",
+ PrintToken(File.SpelledTokens[M.BeginSpelled]), M.BeginSpelled,
+ M.EndSpelled == File.SpelledTokens.size()
+ ? "<eof>"
+ : PrintToken(File.SpelledTokens[M.EndSpelled]),
+ M.EndSpelled, PrintToken(ExpandedTokens[M.BeginExpanded]),
+ M.BeginExpanded, PrintToken(ExpandedTokens[M.EndExpanded]),
+ M.EndExpanded);
+ }
+ }
+ return OS.str();
+}
diff --git a/lib/Tooling/Syntax/Tree.cpp b/lib/Tooling/Syntax/Tree.cpp
new file mode 100644
index 000000000000..1549b6724fa4
--- /dev/null
+++ b/lib/Tooling/Syntax/Tree.cpp
@@ -0,0 +1,149 @@
+//===- Tree.cpp -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Tooling/Syntax/Tree.h"
+#include "clang/Basic/TokenKinds.h"
+#include "clang/Tooling/Syntax/Nodes.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Casting.h"
+
+using namespace clang;
+
+syntax::Arena::Arena(SourceManager &SourceMgr, const LangOptions &LangOpts,
+ TokenBuffer Tokens)
+ : SourceMgr(SourceMgr), LangOpts(LangOpts), Tokens(std::move(Tokens)) {}
+
+const clang::syntax::TokenBuffer &syntax::Arena::tokenBuffer() const {
+ return Tokens;
+}
+
+std::pair<FileID, llvm::ArrayRef<syntax::Token>>
+syntax::Arena::lexBuffer(std::unique_ptr<llvm::MemoryBuffer> Input) {
+ auto FID = SourceMgr.createFileID(std::move(Input));
+ auto It = ExtraTokens.try_emplace(FID, tokenize(FID, SourceMgr, LangOpts));
+ assert(It.second && "duplicate FileID");
+ return {FID, It.first->second};
+}
+
+syntax::Leaf::Leaf(const syntax::Token *Tok) : Node(NodeKind::Leaf), Tok(Tok) {
+ assert(Tok != nullptr);
+}
+
+bool syntax::Leaf::classof(const Node *N) {
+ return N->kind() == NodeKind::Leaf;
+}
+
+syntax::Node::Node(NodeKind Kind)
+ : Parent(nullptr), NextSibling(nullptr), Kind(static_cast<unsigned>(Kind)),
+ Role(static_cast<unsigned>(NodeRole::Detached)) {}
+
+bool syntax::Tree::classof(const Node *N) { return N->kind() > NodeKind::Leaf; }
+
+void syntax::Tree::prependChildLowLevel(Node *Child, NodeRole Role) {
+ assert(Child->Parent == nullptr);
+ assert(Child->NextSibling == nullptr);
+ assert(Child->role() == NodeRole::Detached);
+ assert(Role != NodeRole::Detached);
+
+ Child->Parent = this;
+ Child->NextSibling = this->FirstChild;
+ Child->Role = static_cast<unsigned>(Role);
+ this->FirstChild = Child;
+}
+
+namespace {
+static void traverse(const syntax::Node *N,
+ llvm::function_ref<void(const syntax::Node *)> Visit) {
+ if (auto *T = dyn_cast<syntax::Tree>(N)) {
+ for (auto *C = T->firstChild(); C; C = C->nextSibling())
+ traverse(C, Visit);
+ }
+ Visit(N);
+}
+static void dumpTokens(llvm::raw_ostream &OS, ArrayRef<syntax::Token> Tokens,
+ const SourceManager &SM) {
+ assert(!Tokens.empty());
+ bool First = true;
+ for (const auto &T : Tokens) {
+ if (!First)
+ OS << " ";
+ else
+ First = false;
+ // Handle 'eof' separately; calling text() on it produces an empty string.
+ if (T.kind() == tok::eof) {
+ OS << "<eof>";
+ continue;
+ }
+ OS << T.text(SM);
+ }
+}
+
+static void dumpTree(llvm::raw_ostream &OS, const syntax::Node *N,
+ const syntax::Arena &A, std::vector<bool> IndentMask) {
+ if (N->role() != syntax::NodeRole::Unknown) {
+ // FIXME: print the symbolic name of a role.
+ if (N->role() == syntax::NodeRole::Detached)
+ OS << "*: ";
+ else
+ OS << static_cast<int>(N->role()) << ": ";
+ }
+ if (auto *L = llvm::dyn_cast<syntax::Leaf>(N)) {
+ dumpTokens(OS, *L->token(), A.sourceManager());
+ OS << "\n";
+ return;
+ }
+
+ auto *T = llvm::cast<syntax::Tree>(N);
+ OS << T->kind() << "\n";
+
+ for (auto It = T->firstChild(); It != nullptr; It = It->nextSibling()) {
+ for (bool Filled : IndentMask) {
+ if (Filled)
+ OS << "| ";
+ else
+ OS << " ";
+ }
+ if (!It->nextSibling()) {
+ OS << "`-";
+ IndentMask.push_back(false);
+ } else {
+ OS << "|-";
+ IndentMask.push_back(true);
+ }
+ dumpTree(OS, It, A, IndentMask);
+ IndentMask.pop_back();
+ }
+}
+} // namespace
+
+std::string syntax::Node::dump(const Arena &A) const {
+ std::string Str;
+ llvm::raw_string_ostream OS(Str);
+ dumpTree(OS, this, A, /*IndentMask=*/{});
+ return std::move(OS.str());
+}
+
+std::string syntax::Node::dumpTokens(const Arena &A) const {
+ std::string Storage;
+ llvm::raw_string_ostream OS(Storage);
+ traverse(this, [&](const syntax::Node *N) {
+ auto *L = llvm::dyn_cast<syntax::Leaf>(N);
+ if (!L)
+ return;
+ ::dumpTokens(OS, *L->token(), A.sourceManager());
+ });
+ return OS.str();
+}
+
+syntax::Node *syntax::Tree::findChild(NodeRole R) {
+ for (auto *C = FirstChild; C; C = C->nextSibling()) {
+ if (C->role() == R)
+ return C;
+ }
+ return nullptr;
+}
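A sketch of the debugging helpers above, assuming a syntax::Arena A and a tree root Root obtained from the tree-building code elsewhere in this import (names are illustrative):

llvm::errs() << Root->dump(A);               // indented tree with roles and kinds
llvm::errs() << Root->dumpTokens(A) << "\n"; // the leaf tokens, in source order
if (auto *T = llvm::dyn_cast<syntax::Tree>(Root))
  if (syntax::Node *RBrace =
          T->findChild(syntax::NodeRole::CompoundStatement_rbrace))
    llvm::errs() << RBrace->dump(A); // e.g. the '}' leaf of a compound statement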
diff --git a/lib/Tooling/Tooling.cpp b/lib/Tooling/Tooling.cpp
index 63aa64a5330d..291df0ae333d 100644
--- a/lib/Tooling/Tooling.cpp
+++ b/lib/Tooling/Tooling.cpp
@@ -1,9 +1,8 @@
//===- Tooling.cpp - Running clang standalone tools -----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -302,7 +301,7 @@ bool ToolInvocation::run() {
DiagConsumer ? DiagConsumer : &DiagnosticPrinter, false);
const std::unique_ptr<driver::Driver> Driver(
- newDriver(&Diagnostics, BinaryName, Files->getVirtualFileSystem()));
+ newDriver(&Diagnostics, BinaryName, &Files->getVirtualFileSystem()));
// The "input file not found" diagnostics from the driver are useful.
// The driver is only aware of the VFS working directory, but some clients
// change this at the FileManager level instead.
@@ -482,7 +481,7 @@ int ClangTool::run(ToolAction *Action) {
if (OverlayFileSystem->setCurrentWorkingDirectory(
CompileCommand.Directory))
llvm::report_fatal_error("Cannot chdir into \"" +
- Twine(CompileCommand.Directory) + "\n!");
+ Twine(CompileCommand.Directory) + "\"!");
// Now fill the in-memory VFS with the relative file mappings so it will
// have the correct relative paths. We never remove mappings but that
@@ -518,7 +517,8 @@ int ClangTool::run(ToolAction *Action) {
if (!Invocation.run()) {
// FIXME: Diagnostics should be used instead.
- llvm::errs() << "Error while processing " << File << ".\n";
+ if (PrintErrorMessage)
+ llvm::errs() << "Error while processing " << File << ".\n";
ProcessingFailed = true;
}
}
@@ -570,6 +570,10 @@ void ClangTool::setRestoreWorkingDir(bool RestoreCWD) {
this->RestoreCWD = RestoreCWD;
}
+void ClangTool::setPrintErrorMessage(bool PrintErrorMessage) {
+ this->PrintErrorMessage = PrintErrorMessage;
+}
+
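A sketch of the new setPrintErrorMessage() hook in use: suppress the generic per-file error line while still signalling failure through the exit code. Compilations and Files are assumed to come from the usual CommonOptionsParser setup and are not part of this change.

ClangTool Tool(Compilations, Files);
Tool.setPrintErrorMessage(false); // rely on diagnostics, not the extra message
int Ret = Tool.run(newFrontendActionFactory<clang::SyntaxOnlyAction>().get());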
namespace clang {
namespace tooling {